index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/amadeus/__init__.py | """Amadeus tools."""
from langchain_community.tools.amadeus.closest_airport import AmadeusClosestAirport
from langchain_community.tools.amadeus.flight_search import AmadeusFlightSearch
__all__ = [
"AmadeusClosestAirport",
"AmadeusFlightSearch",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/searchapi/tool.py | """Tool for the SearchApi.io search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.searchapi import SearchApiAPIWrapper
class SearchAPIRun(BaseTool):  # type: ignore[override]
    """Tool that queries the SearchApi.io search API and returns a text answer."""

    name: str = "searchapi"
    # NOTE: trailing spaces inside the parts matter — without them the
    # concatenated description read "SearchApi.io.This tool is handy…".
    description: str = (
        "Google search API provided by SearchApi.io. "
        "This tool is handy when you need to answer questions about current events. "
        "Input should be a search query."
    )
    # Wrapper that performs the actual HTTP calls to SearchApi.io.
    api_wrapper: SearchApiAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run ``query`` through SearchApi.io and return the processed result."""
        return self.api_wrapper.run(query)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Asynchronously run ``query`` through SearchApi.io."""
        return await self.api_wrapper.arun(query)
class SearchAPIResults(BaseTool):  # type: ignore[override]
    """Tool that queries the SearchApi.io search API and returns JSON."""

    name: str = "searchapi_results_json"
    # NOTE: trailing spaces inside the parts matter — without them the
    # concatenated description ran sentences together.
    description: str = (
        "Google search API provided by SearchApi.io. "
        "This tool is handy when you need to answer questions about current events. "
        "The input should be a search query and the output is a JSON object "
        "with the query results."
    )
    api_wrapper: SearchApiAPIWrapper = Field(default_factory=SearchApiAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the raw query results rendered as a string."""
        return str(self.api_wrapper.results(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Asynchronously return the raw query results rendered as a string."""
        # str(...) is the idiomatic spelling of the former .__str__() call.
        return str(await self.api_wrapper.aresults(query))
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/searchapi/__init__.py | from langchain_community.tools.searchapi.tool import SearchAPIResults, SearchAPIRun
"""SearchApi.io API Toolkit."""
"""Tool for the SearchApi.io Google SERP API."""
__all__ = ["SearchAPIResults", "SearchAPIRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/dataherald/tool.py | """Tool for the Dataherald Hosted API"""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.dataherald import DataheraldAPIWrapper
class DataheraldTextToSQLInput(BaseModel):
    """Input schema for the Dataherald text-to-SQL tool."""

    # Natural-language question the Dataherald API turns into SQL.
    prompt: str = Field(
        description="Natural language query to be translated to a SQL query."
    )
class DataheraldTextToSQL(BaseTool):  # type: ignore[override]
    """Tool that queries using the Dataherald SDK."""

    name: str = "dataherald"
    description: str = (
        "A wrapper around Dataherald. "
        "Text to SQL. "
        "Input should be a prompt and an existing db_connection_id"
    )
    # Wrapper holding the Dataherald client configuration.
    api_wrapper: DataheraldAPIWrapper
    args_schema: Type[BaseModel] = DataheraldTextToSQLInput

    def _run(
        self,
        prompt: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Translate the natural-language ``prompt`` into SQL."""
        # Delegate straight to the configured Dataherald API wrapper.
        generated_sql = self.api_wrapper.run(prompt)
        return generated_sql
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/dataherald/__init__.py | """Dataherald API toolkit."""
from langchain_community.tools.dataherald.tool import DataheraldTextToSQL
__all__ = [
"DataheraldTextToSQL",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/passio_nutrition_ai/tool.py | """Tool for the Passio Nutrition AI API."""
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.passio_nutrition_ai import NutritionAIAPI
class NutritionAIInputs(BaseModel):
    """Inputs to the Passio Nutrition AI tool."""

    # Short free-text food/nutrition query forwarded to the API.
    query: str = Field(
        description="A query to look up using Passio Nutrition AI, usually a few words."
    )
class NutritionAI(BaseTool):  # type: ignore[override]
    """Tool that queries the Passio Nutrition AI API."""

    name: str = "nutritionai_advanced_search"
    description: str = (
        "A wrapper around the Passio Nutrition AI. "
        "Useful to retrieve nutrition facts. "
        "Input should be a search query string."
    )
    # Client wrapper that performs the actual Nutrition AI HTTP calls.
    api_wrapper: NutritionAIAPI
    args_schema: Type[BaseModel] = NutritionAIInputs

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Optional[Dict]:
        """Look up ``query`` via the configured Nutrition AI wrapper."""
        # Straight delegation; the wrapper owns all request/response handling.
        response = self.api_wrapper.run(query)
        return response
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/passio_nutrition_ai/__init__.py | """Passio Nutrition AI API toolkit."""
from langchain_community.tools.passio_nutrition_ai.tool import NutritionAI
__all__ = ["NutritionAI"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/asknews/tool.py | """
Tool for the AskNews API.
To use this tool, you must first set your credentials as environment variables:
ASKNEWS_CLIENT_ID
ASKNEWS_CLIENT_SECRET
"""
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.asknews import AskNewsAPIWrapper
class SearchInput(BaseModel):
    """Input for the AskNews Search tool."""

    # Free-text query forwarded to the AskNews search endpoint.
    query: str = Field(
        description="Search query to be used for finding real-time or historical news "
        "information."
    )
    # How far back (in hours) to search; 0 means "recent news only".
    hours_back: Optional[int] = Field(
        0,
        description="If the Assistant deems that the event may have occurred more "
        "than 48 hours ago, it estimates the number of hours back to search. For "
        "example, if the event was one month ago, the Assistant may set this to 720. "
        "One week would be 168. The Assistant can estimate up to one year back (8760).",
    )
class AskNewsSearch(BaseTool):  # type: ignore[override]
    """Tool that searches the AskNews API."""

    name: str = "asknews_search"
    description: str = (
        "This tool allows you to perform a search on up-to-date news and historical "
        "news. If you need news from more than 48 hours ago, you can estimate the "
        "number of hours back to search."
    )
    api_wrapper: AskNewsAPIWrapper = Field(default_factory=AskNewsAPIWrapper)  # type: ignore[arg-type]
    # Maximum number of articles returned per search.
    max_results: int = 10
    args_schema: Optional[Type[BaseModel]] = SearchInput

    def _run(
        self,
        query: str,
        hours_back: int = 0,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Use the tool.

        API errors are returned as their ``repr`` string instead of being
        raised, so the calling agent can read them (deliberate best-effort).
        """
        try:
            return self.api_wrapper.search_news(
                query,
                hours_back=hours_back,
                max_results=self.max_results,
            )
        except Exception as e:
            return repr(e)

    async def _arun(
        self,
        query: str,
        hours_back: int = 0,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> str:
        """Use the tool asynchronously (same error contract as ``_run``)."""
        try:
            return await self.api_wrapper.asearch_news(
                query,
                hours_back=hours_back,
                max_results=self.max_results,
            )
        except Exception as e:
            return repr(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/asknews/__init__.py | """AskNews API toolkit."""
from langchain_community.tools.asknews.tool import (
AskNewsSearch,
)
__all__ = ["AskNewsSearch"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/events_search.py | """Util that Searches calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT, clean_body
class SearchEventsInput(BaseModel):
    """Input for the O365SearchEvents tool.

    From https://learn.microsoft.com/en-us/graph/search-query-parameter
    """

    start_datetime: str = Field(
        description=(
            " The start datetime for the search query in the following format: "
            ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
            " components, and the time zone offset is specified as ±hh:mm. "
            ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
            " 2023, at 10:30 AM in a time zone with a positive offset of 3 "
            " hours from Coordinated Universal Time (UTC)."
        )
    )
    end_datetime: str = Field(
        description=(
            " The end datetime for the search query in the following format: "
            ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
            " components, and the time zone offset is specified as ±hh:mm. "
            ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
            " 2023, at 10:30 AM in a time zone with a positive offset of 3 "
            " hours from Coordinated Universal Time (UTC)."
        )
    )
    max_results: int = Field(
        default=10,
        description="The maximum number of results to return.",
    )
    # When True, event bodies are cut short to keep the payload small.
    truncate: bool = Field(
        default=True,
        description=(
            "Whether the event's body is truncated to meet token number limits. Set to "
            "False for searches that will retrieve small events, otherwise, set to "
            "True."
        ),
    )
class O365SearchEvents(O365BaseTool):  # type: ignore[override]
    """Search calendar events in Office 365.

    Free, but setup is required
    """

    name: str = "events_search"
    args_schema: Type[BaseModel] = SearchEventsInput
    description: str = (
        " Use this tool to search for the user's calendar events."
        " The input must be the start and end datetimes for the search query."
        " The output is a JSON list of all the events in the user's calendar"
        " between the start and end times. You can assume that the user can "
        " not schedule any meeting over existing meetings, and that the user "
        "is busy during meetings. Any times without events are free for the user. "
    )
    model_config = ConfigDict(
        extra="forbid",
    )

    def _run(
        self,
        start_datetime: str,
        end_datetime: str,
        max_results: int = 10,
        truncate: bool = True,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        truncate_limit: int = 150,
    ) -> List[Dict[str, Any]]:
        """Return events from the default calendar within the given window.

        Each result dict carries organizer, subject, (possibly truncated)
        body, and start/end/modified datetimes formatted with UTC_FORMAT in
        the search request's time zone.
        """
        # Get calendar object
        schedule = self.account.schedule()
        calendar = schedule.get_default_calendar()

        # Process the date range parameters
        start_datetime_query = dt.strptime(start_datetime, UTC_FORMAT)
        end_datetime_query = dt.strptime(end_datetime, UTC_FORMAT)
        # The output time zone comes from the search parameters, not from the
        # individual events, so it is loop-invariant — hoisted out of the
        # loop below (it used to be recomputed per event).
        time_zone = start_datetime_query.tzinfo

        # Run the query: events starting at/after the window start and ending
        # at/before the window end.
        q = calendar.new_query("start").greater_equal(start_datetime_query)
        q.chain("and").on_attribute("end").less_equal(end_datetime_query)
        events = calendar.get_events(query=q, include_recurring=True, limit=max_results)

        # Generate output dicts
        output_events = []
        for event in events:
            output_event = {}
            output_event["organizer"] = event.organizer
            output_event["subject"] = event.subject
            body = clean_body(event.body)
            # Truncation keeps the payload within token limits.
            output_event["body"] = body[:truncate_limit] if truncate else body
            # Render the datetimes in the search time zone.
            output_event["start_datetime"] = event.start.astimezone(time_zone).strftime(
                UTC_FORMAT
            )
            output_event["end_datetime"] = event.end.astimezone(time_zone).strftime(
                UTC_FORMAT
            )
            output_event["modified_date"] = event.modified.astimezone(
                time_zone
            ).strftime(UTC_FORMAT)
            output_events.append(output_event)
        return output_events
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/send_event.py | """Util that sends calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT
class SendEventSchema(BaseModel):
    """Input for the O365SendEvent tool."""

    body: str = Field(
        ...,
        description="The message body to include in the event.",
    )
    attendees: List[str] = Field(
        ...,
        description="The list of attendees for the event.",
    )
    subject: str = Field(
        ...,
        description="The subject of the event.",
    )
    # Datetimes are ISO-8601 strings with an explicit UTC offset (see
    # descriptions); they are parsed with UTC_FORMAT by the tool.
    start_datetime: str = Field(
        description=" The start datetime for the event in the following format: "
        ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
        " components, and the time zone offset is specified as ±hh:mm. "
        ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
        " 2023, at 10:30 AM in a time zone with a positive offset of 3 "
        " hours from Coordinated Universal Time (UTC).",
    )
    end_datetime: str = Field(
        description=" The end datetime for the event in the following format: "
        ' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
        " components, and the time zone offset is specified as ±hh:mm. "
        ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
        " 2023, at 10:30 AM in a time zone with a positive offset of 3 "
        " hours from Coordinated Universal Time (UTC).",
    )
class O365SendEvent(O365BaseTool):  # type: ignore[override]
    """Tool for sending calendar events in Office 365."""

    name: str = "send_event"
    description: str = (
        "Use this tool to create and send an event with the provided event fields."
    )
    args_schema: Type[SendEventSchema] = SendEventSchema

    def _run(
        self,
        body: str,
        attendees: List[str],
        subject: str,
        start_datetime: str,
        end_datetime: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Create an event on the default calendar and send the invitations."""
        # Build a fresh event on the account's default calendar.
        calendar = self.account.schedule().get_default_calendar()
        new_event = calendar.new_event()
        new_event.subject = subject
        new_event.body = body
        # Parse the ISO-8601 datetime strings with the shared UTC format.
        new_event.start = dt.strptime(start_datetime, UTC_FORMAT)
        new_event.end = dt.strptime(end_datetime, UTC_FORMAT)
        for invitee in attendees:
            new_event.attendees.add(invitee)
        # TO-DO: Look into PytzUsageWarning
        new_event.save()
        return f"Event sent: {new_event}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/create_draft_message.py | from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class CreateDraftMessageSchema(BaseModel):
    """Input for the O365CreateDraftMessage tool."""

    body: str = Field(
        ...,
        description="The message body to include in the draft.",
    )
    to: List[str] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    # cc/bcc are optional; None means "no such recipients".
    cc: Optional[List[str]] = Field(
        None,
        description="The list of CC recipients.",
    )
    bcc: Optional[List[str]] = Field(
        None,
        description="The list of BCC recipients.",
    )
class O365CreateDraftMessage(O365BaseTool):  # type: ignore[override]
    """Tool for creating a draft email in Office 365."""

    name: str = "create_email_draft"
    description: str = (
        "Use this tool to create a draft email with the provided message fields."
    )
    args_schema: Type[CreateDraftMessageSchema] = CreateDraftMessageSchema

    def _run(
        self,
        body: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Compose a new message in the mailbox and store it as a draft."""
        # Start a fresh message in the account's mailbox.
        draft = self.account.mailbox().new_message()
        draft.subject = subject
        draft.body = body
        draft.to.add(to)
        # Optional recipient lists are only added when explicitly provided.
        for recipient_field, recipients in ((draft.cc, cc), (draft.bcc, bcc)):
            if recipients is not None:
                recipient_field.add(recipients)
        draft.save_draft()
        return f"Draft created: {draft}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/base.py | """Base class for Office 365 tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Account
class O365BaseTool(BaseTool):  # type: ignore[override]
    """Base class for the Office 365 tools."""

    # Authenticated O365 ``Account`` created via ``authenticate()`` when the
    # tool is instantiated; ``authenticate()`` reads CLIENT_ID/CLIENT_SECRET
    # from the environment and may return None on failure.
    account: Account = Field(default_factory=authenticate)
    """The account object for the Office 365 account."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/messages_search.py | """Util that Searches email messages in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT, clean_body
class SearchEmailsInput(BaseModel):
    """Input for SearchEmails Tool.

    From https://learn.microsoft.com/en-us/graph/search-query-parameter
    """

    folder: str = Field(
        default="",
        description=(
            " If the user wants to search in only one folder, the name of the folder. "
            'Default folders are "inbox", "drafts", "sent items", "deleted items", but '
            "users can search custom folders as well."
        ),
    )
    query: str = Field(
        description=(
            "The Microsoft Graph v1.0 $search query. Example filters include "
            "from:sender, to:recipient, subject:subject, "
            "recipients:list_of_recipients, body:excitement, importance:high, "
            "received>2022-12-01, received<2021-12-01, sent>2022-12-01, "
            "sent<2021-12-01, hasAttachments:true attachment:api-catalog.md, "
            "cc:samanthab@contoso.com, bcc:samanthab@contoso.com, body:excitement date "
            "range example: received:2023-06-08..2023-06-09 matching example: "
            "from:amy OR from:david."
        )
    )
    max_results: int = Field(
        default=10,
        description="The maximum number of results to return.",
    )
    # When True, bodies are cut short to keep the payload within token limits.
    truncate: bool = Field(
        default=True,
        description=(
            "Whether the email body is truncated to meet token number limits. Set to "
            "False for searches that will retrieve small messages, otherwise, set to "
            "True"
        ),
    )
class O365SearchEmails(O365BaseTool):  # type: ignore[override]
    """Search email messages in Office 365.

    Free, but setup is required.
    """

    name: str = "messages_search"
    args_schema: Type[BaseModel] = SearchEmailsInput
    description: str = (
        "Use this tool to search for email messages."
        " The input must be a valid Microsoft Graph v1.0 $search query."
        " The output is a JSON list of the requested resource."
    )
    model_config = ConfigDict(
        extra="forbid",
    )

    def _run(
        self,
        query: str,
        folder: str = "",
        max_results: int = 10,
        truncate: bool = True,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        truncate_limit: int = 150,
    ) -> List[Dict[str, Any]]:
        """Search the mailbox (or one folder of it) for messages.

        Returns a list of dicts with "from", "body", "subject", "date",
        "to", "cc" and "bcc" keys.
        """
        # Get mailbox object
        mailbox = self.account.mailbox()

        # Narrow the search to a single folder when one was requested.
        if folder != "":
            mailbox = mailbox.get_folder(folder_name=folder)

        # Build the $search query object under its own name instead of
        # rebinding the `query` string parameter (the original shadowed it
        # with a different type).
        search_query = mailbox.q().search(query)
        messages = mailbox.get_messages(limit=max_results, query=search_query)

        # Generate output dicts
        output_messages = []
        for message in messages:
            output_message = {}
            output_message["from"] = message.sender
            if truncate:
                # NOTE(review): the truncated branch uses the raw
                # body_preview while the full branch cleans the HTML body —
                # asymmetry preserved from the original behavior.
                output_message["body"] = message.body_preview[:truncate_limit]
            else:
                output_message["body"] = clean_body(message.body)
            output_message["subject"] = message.subject
            # NOTE(review): "date" is the *modified* timestamp, not the
            # received time — preserved as-is.
            output_message["date"] = message.modified.strftime(UTC_FORMAT)
            output_message["to"] = [str(r) for r in message.to._recipients]
            output_message["cc"] = [str(r) for r in message.cc._recipients]
            output_message["bcc"] = [str(r) for r in message.bcc._recipients]
            output_messages.append(output_message)
        return output_messages
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/utils.py | """O365 tool utils."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from O365 import Account
logger = logging.getLogger(__name__)
def clean_body(body: str) -> str:
    """Clean body of a message or event.

    Strips HTML markup and collapses newlines and runs of whitespace into
    single spaces. If BeautifulSoup is not installed, or cleaning fails for
    any reason, the body is returned unchanged as a plain string.
    """
    try:
        from bs4 import BeautifulSoup

        # Drop the HTML markup first...
        text = BeautifulSoup(str(body), "html.parser").get_text()
        # ...then flatten line breaks and squeeze repeated whitespace.
        return " ".join("".join(text.splitlines()).split())
    except Exception:
        # Covers both a missing bs4 install (ImportError) and any parsing
        # failure — in either case fall back to the raw body.
        return str(body)
def authenticate() -> Account:
"""Authenticate using the Microsoft Graph API"""
try:
from O365 import Account
except ImportError as e:
raise ImportError(
"Cannot import 0365. Please install the package with `pip install O365`."
) from e
if "CLIENT_ID" in os.environ and "CLIENT_SECRET" in os.environ:
client_id = os.environ["CLIENT_ID"]
client_secret = os.environ["CLIENT_SECRET"]
credentials = (client_id, client_secret)
else:
logger.error(
"Error: The CLIENT_ID and CLIENT_SECRET environmental variables have not "
"been set. Visit the following link on how to acquire these authorization "
"tokens: https://learn.microsoft.com/en-us/graph/auth/"
)
return None
account = Account(credentials)
if account.is_authenticated is False:
if not account.authenticate(
scopes=[
"https://graph.microsoft.com/Mail.ReadWrite",
"https://graph.microsoft.com/Mail.Send",
"https://graph.microsoft.com/Calendars.ReadWrite",
"https://graph.microsoft.com/MailboxSettings.ReadWrite",
]
):
print("Error: Could not authenticate") # noqa: T201
return None
else:
return account
else:
return account
UTC_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
"""UTC format for datetime objects."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/__init__.py | """O365 tools."""
from langchain_community.tools.office365.create_draft_message import (
O365CreateDraftMessage,
)
from langchain_community.tools.office365.events_search import O365SearchEvents
from langchain_community.tools.office365.messages_search import O365SearchEmails
from langchain_community.tools.office365.send_event import O365SendEvent
from langchain_community.tools.office365.send_message import O365SendMessage
from langchain_community.tools.office365.utils import authenticate
__all__ = [
"O365SearchEmails",
"O365SearchEvents",
"O365CreateDraftMessage",
"O365SendMessage",
"O365SendEvent",
"authenticate",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/office365/send_message.py | from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
    """Input for the O365SendMessage tool."""

    body: str = Field(
        ...,
        description="The message body to be sent.",
    )
    to: List[str] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    # cc/bcc are optional; None means "no such recipients".
    cc: Optional[List[str]] = Field(
        None,
        description="The list of CC recipients.",
    )
    bcc: Optional[List[str]] = Field(
        None,
        description="The list of BCC recipients.",
    )
class O365SendMessage(O365BaseTool):  # type: ignore[override]
    """Send an email in Office 365."""

    name: str = "send_email"
    description: str = (
        "Use this tool to send an email with the provided message fields."
    )
    args_schema: Type[SendMessageSchema] = SendMessageSchema

    def _run(
        self,
        body: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Compose a message in the account's mailbox and send it."""
        # Start a fresh outgoing message.
        outgoing = self.account.mailbox().new_message()
        outgoing.subject = subject
        outgoing.body = body
        outgoing.to.add(to)
        # Optional recipient lists are only attached when provided.
        if cc is not None:
            outgoing.cc.add(cc)
        if bcc is not None:
            outgoing.bcc.add(bcc)
        outgoing.send()
        return "Message sent: " + str(outgoing)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/interaction/tool.py | """Tools for interacting with the user."""
import warnings
from typing import Any
from langchain_community.tools.human.tool import HumanInputRun
def StdInInquireTool(*args: Any, **kwargs: Any) -> HumanInputRun:
    """Tool for asking the user for input.

    Deprecated factory kept for backward compatibility; construct
    ``HumanInputRun`` directly instead.
    """
    warnings.warn(
        "StdInInquireTool will be deprecated in the future. "
        "Please use HumanInputRun instead.",
        DeprecationWarning,
        stacklevel=2,  # attribute the warning to the caller, not this shim
    )
    return HumanInputRun(*args, **kwargs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/interaction/__init__.py | """Tools for interacting with the user."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/nasa/tool.py | """
This tool allows agents to interact with the NASA API, specifically
the NASA Image & Video Library and Exoplanet
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.nasa import NasaAPIWrapper
class NasaAction(BaseTool):  # type: ignore[override]
    """Tool that queries the NASA API.

    ``mode`` selects which NASA operation the wrapper runs. ``name`` and
    ``description`` default to empty strings — presumably set by whatever
    constructs this tool (TODO confirm against the toolkit).
    """

    # Wrapper that performs the actual NASA API calls.
    api_wrapper: NasaAPIWrapper = Field(default_factory=NasaAPIWrapper)
    mode: str
    name: str = ""
    description: str = ""

    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the NASA API to run an operation."""
        return self.api_wrapper.run(self.mode, instructions)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/nasa/prompt.py | # flake8: noqa
# Prompt describing NASA's Image and Video Library `search` endpoint for the
# agent. Fixes vs. the previous version: removed a stray "-" after the `page`
# parameter description, closed the missing quote in the "Kennedy Center"
# example, and aligned that example's year_start with its task text (2008).
NASA_SEARCH_PROMPT = """
This tool is a wrapper around NASA's search API, useful when you need to search through NASA's Image and Video Library.
The input to this tool is a query specified by the user, and will be passed into NASA's `search` function.
At least one parameter must be provided.
There are optional parameters that can be passed by the user based on their query
specifications. Each item in this list contains pound sign (#) separated values, the first value is the parameter name,
the second value is the datatype and the third value is the description: {{
- q#string#Free text search terms to compare to all indexed metadata.
- center#string#NASA center which published the media.
- description#string#Terms to search for in “Description” fields.
- description_508#string#Terms to search for in “508 Description” fields.
- keywords #string#Terms to search for in “Keywords” fields. Separate multiple values with commas.
- location #string#Terms to search for in “Location” fields.
- media_type#string#Media types to restrict the search to. Available types: [“image”,“video”, “audio”]. Separate multiple values with commas.
- nasa_id #string#The media asset’s NASA ID.
- page#integer#Page number, starting at 1, of results to get.
- page_size#integer#Number of results per page. Default: 100.
- photographer#string#The primary photographer’s name.
- secondary_creator#string#A secondary photographer/videographer’s name.
- title #string#Terms to search for in “Title” fields.
- year_start#string#The start year for results. Format: YYYY.
- year_end #string#The end year for results. Format: YYYY.
}}
Below are several task descriptions along with their respective input examples.
Task: get the 2nd page of image and video content starting from the year 2002 to 2010
Example Input: {{"year_start": "2002", "year_end": "2010", "page": 2}}
Task: get the image and video content of saturn photographed by John Appleseed
Example Input: {{"q": "saturn", "photographer": "John Appleseed"}}
Task: search for Meteor Showers with description "Search Description" with media type image
Example Input: {{"q": "Meteor Shower", "description": "Search Description", "media_type": "image"}}
Task: get the image and video content from year 2008 to 2010 from Kennedy Center
Example Input: {{"year_start": "2008", "year_end": "2010", "location": "Kennedy Center"}}
"""
# Prompt describing NASA's media-asset manifest endpoint. The previous
# rendering of this constant was missing its closing triple-quote before the
# next assignment, leaving the string literal unterminated — restored here.
NASA_MANIFEST_PROMPT = """
This tool is a wrapper around NASA's media asset manifest API, useful when you need to retrieve a media
asset's manifest. The input to this tool should include a string representing a NASA ID for a media asset that the user is trying to get the media asset manifest data for. The NASA ID will be passed as a string into NASA's `get_media_metadata_manifest` function.
The following list are some examples of NASA IDs for a media asset that you can use to better extract the NASA ID from the input string to the tool.
- GSFC_20171102_Archive_e000579
- Launch-Sound_Delta-PAM-Random-Commentary
- iss066m260341519_Expedition_66_Education_Inflight_with_Random_Lake_School_District_220203
- 6973610
- GRC-2020-CM-0167.4
- Expedition_55_Inflight_Japan_VIP_Event_May_31_2018_659970
- NASA 60th_SEAL_SLIVER_150DPI
"""
# Prompt describing NASA's media-asset metadata location endpoint.
NASA_METADATA_PROMPT = """
This tool is a wrapper around NASA's media asset metadata location API, useful when you need to retrieve the media asset's metadata. The input to this tool should include a string representing a NASA ID for a media asset that the user is trying to get the media asset metadata location for. The NASA ID will be passed as a string into NASA's `get_media_metadata_manifest` function.
The following list are some examples of NASA IDs for a media asset that you can use to better extract the NASA ID from the input string to the tool.
- GSFC_20171102_Archive_e000579
- Launch-Sound_Delta-PAM-Random-Commentary
- iss066m260341519_Expedition_66_Education_Inflight_with_Random_Lake_School_District_220203
- 6973610
- GRC-2020-CM-0167.4
- Expedition_55_Inflight_Japan_VIP_Event_May_31_2018_659970
- NASA 60th_SEAL_SLIVER_150DPI
"""
# Prompt describing NASA's video caption-location endpoint. Fixed typos:
# "assests" -> "assets" and a duplicated "get the" phrase.
NASA_CAPTIONS_PROMPT = """
This tool is a wrapper around NASA's video assets caption location API, useful when you need
to retrieve the location of the captions of a specific video. The input to this tool should include a string representing a NASA ID for a video media asset that the user is trying to get the location of the captions for. The NASA ID will be passed as a string into NASA's `get_media_metadata_manifest` function.
The following list are some examples of NASA IDs for a video asset that you can use to better extract the NASA ID from the input string to the tool.
- 2017-08-09 - Video File RS-25 Engine Test
- 20180415-TESS_Social_Briefing
- 201_TakingWildOutOfWildfire
- 2022-H1_V_EuropaClipper-4
- 2022_0429_Recientemente
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/steam/tool.py | """Tool for Steam Web API"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.steam import SteamWebAPIWrapper
class SteamWebAPIQueryRun(BaseTool): # type: ignore[override]
    """Tool that searches the Steam Web API.

    ``mode`` selects which wrapper operation is performed; the raw query
    string is forwarded to the wrapper unchanged.
    """
    # Operation name passed through to SteamWebAPIWrapper.run.
    mode: str
    name: str = "steam"
    description: str = (
        "A wrapper around Steam Web API."
        "Steam Tool is useful for fetching User profiles and stats, Game data and more!"
        "Input should be the User or Game you want to query."
    )
    # Wrapper that performs the actual Steam API calls.
    api_wrapper: SteamWebAPIWrapper
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Steam-WebAPI tool: run `query` under the configured mode."""
        return self.api_wrapper.run(self.mode, query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/steam/__init__.py | """Steam API toolkit"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/steam/prompt.py | STEAM_GET_GAMES_DETAILS = """
This tool is a wrapper around python-steam-api's steam.apps.search_games API and
steam.apps.get_app_details API, useful when you need to search for a game.
The input to this tool is a string specifying the name of the game you want to
search for. For example, to search for a game called "Counter-Strike: Global
Offensive", you would input "Counter-Strike: Global Offensive" as the game name.
This input will be passed into steam.apps.search_games to find the game id, link
and price, and then the game id will be passed into steam.apps.get_app_details to
get the detailed description and supported languages of the game. Finally the
results are combined and returned as a string.
"""
# ^ LLM-facing prompt text describing the Steam game-details tool.
# LLM-facing prompt text describing the Steam recommendations tool.
# Fixes prompt grammar: "to returns all games", "games that is not owned",
# "calculates ... and stored it".
STEAM_GET_RECOMMENDED_GAMES = """
This tool is a wrapper around python-steam-api's steam.users.get_owned_games API
and steamspypi's steamspypi.download API, useful when you need to get a list of
recommended games. The input to this tool is a string specifying the steam id of
the user you want to get recommended games for. For example, to get recommended
games for a user with steam id 76561197960435530, you would input
"76561197960435530" as the steam id. This steamid is then utilized to form a
data_request sent to steamspypi's steamspypi.download to retrieve genres of user's
owned games. Then, it calculates the frequency of each genre, identifies the most
popular one, and stores it in a dictionary. Subsequently, steamspypi.download is
used to return all games in this genre, and the 5 most-played games that are not
owned by the user are returned.
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/zapier/tool.py | """[DEPRECATED]
## Zapier Natural Language Actions API
\
Full docs here: https://nla.zapier.com/start/
**Zapier Natural Language Actions** gives you access to the 5k+ apps, 20k+ actions
on Zapier's platform through a natural language API interface.
NLA supports apps like Gmail, Salesforce, Trello, Slack, Asana, HubSpot, Google Sheets,
Microsoft Teams, and thousands more apps: https://zapier.com/apps
Zapier NLA handles ALL the underlying API auth and translation from
natural language --> underlying API call --> return simplified output for LLMs
The key idea is you, or your users, expose a set of actions via an oauth-like setup
window, which you can then query and execute via a REST API.
NLA offers both API Key and OAuth for signing NLA API requests.
1. Server-side (API Key): for quickly getting started, testing, and production scenarios
where LangChain will only use actions exposed in the developer's Zapier account
(and will use the developer's connected accounts on Zapier.com)
2. User-facing (Oauth): for production scenarios where you are deploying an end-user
facing application and LangChain needs access to end-user's exposed actions and
connected accounts on Zapier.com
This quick start will focus on the server-side use case for brevity.
Review [full docs](https://nla.zapier.com/start/) for user-facing oauth developer
support.
Typically, you'd use SequentialChain, here's a basic example:
1. Use NLA to find an email in Gmail
2. Use LLMChain to generate a draft reply to (1)
3. Use NLA to send the draft reply (2) to someone in Slack via direct message
In code, below:
```python
import os
# get from https://platform.openai.com/
os.environ["OPENAI_API_KEY"] = os.environ.get("OPENAI_API_KEY", "")
# get from https://nla.zapier.com/docs/authentication/
os.environ["ZAPIER_NLA_API_KEY"] = os.environ.get("ZAPIER_NLA_API_KEY", "")
from langchain_community.agent_toolkits import ZapierToolkit
from langchain_community.utilities.zapier import ZapierNLAWrapper
## step 0. expose gmail 'find email' and slack 'send channel message' actions
# first go here, log in, expose (enable) the two actions:
# https://nla.zapier.com/demo/start
# -- for this example, can leave all fields "Have AI guess"
# in an oauth scenario, you'd get your own <provider> id (instead of 'demo')
# which you route your users through first
zapier = ZapierNLAWrapper()
## To leverage OAuth you may pass the value `nla_oauth_access_token` to
## the ZapierNLAWrapper. If you do this there is no need to initialize
## the ZAPIER_NLA_API_KEY env variable
# zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token="TOKEN_HERE")
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
```
"""
from typing import Any, Dict, Optional
from langchain_core._api import warn_deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from langchain_core.utils import pre_init
from pydantic import Field
from langchain_community.tools.zapier.prompt import BASE_ZAPIER_TOOL_PROMPT
from langchain_community.utilities.zapier import ZapierNLAWrapper
class ZapierNLARunAction(BaseTool): # type: ignore[override]
    """Tool to run a specific action from the user's exposed actions.
    Params:
        action_id: a specific action ID (from list actions) of the action to execute
            (the set api_key must be associated with the action owner)
        instructions: a natural language instruction string for using the action
            (eg. "get the latest email from Mike Knoop" for "Gmail: find email" action)
        params: a dict, optional. Any params provided will *override* AI guesses
            from `instructions` (see "understanding the AI guessing flow" here:
            https://nla.zapier.com/docs/using-the-api#ai-guessing)
    """
    # Wrapper that performs the HTTP calls against the Zapier NLA API.
    api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) # type: ignore[arg-type]
    action_id: str
    params: Optional[dict] = None
    # Template for `description`; must contain {zapier_description} and {params}.
    base_prompt: str = BASE_ZAPIER_TOOL_PROMPT
    zapier_description: str
    params_schema: Dict[str, str] = Field(default_factory=dict)
    # name/description are derived in set_name_description below.
    name: str = ""
    description: str = ""
    @pre_init
    def set_name_description(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        """Derive the tool's name/description from the Zapier action metadata."""
        zapier_description = values["zapier_description"]
        params_schema = values["params_schema"]
        # "instructions" is supplied at call time, so it is not a listed param.
        if "instructions" in params_schema:
            del params_schema["instructions"]
        # Ensure base prompt (if overridden) contains necessary input fields
        necessary_fields = {"{zapier_description}", "{params}"}
        if not all(field in values["base_prompt"] for field in necessary_fields):
            raise ValueError(
                "Your custom base Zapier prompt must contain input fields for "
                "{zapier_description} and {params}."
            )
        values["name"] = zapier_description
        values["description"] = values["base_prompt"].format(
            zapier_description=zapier_description,
            params=str(list(params_schema.keys())),
        )
        return values
    def _run(
        self, instructions: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Run the configured Zapier action with the given instructions."""
        # Zapier NLA is being sunset (see message below); warn on every call.
        warn_deprecated(
            since="0.0.319",
            message=(
                "This tool will be deprecated on 2023-11-17. See "
                "<https://nla.zapier.com/sunset/> for details"
            ),
        )
        return self.api_wrapper.run_as_str(self.action_id, instructions, self.params)
    async def _arun(
        self,
        instructions: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Asynchronously run the configured Zapier action with the given instructions."""
        warn_deprecated(
            since="0.0.319",
            message=(
                "This tool will be deprecated on 2023-11-17. See "
                "<https://nla.zapier.com/sunset/> for details"
            ),
        )
        return await self.api_wrapper.arun_as_str(
            self.action_id,
            instructions,
            self.params,
        )
# Prefix the class docstring with the wrapper's run() docstring so the full
# request/response contract is visible on the tool class.
ZapierNLARunAction.__doc__ = (
    ZapierNLAWrapper.run.__doc__ + ZapierNLARunAction.__doc__ # type: ignore
)
# other useful actions
class ZapierNLAListActions(BaseTool): # type: ignore[override]
    """Tool that lists every Zapier NLA action the user has exposed."""

    name: str = "ZapierNLA_list_actions"
    description: str = BASE_ZAPIER_TOOL_PROMPT + (
        "This tool returns a list of the user's exposed actions."
    )
    api_wrapper: ZapierNLAWrapper = Field(default_factory=ZapierNLAWrapper) # type: ignore[arg-type]

    @staticmethod
    def _warn_sunset() -> None:
        """Emit the Zapier NLA sunset deprecation warning."""
        warn_deprecated(
            since="0.0.319",
            message=(
                "This tool will be deprecated on 2023-11-17. See "
                "<https://nla.zapier.com/sunset/> for details"
            ),
        )

    def _run(
        self,
        _: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the list of all exposed user actions as a string."""
        self._warn_sunset()
        return self.api_wrapper.list_as_str()

    async def _arun(
        self,
        _: str = "",
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Asynchronously return the list of all exposed user actions as a string."""
        self._warn_sunset()
        return await self.api_wrapper.alist_as_str()
# Prefix the class docstring with the wrapper's list() docstring so the full
# request/response contract is visible on the tool class.
ZapierNLAListActions.__doc__ = (
    ZapierNLAWrapper.list.__doc__ + ZapierNLAListActions.__doc__ # type: ignore
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/zapier/__init__.py | """Zapier Tool."""
from langchain_community.tools.zapier.tool import (
ZapierNLAListActions,
ZapierNLARunAction,
)
__all__ = [
"ZapierNLARunAction",
"ZapierNLAListActions",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/zapier/prompt.py | # flake8: noqa
# Runtime description template for every Zapier NLA tool; .format() fills
# {zapier_description} and {params} (see ZapierNLARunAction.set_name_description).
# Fixes a concatenation bug ("another tool!This tool" — missing separating
# space) and the garbled phrase "do not try to another tool".
BASE_ZAPIER_TOOL_PROMPT = (
    "A wrapper around Zapier NLA actions. "
    "The input to this tool is a natural language instruction, "
    'for example "get the latest email from my bank" or '
    '"send a slack message to the #general channel". '
    "Each tool will have params associated with it that are specified as a list. You MUST take into account the params when creating the instruction. "
    "For example, if the params are ['Message_Text', 'Channel'], your instruction should be something like 'send a slack message to the #general channel with the text hello world'. "
    "Another example: if the params are ['Calendar', 'Search_Term'], your instruction should be something like 'find the meeting in my personal calendar at 3pm'. "
    "Do not make up params, they will be explicitly specified in the tool description. "
    "If you do not have enough information to fill in the params, just say 'not enough information provided in the instruction, missing <param>'. "
    "If you get a none or null response, STOP EXECUTION, do not try another tool! "
    "This tool specifically used for: {zapier_description}, "
    "and has params: {params}"
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/audio/huggingface_text_to_speech_inference.py | import logging
import os
import uuid
from datetime import datetime
from typing import Callable, Literal, Optional
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import SecretStr
logger = logging.getLogger(__name__)
class HuggingFaceTextToSpeechModelInference(BaseTool): # type: ignore[override]
    """HuggingFace Text-to-Speech Model Inference.

    Posts the query text to the Hugging Face model-inference endpoint for the
    configured model and writes the returned audio bytes to a uniquely named
    file in ``destination_dir``; returns the output file path.

    Requirements:
        - Environment variable ``HUGGINGFACE_API_KEY`` must be set,
        or passed as a named parameter to the constructor.
    """
    # NOTE(review): name/description previously said "openai_text_to_speech" /
    # "OpenAI Text-to-Speech API" — a copy-paste error; this tool calls the
    # Hugging Face inference API (see api_url below).
    name: str = "huggingface_text_to_speech"
    """Name of the tool."""
    description: str = "A wrapper around the Hugging Face Text-to-Speech inference API. "
    """Description of the tool."""
    model: str
    """Model name."""
    file_extension: str
    """File extension of the output audio file."""
    destination_dir: str
    """Directory to save the output audio file."""
    file_namer: Callable[[], str]
    """Function to generate unique file names."""
    api_url: str
    huggingface_api_key: SecretStr
    _HUGGINGFACE_API_KEY_ENV_NAME: str = "HUGGINGFACE_API_KEY"
    _HUGGINGFACE_API_URL_ROOT: str = "https://api-inference.huggingface.co/models"
    def __init__(
        self,
        model: str,
        file_extension: str,
        *,
        destination_dir: str = "./tts",
        file_naming_func: Literal["uuid", "timestamp"] = "uuid",
        huggingface_api_key: Optional[SecretStr] = None,
        _HUGGINGFACE_API_KEY_ENV_NAME: str = "HUGGINGFACE_API_KEY",
        _HUGGINGFACE_API_URL_ROOT: str = "https://api-inference.huggingface.co/models",
    ) -> None:
        """Initialize the tool.

        Args:
            model: Hugging Face model id, appended to the API root URL.
            file_extension: extension used verbatim for output file names.
            destination_dir: directory audio files are written to.
            file_naming_func: "uuid" for random names, "timestamp" for
                epoch-second names.
            huggingface_api_key: API key; falls back to the environment
                variable when omitted.

        Raises:
            ValueError: if no API key is available or ``file_naming_func``
                is not one of the accepted literals.
        """
        # Fall back to the environment when no key was passed explicitly.
        if not huggingface_api_key:
            huggingface_api_key = SecretStr(
                os.getenv(_HUGGINGFACE_API_KEY_ENV_NAME, "")
            )
        if (
            not huggingface_api_key
            or not huggingface_api_key.get_secret_value()
            or huggingface_api_key.get_secret_value() == ""
        ):
            # Message fixed: was "must be or set or passed".
            raise ValueError(
                f"'{_HUGGINGFACE_API_KEY_ENV_NAME}' must be set or passed"
            )
        if file_naming_func == "uuid":
            file_namer = lambda: str(uuid.uuid4()) # noqa: E731
        elif file_naming_func == "timestamp":
            file_namer = lambda: str(int(datetime.now().timestamp())) # noqa: E731
        else:
            raise ValueError(
                f"Invalid value for 'file_naming_func': {file_naming_func}"
            )
        super().__init__( # type: ignore[call-arg]
            model=model,
            file_extension=file_extension,
            api_url=f"{_HUGGINGFACE_API_URL_ROOT}/{model}",
            destination_dir=destination_dir,
            file_namer=file_namer,
            huggingface_api_key=huggingface_api_key,
            _HUGGINGFACE_API_KEY_ENV_NAME=_HUGGINGFACE_API_KEY_ENV_NAME,
            _HUGGINGFACE_API_URL_ROOT=_HUGGINGFACE_API_URL_ROOT,
        )
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Synthesize ``query`` to audio and return the output file path."""
        response = requests.post(
            self.api_url,
            headers={
                "Authorization": f"Bearer {self.huggingface_api_key.get_secret_value()}"
            },
            json={"inputs": query},
        )
        # Fail fast on HTTP errors instead of silently writing an error
        # payload to disk as if it were audio.
        response.raise_for_status()
        audio_bytes = response.content
        try:
            os.makedirs(self.destination_dir, exist_ok=True)
        except Exception as e:
            logger.error(f"Error creating directory '{self.destination_dir}': {e}")
            raise
        output_file = os.path.join(
            self.destination_dir,
            f"{str(self.file_namer())}.{self.file_extension}",
        )
        try:
            # mode "xb" fails if the file already exists, guaranteeing that an
            # existing output is never overwritten.
            with open(output_file, mode="xb") as f:
                f.write(audio_bytes)
        except FileExistsError:
            raise ValueError("Output name must be unique")
        except Exception as e:
            logger.error(f"Error occurred while creating file: {e}")
            raise
        return output_file
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/audio/__init__.py | from langchain_community.tools.audio.huggingface_text_to_speech_inference import (
HuggingFaceTextToSpeechModelInference,
)
__all__ = [
"HuggingFaceTextToSpeechModelInference",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/you/tool.py | from typing import List, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.documents import Document
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.you import YouSearchAPIWrapper
class YouInput(BaseModel):
    """Input schema for the you.com tool."""
    # Free-text query forwarded to the you.com search API.
    query: str = Field(description="should be a search query")
class YouSearchTool(BaseTool): # type: ignore[override, override]
    """Tool that searches the you.com API.

    Returns the wrapper's results as a list of Documents rather than a string.
    """
    name: str = "you_search"
    # Fixed concatenation bug: trailing space added, previously rendered as
    # "...more factual andup to date...".
    description: str = (
        "The YOU APIs make LLMs and search experiences more factual and "
        "up to date with realtime web data."
    )
    args_schema: Type[BaseModel] = YouInput
    api_wrapper: YouSearchAPIWrapper = Field(default_factory=YouSearchAPIWrapper)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> List[Document]:
        """Use the you.com tool."""
        return self.api_wrapper.results(query)
    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> List[Document]:
        """Use the you.com tool asynchronously."""
        return await self.api_wrapper.results_async(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/you/__init__.py | """You.com API toolkit."""
from langchain_community.tools.you.tool import YouSearchTool
__all__ = [
"YouSearchTool",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/arxiv/tool.py | """Tool for the Arxiv API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.arxiv import ArxivAPIWrapper
class ArxivInput(BaseModel):
    """Input for the Arxiv tool."""
    # Free-text query forwarded to the arXiv API wrapper.
    query: str = Field(description="search query to look up")
class ArxivQueryRun(BaseTool): # type: ignore[override, override]
    """Tool that searches the Arxiv API.

    Delegates entirely to ArxivAPIWrapper, which returns a formatted string
    of matching article summaries.
    """
    name: str = "arxiv"
    description: str = (
        "A wrapper around Arxiv.org "
        "Useful for when you need to answer questions about Physics, Mathematics, "
        "Computer Science, Quantitative Biology, Quantitative Finance, Statistics, "
        "Electrical Engineering, and Economics "
        "from scientific articles on arxiv.org. "
        "Input should be a search query."
    )
    api_wrapper: ArxivAPIWrapper = Field(default_factory=ArxivAPIWrapper) # type: ignore[arg-type]
    args_schema: Type[BaseModel] = ArxivInput
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Arxiv tool."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/arxiv/__init__.py | """Arxiv API toolkit."""
from langchain_community.tools.arxiv.tool import ArxivQueryRun
# The docstring now precedes the import (string literals placed after an
# import are not module docstrings); the duplicate literal was dropped.
__all__ = ["ArxivQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/graphql/tool.py | import json
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import ConfigDict
from langchain_community.utilities.graphql import GraphQLAPIWrapper
class BaseGraphQLTool(BaseTool): # type: ignore[override]
    """Base tool for querying a GraphQL API.

    The query string is executed via GraphQLAPIWrapper and the result is
    returned as pretty-printed JSON.
    """
    # Wrapper holding the endpoint/transport configuration.
    graphql_wrapper: GraphQLAPIWrapper
    name: str = "query_graphql"
    description: str = """\
Input to this tool is a detailed and correct GraphQL query, output is a result from the API.
If the query is not correct, an error message will be returned.
If an error is returned with 'Bad request' in it, rewrite the query and try again.
If an error is returned with 'Unauthorized' in it, do not try again, but tell the user to change their authentication.
Example Input: query {{ allUsers {{ id, name, email }} }}\
""" # noqa: E501
    # GraphQLAPIWrapper is not a pydantic model, so allow arbitrary types.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Execute the GraphQL query and return the result as indented JSON."""
        result = self.graphql_wrapper.run(tool_input)
        return json.dumps(result, indent=2)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/graphql/__init__.py | """Tools for interacting with a GraphQL API"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/stackexchange/tool.py | """Tool for the StackExchange API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.stackexchange import StackExchangeAPIWrapper
class StackExchangeTool(BaseTool): # type: ignore[override]
    """Tool that uses StackExchange."""
    name: str = "stack_exchange"
    # Fixed: the module docstring wrongly said "Wikipedia API" (copy-paste),
    # and the description fragments lacked separators, rendering as
    # "...questionscode excerpts...solutionsInput should...".
    description: str = (
        "A wrapper around StackExchange. "
        "Useful for when you need to answer specific programming questions, "
        "code excerpts, code examples and solutions. "
        "Input should be a fully formed question."
    )
    # Wrapper that performs the actual StackExchange API calls.
    api_wrapper: StackExchangeAPIWrapper
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Stack Exchange tool."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/stackexchange/__init__.py | """StackExchange API toolkit."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/wikipedia/tool.py | """Tool for the Wikipedia API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
class WikipediaQueryInput(BaseModel):
    """Input for the WikipediaQuery tool."""
    # Free-text query forwarded to the Wikipedia API wrapper.
    query: str = Field(description="query to look up on wikipedia")
class WikipediaQueryRun(BaseTool): # type: ignore[override, override]
    """Tool that searches the Wikipedia API.

    Delegates entirely to WikipediaAPIWrapper, which returns a formatted
    string of page summaries.
    """
    name: str = "wikipedia"
    description: str = (
        "A wrapper around Wikipedia. "
        "Useful for when you need to answer general questions about "
        "people, places, companies, facts, historical events, or other subjects. "
        "Input should be a search query."
    )
    api_wrapper: WikipediaAPIWrapper
    args_schema: Type[BaseModel] = WikipediaQueryInput
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Wikipedia tool."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/wikipedia/__init__.py | """Wikipedia API toolkit."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/form_recognizer.py | from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
)
logger = logging.getLogger(__name__)
class AzureCogsFormRecognizerTool(BaseTool): # type: ignore[override]
    """Tool that queries the Azure Cognitive Services Form Recognizer API.
    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/applied-ai-services/form-recognizer/quickstarts/get-started-sdks-rest-api?view=form-recog-3.0.0&pivots=programming-language-python
    """
    azure_cogs_key: str = "" #: :meta private:
    azure_cogs_endpoint: str = "" #: :meta private:
    doc_analysis_client: Any #: :meta private:
    name: str = "azure_cognitive_services_form_recognizer"
    description: str = (
        "A wrapper around Azure Cognitive Services Form Recognizer. "
        "Useful for when you need to "
        "extract text, tables, and key-value pairs from documents. "
        "Input should be a url to a document."
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )
        # SDK import is deferred so the module can be imported without the
        # optional azure dependency installed.
        try:
            from azure.ai.formrecognizer import DocumentAnalysisClient
            from azure.core.credentials import AzureKeyCredential
            values["doc_analysis_client"] = DocumentAnalysisClient(
                endpoint=azure_cogs_endpoint,
                credential=AzureKeyCredential(azure_cogs_key),
            )
        except ImportError:
            raise ImportError(
                "azure-ai-formrecognizer is not installed. "
                "Run `pip install azure-ai-formrecognizer` to install."
            )
        return values
    def _parse_tables(self, tables: List[Any]) -> List[Any]:
        """Convert SDK table objects into plain row-major lists of cell strings."""
        result = []
        for table in tables:
            rc, cc = table.row_count, table.column_count
            # Pre-fill with "" so sparse cells leave empty strings behind.
            _table = [["" for _ in range(cc)] for _ in range(rc)]
            for cell in table.cells:
                _table[cell.row_index][cell.column_index] = cell.content
            result.append(_table)
        return result
    def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]:
        """Convert SDK key-value pair objects into (key, value) string tuples."""
        result = []
        for kv_pair in kv_pairs:
            # Either side of a pair may be missing; substitute "".
            key = kv_pair.key.content if kv_pair.key else ""
            value = kv_pair.value.content if kv_pair.value else ""
            result.append((key, value))
        return result
    def _document_analysis(self, document_path: str) -> Dict:
        """Run prebuilt-document analysis on a local path or remote URL."""
        document_src_type = detect_file_src_type(document_path)
        if document_src_type == "local":
            with open(document_path, "rb") as document:
                poller = self.doc_analysis_client.begin_analyze_document(
                    "prebuilt-document", document
                )
        elif document_src_type == "remote":
            poller = self.doc_analysis_client.begin_analyze_document_from_url(
                "prebuilt-document", document_path
            )
        else:
            raise ValueError(f"Invalid document path: {document_path}")
        # Blocks until the long-running analysis operation finishes.
        result = poller.result()
        res_dict = {}
        if result.content is not None:
            res_dict["content"] = result.content
        if result.tables is not None:
            res_dict["tables"] = self._parse_tables(result.tables)
        if result.key_value_pairs is not None:
            res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs)
        return res_dict
    def _format_document_analysis_result(self, document_analysis_result: Dict) -> str:
        """Render the analysis dict as one newline-separated summary string."""
        formatted_result = []
        if "content" in document_analysis_result:
            formatted_result.append(
                f"Content: {document_analysis_result['content']}".replace("\n", " ")
            )
        if "tables" in document_analysis_result:
            for i, table in enumerate(document_analysis_result["tables"]):
                formatted_result.append(f"Table {i}: {table}".replace("\n", " "))
        if "key_value_pairs" in document_analysis_result:
            for kv_pair in document_analysis_result["key_value_pairs"]:
                formatted_result.append(
                    f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ")
                )
        return "\n".join(formatted_result)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool: analyze the document at `query` and format the result."""
        try:
            document_analysis_result = self._document_analysis(query)
            if not document_analysis_result:
                return "No good document analysis result was found"
            return self._format_document_analysis_result(document_analysis_result)
        except Exception as e:
            # Wrap all failures (SDK, I/O, bad path) in a single RuntimeError.
            raise RuntimeError(f"Error while running AzureCogsFormRecognizerTool: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/text2speech.py | from __future__ import annotations
import logging
import tempfile
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureCogsText2SpeechTool(BaseTool): # type: ignore[override]
    """Tool that queries the Azure Cognitive Services Text2Speech API.

    Synthesizes the input text to speech and returns the path of a temporary
    ``.wav`` file containing the audio.
    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
    """
    azure_cogs_key: str = "" #: :meta private:
    azure_cogs_region: str = "" #: :meta private:
    speech_language: str = "en-US" #: :meta private:
    speech_config: Any #: :meta private:
    name: str = "azure_cognitive_services_text2speech"
    description: str = (
        "A wrapper around Azure Cognitive Services Text2Speech. "
        "Useful for when you need to convert text to speech. "
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )
        try:
            import azure.cognitiveservices.speech as speechsdk
            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
        return values
    def _text2speech(self, text: str, speech_language: str) -> str:
        """Synthesize ``text`` and return the path of a temporary .wav file.

        Returns a diagnostic string instead of a path when synthesis is
        canceled or fails without an SDK error.
        """
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            # BUG FIX: was `pass`, which let the failure fall through to a
            # NameError on `speechsdk` below. Raise the same actionable error
            # used by validate_environment instead.
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
        self.speech_config.speech_synthesis_language = speech_language
        # audio_config=None keeps the audio in memory instead of playing it.
        speech_synthesizer = speechsdk.SpeechSynthesizer(
            speech_config=self.speech_config, audio_config=None
        )
        result = speech_synthesizer.speak_text(text)
        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            stream = speechsdk.AudioDataStream(result)
            # delete=False: the caller consumes the file after this returns.
            with tempfile.NamedTemporaryFile(
                mode="wb", suffix=".wav", delete=False
            ) as f:
                stream.save_to_wav_file(f.name)
            return f.name
        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}")
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                raise RuntimeError(
                    f"Speech synthesis error: {cancellation_details.error_details}"
                )
            return "Speech synthesis canceled."
        else:
            return f"Speech synthesis failed: {result.reason}"
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            speech_file = self._text2speech(query, self.speech_language)
            return speech_file
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsText2SpeechTool: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/text_analytics_health.py | from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureCogsTextAnalyticsHealthTool(BaseTool): # type: ignore[override]
    """Tool that queries the Azure Cognitive Services Text Analytics for Health API.
    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?tabs=windows&pivots=programming-language-python
    """
    azure_cogs_key: str = "" #: :meta private:
    azure_cogs_endpoint: str = "" #: :meta private:
    text_analytics_client: Any #: :meta private:
    # NOTE(review): "analyics" is misspelled; kept as-is because the tool name
    # is part of the public interface and may be referenced by consumers.
    name: str = "azure_cognitive_services_text_analyics_health"
    description: str = (
        "A wrapper around Azure Cognitive Services Text Analytics for Health. "
        "Useful for when you need to identify entities in healthcare data. "
        "Input should be text."
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )
        # SDK import is deferred so the module can be imported without the
        # optional azure dependency installed.
        try:
            import azure.ai.textanalytics as sdk
            from azure.core.credentials import AzureKeyCredential
            values["text_analytics_client"] = sdk.TextAnalyticsClient(
                endpoint=azure_cogs_endpoint,
                credential=AzureKeyCredential(azure_cogs_key),
            )
        except ImportError:
            raise ImportError(
                "azure-ai-textanalytics is not installed. "
                "Run `pip install azure-ai-textanalytics` to install."
            )
        return values
    def _text_analysis(self, text: str) -> Dict:
        """Run healthcare-entity analysis on `text`; returns {"entities": [...]}."""
        poller = self.text_analytics_client.begin_analyze_healthcare_entities(
            [{"id": "1", "language": "en", "text": text}]
        )
        # Blocks until the long-running analysis operation finishes.
        result = poller.result()
        res_dict = {}
        docs = [doc for doc in result if not doc.is_error]
        # NOTE(review): `docs` is a list and never None, so this branch is
        # effectively always taken (possibly with an empty entity list).
        if docs is not None:
            res_dict["entities"] = [
                f"{x.text} is a healthcare entity of type {x.category}"
                for y in docs
                for x in y.entities
            ]
        return res_dict
    def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
        """Render the analysis dict as a single human-readable line."""
        formatted_result = []
        if "entities" in text_analysis_result:
            formatted_result.append(
                f"""The text contains the following healthcare entities: {
                    ', '.join(text_analysis_result['entities'])
                }""".replace("\n", " ")
            )
        return "\n".join(formatted_result)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text_analysis_result = self._text_analysis(query)
            return self._format_text_analysis_result(text_analysis_result)
        except Exception as e:
            # Wrap all failures (SDK, network, bad input) in one RuntimeError.
            raise RuntimeError(
                f"Error while running AzureCogsTextAnalyticsHealthTool: {e}"
            )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/speech2text.py | from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
download_audio_from_url,
)
logger = logging.getLogger(__name__)
class AzureCogsSpeech2TextTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure Cognitive Services Speech2Text API.
    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
    """
    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_region: str = ""  #: :meta private:
    speech_language: str = "en-US"  #: :meta private:
    speech_config: Any  #: :meta private:
    name: str = "azure_cognitive_services_speech2text"
    description: str = (
        "A wrapper around Azure Cognitive Services Speech2Text. "
        "Useful for when you need to transcribe audio to text. "
        "Input should be a url to an audio file."
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and region exist and build the speech config.

        Raises ImportError when the Azure Speech SDK is not installed.
        """
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_region = get_from_dict_or_env(
            values, "azure_cogs_region", "AZURE_COGS_REGION"
        )
        try:
            import azure.cognitiveservices.speech as speechsdk
            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_cogs_key, region=azure_cogs_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
        return values
    def _continuous_recognize(self, speech_recognizer: Any) -> str:
        """Drive a continuous-recognition session to completion and return
        the concatenated transcript."""
        done = False
        text = ""
        def stop_cb(evt: Any) -> None:
            """callback that stops continuous recognition"""
            speech_recognizer.stop_continuous_recognition_async()
            nonlocal done
            done = True
        def retrieve_cb(evt: Any) -> None:
            """callback that retrieves the intermediate recognition results"""
            nonlocal text
            text += evt.result.text
        # retrieve text on recognized events
        speech_recognizer.recognized.connect(retrieve_cb)
        # stop continuous recognition on either session stopped or canceled events
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)
        # Start continuous speech recognition; recognition runs on
        # SDK-managed background threads, so poll until a stop callback fires.
        speech_recognizer.start_continuous_recognition_async()
        while not done:
            time.sleep(0.5)
        return text
    def _speech2text(self, audio_path: str, speech_language: str) -> str:
        """Transcribe a local audio file or remote audio URL to text."""
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            # BUGFIX: this was previously swallowed (`except ImportError: pass`),
            # which produced a confusing NameError on `speechsdk` below. Fail
            # loudly with the same guidance as validate_environment.
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
        audio_src_type = detect_file_src_type(audio_path)
        if audio_src_type == "local":
            audio_config = speechsdk.AudioConfig(filename=audio_path)
        elif audio_src_type == "remote":
            # Remote audio is first downloaded to a local temporary file.
            tmp_audio_path = download_audio_from_url(audio_path)
            audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
        else:
            raise ValueError(f"Invalid audio path: {audio_path}")
        self.speech_config.speech_recognition_language = speech_language
        speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
        return self._continuous_recognize(speech_recognizer)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text = self._speech2text(query, self.speech_language)
            return text
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/utils.py | import os
import tempfile
from urllib.parse import urlparse
import requests
def detect_file_src_type(file_path: str) -> str:
    """Classify ``file_path`` as ``"local"``, ``"remote"``, or ``"invalid"``.

    A path that exists on disk wins; otherwise anything that parses as a URL
    with both a scheme and a network location is treated as remote.
    """
    if os.path.isfile(file_path):
        return "local"
    parts = urlparse(file_path)
    return "remote" if parts.scheme and parts.netloc else "invalid"
def download_audio_from_url(audio_url: str) -> str:
    """Download audio from ``audio_url`` to a local temporary file.

    Returns the path of the downloaded file. The file is created with
    ``delete=False``; cleanup is the caller's responsibility.
    """
    # BUGFIX: the extension was previously taken from the *whole* URL via
    # ``audio_url.split(".")[-1]``, which leaks query strings/fragments (and,
    # for extension-less URLs, the whole tail) into the filename suffix.
    # Derive it from the parsed URL path instead.
    ext = os.path.splitext(urlparse(audio_url).path)[1] or ".audio"
    # Stream the body in chunks; a timeout prevents hanging forever on a
    # stalled server.
    response = requests.get(audio_url, stream=True, timeout=60)
    response.raise_for_status()
    with tempfile.NamedTemporaryFile(mode="wb", suffix=ext, delete=False) as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
    return f.name
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py | """Azure Cognitive Services Tools."""
from langchain_community.tools.azure_cognitive_services.form_recognizer import (
AzureCogsFormRecognizerTool,
)
from langchain_community.tools.azure_cognitive_services.image_analysis import (
AzureCogsImageAnalysisTool,
)
from langchain_community.tools.azure_cognitive_services.speech2text import (
AzureCogsSpeech2TextTool,
)
from langchain_community.tools.azure_cognitive_services.text2speech import (
AzureCogsText2SpeechTool,
)
from langchain_community.tools.azure_cognitive_services.text_analytics_health import (
AzureCogsTextAnalyticsHealthTool,
)
__all__ = [
"AzureCogsImageAnalysisTool",
"AzureCogsFormRecognizerTool",
"AzureCogsSpeech2TextTool",
"AzureCogsText2SpeechTool",
"AzureCogsTextAnalyticsHealthTool",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_cognitive_services/image_analysis.py | from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
)
logger = logging.getLogger(__name__)
class AzureCogsImageAnalysisTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure Cognitive Services Image Analysis API.
    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/cognitive-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
    """
    azure_cogs_key: str = ""  #: :meta private:
    azure_cogs_endpoint: str = ""  #: :meta private:
    vision_service: Any  #: :meta private:
    analysis_options: Any  #: :meta private:
    name: str = "azure_cognitive_services_image_analysis"
    description: str = (
        "A wrapper around Azure Cognitive Services Image Analysis. "
        "Useful for when you need to analyze images. "
        "Input should be a url to an image."
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exist and build the SDK objects.

        Requests caption, object, tag and OCR-text features for every
        analysis. Raises ImportError when azure-ai-vision is not installed.
        """
        azure_cogs_key = get_from_dict_or_env(
            values, "azure_cogs_key", "AZURE_COGS_KEY"
        )
        azure_cogs_endpoint = get_from_dict_or_env(
            values, "azure_cogs_endpoint", "AZURE_COGS_ENDPOINT"
        )
        try:
            import azure.ai.vision as sdk
            values["vision_service"] = sdk.VisionServiceOptions(
                endpoint=azure_cogs_endpoint, key=azure_cogs_key
            )
            values["analysis_options"] = sdk.ImageAnalysisOptions()
            values["analysis_options"].features = (
                sdk.ImageAnalysisFeature.CAPTION
                | sdk.ImageAnalysisFeature.OBJECTS
                | sdk.ImageAnalysisFeature.TAGS
                | sdk.ImageAnalysisFeature.TEXT
            )
        except ImportError:
            raise ImportError(
                "azure-ai-vision is not installed. "
                "Run `pip install azure-ai-vision` to install."
            )
        return values
    def _image_analysis(self, image_path: str) -> Dict:
        """Analyze a local image file or remote image URL.

        Returns a dict with any of the keys ``caption``/``objects``/``tags``/
        ``text`` present in the analysis result.
        """
        try:
            import azure.ai.vision as sdk
        except ImportError:
            # BUGFIX: this was previously swallowed (`except ImportError: pass`),
            # which produced a confusing NameError on `sdk` below. Fail loudly
            # with the same guidance as validate_environment.
            raise ImportError(
                "azure-ai-vision is not installed. "
                "Run `pip install azure-ai-vision` to install."
            )
        image_src_type = detect_file_src_type(image_path)
        if image_src_type == "local":
            vision_source = sdk.VisionSource(filename=image_path)
        elif image_src_type == "remote":
            vision_source = sdk.VisionSource(url=image_path)
        else:
            raise ValueError(f"Invalid image path: {image_path}")
        image_analyzer = sdk.ImageAnalyzer(
            self.vision_service, vision_source, self.analysis_options
        )
        result = image_analyzer.analyze()
        res_dict = {}
        if result.reason == sdk.ImageAnalysisResultReason.ANALYZED:
            if result.caption is not None:
                res_dict["caption"] = result.caption.content
            if result.objects is not None:
                res_dict["objects"] = [obj.name for obj in result.objects]
            if result.tags is not None:
                res_dict["tags"] = [tag.name for tag in result.tags]
            if result.text is not None:
                res_dict["text"] = [line.content for line in result.text.lines]
        else:
            error_details = sdk.ImageAnalysisErrorDetails.from_result(result)
            raise RuntimeError(
                f"Image analysis failed.\n"
                f"Reason: {error_details.reason}\n"
                f"Details: {error_details.message}"
            )
        return res_dict
    def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
        """Format the analysis dict into a newline-separated summary string."""
        formatted_result = []
        if "caption" in image_analysis_result:
            formatted_result.append("Caption: " + image_analysis_result["caption"])
        if (
            "objects" in image_analysis_result
            and len(image_analysis_result["objects"]) > 0
        ):
            formatted_result.append(
                "Objects: " + ", ".join(image_analysis_result["objects"])
            )
        if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
            formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"]))
        if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
            formatted_result.append("Text: " + ", ".join(image_analysis_result["text"]))
        return "\n".join(formatted_result)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            image_analysis_result = self._image_analysis(query)
            if not image_analysis_result:
                return "No good image analysis result was found"
            return self._format_image_analysis_result(image_analysis_result)
        except Exception as e:
            raise RuntimeError(f"Error while running AzureCogsImageAnalysisTool: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/brave_search/tool.py | from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.brave_search import BraveSearchWrapper
class BraveSearch(BaseTool):  # type: ignore[override]
    """Tool that runs queries against the Brave Search engine."""
    name: str = "brave_search"
    description: str = (
        "a search engine. "
        "useful for when you need to answer questions about current events."
        " input should be a search query."
    )
    search_wrapper: BraveSearchWrapper
    @classmethod
    def from_api_key(
        cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
    ) -> BraveSearch:
        """Build the tool directly from a Brave Search api key.

        Args:
            api_key: The api key to use.
            search_kwargs: Any additional kwargs to pass to the search wrapper.
            **kwargs: Any additional kwargs to pass to the tool.
        Returns:
            A tool.
        """
        return cls(
            search_wrapper=BraveSearchWrapper(
                api_key=api_key, search_kwargs=search_kwargs or {}
            ),
            **kwargs,
        )
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Delegate the search query to the wrapped Brave client."""
        return self.search_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/scenexplain/tool.py | """Tool for the SceneXplain API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.scenexplain import SceneXplainAPIWrapper
class SceneXplainInput(BaseModel):
    """Input for SceneXplain."""
    # URL of the image to caption; passed through to the SceneXplain API.
    query: str = Field(..., description="The link to the image to explain")
class SceneXplainTool(BaseTool):  # type: ignore[override]
    """Tool that explains images."""
    name: str = "image_explainer"
    description: str = (
        "An Image Captioning Tool: Use this tool to generate a detailed caption "
        "for an image. The input can be an image file of any format, and "
        "the output will be a text description that covers every detail of the image."
    )
    # Built from environment configuration when not supplied explicitly.
    api_wrapper: SceneXplainAPIWrapper = Field(default_factory=SceneXplainAPIWrapper)  # type: ignore[arg-type]
    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Use the tool: forward ``query`` (an image link) to the API wrapper."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/scenexplain/__init__.py | """SceneXplain API toolkit."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/clickup/tool.py | """
This tool allows agents to interact with the clickup library
and operate on a Clickup instance.
To use this tool, you must first set as environment variables:
client_secret
client_id
code
Below is a sample script that uses the Clickup tool:
```python
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
from langchain_community.utilities.clickup import ClickupAPIWrapper
clickup = ClickupAPIWrapper()
toolkit = ClickupToolkit.from_clickup_api_wrapper(clickup)
```
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.clickup import ClickupAPIWrapper
class ClickupAction(BaseTool):  # type: ignore[override]
    """Tool that queries the Clickup API."""
    api_wrapper: ClickupAPIWrapper = Field(default_factory=ClickupAPIWrapper)
    # Operation to dispatch to the wrapper (e.g. a create/get/update mode).
    # NOTE(review): valid values are defined by ClickupAPIWrapper.run —
    # confirm against that module.
    mode: str
    # Name/description are filled in per-mode by the toolkit factory.
    name: str = ""
    description: str = ""
    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Clickup API to run an operation."""
        return self.api_wrapper.run(self.mode, instructions)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/clickup/prompt.py | # flake8: noqa
CLICKUP_TASK_CREATE_PROMPT = """
This tool is a wrapper around clickup's create_task API, useful when you need to create a CLICKUP task.
The input to this tool is a dictionary specifying the fields of the CLICKUP task, and will be passed into clickup's CLICKUP `create_task` function.
Only add fields described by the user.
Use the following mapping in order to map the user's priority to the clickup priority: {{
Urgent = 1,
High = 2,
Normal = 3,
Low = 4,
}}. If the user passes in "urgent" replace the priority value as 1.
Here are a few task descriptions and corresponding input examples:
Task: create a task called "Daily report"
Example Input: {{"name": "Daily report"}}
Task: Make an open task called "ClickUp toolkit refactor" with description "Refactor the clickup toolkit to use dataclasses for parsing", with status "open"
Example Input: {{"name": "ClickUp toolkit refactor", "description": "Refactor the clickup toolkit to use dataclasses for parsing", "status": "Open"}}
Task: create a task with priority 3 called "New Task Name" with description "New Task Description", with status "open"
Example Input: {{"name": "New Task Name", "description": "New Task Description", "status": "Open", "priority": 3}}
Task: Add a task called "Bob's task" and assign it to Bob (user id: 81928627)
Example Input: {{"name": "Bob's task", "description": "Task for Bob", "assignees": [81928627]}}
"""
CLICKUP_LIST_CREATE_PROMPT = """
This tool is a wrapper around clickup's create_list API, useful when you need to create a CLICKUP list.
The input to this tool is a dictionary specifying the fields of a clickup list, and will be passed to clickup's create_list function.
Only add fields described by the user.
Use the following mapping in order to map the user's priority to the clickup priority: {{
Urgent = 1,
High = 2,
Normal = 3,
Low = 4,
}}. If the user passes in "urgent" replace the priority value as 1.
Here are a few list descriptions and corresponding input examples:
Description: make a list with name "General List"
Example Input: {{"name": "General List"}}
Description: add a new list ("TODOs") with low priority
Example Input: {{"name": "General List", "priority": 4}}
Description: create a list with name "List name", content "List content", priority 2, and status "red"
Example Input: {{"name": "List name", "content": "List content", "priority": 2, "status": "red"}}
"""
CLICKUP_FOLDER_CREATE_PROMPT = """
This tool is a wrapper around clickup's create_folder API, useful when you need to create a CLICKUP folder.
The input to this tool is a dictionary specifying the fields of a clickup folder, and will be passed to clickup's create_folder function.
For example, to create a folder with name "Folder name" you would pass in the following dictionary:
{{
"name": "Folder name",
}}
"""
CLICKUP_GET_TASK_PROMPT = """
This tool is a wrapper around clickup's API,
Do NOT use to get a task specific attribute. Use get task attribute instead.
useful when you need to get a specific task for the user. Given the task id you want to create a request similar to the following dictionary:
payload = {{"task_id": "86a0t44tq"}}
"""
# NOTE: the "status" example previously mapped to attribute_name "description",
# teaching the model the wrong payload; fixed to "status".
CLICKUP_GET_TASK_ATTRIBUTE_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific attribute from a task. Given the task id and desired attribute create a request similar to the following dictionary:
payload = {{"task_id": "<task_id_to_update>", "attribute_name": "<attribute_name_to_update>"}}
Here are some example queries their corresponding payloads:
Get the name of task 23jn23kjn -> {{"task_id": "23jn23kjn", "attribute_name": "name"}}
What is the priority of task 86a0t44tq? -> {{"task_id": "86a0t44tq", "attribute_name": "priority"}}
Output the description of task sdc9ds9jc -> {{"task_id": "sdc9ds9jc", "attribute_name": "description"}}
Who is assigned to task bgjfnbfg0 -> {{"task_id": "bgjfnbfg0", "attribute_name": "assignee"}}
Which is the status of task kjnsdcjc? -> {{"task_id": "kjnsdcjc", "attribute_name": "status"}}
How long is the time estimate of task sjncsd999? -> {{"task_id": "sjncsd999", "attribute_name": "time_estimate"}}
Is task jnsd98sd archived?-> {{"task_id": "jnsd98sd", "attribute_name": "archive"}}
"""
CLICKUP_GET_ALL_TEAMS_PROMPT = """
This tool is a wrapper around clickup's API, useful when you need to get all teams that the user is a part of.
To get a list of all the teams there is no necessary request parameters.
"""
CLICKUP_GET_LIST_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific list for the user. Given the list id you want to create a request similar to the following dictionary:
payload = {{"list_id": "901300608424"}}
"""
CLICKUP_GET_FOLDERS_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get a specific folder for the user. Given the user's workspace id you want to create a request similar to the following dictionary:
payload = {{"folder_id": "90130119692"}}
"""
# NOTE: this constant was previously defined twice, verbatim; the redundant
# second assignment has been removed.
CLICKUP_GET_SPACES_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to get all the spaces available to a user. Given the user's workspace id you want to create a request similar to the following dictionary:
payload = {{"team_id": "90130119692"}}
"""
# NOTE: the "status" example previously used {{..., "attribute_name":
# "description", "status": "done"}}, which contradicts the documented payload
# schema ("attribute_name" + "value"); fixed to match.
CLICKUP_UPDATE_TASK_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to update a specific attribute of a task. Given the task id, desired attribute to change and the new value you want to create a request similar to the following dictionary:
payload = {{"task_id": "<task_id_to_update>", "attribute_name": "<attribute_name_to_update>", "value": "<value_to_update_to>"}}
Here are some example queries their corresponding payloads:
Change the name of task 23jn23kjn to new task name -> {{"task_id": "23jn23kjn", "attribute_name": "name", "value": "new task name"}}
Update the priority of task 86a0t44tq to 1 -> {{"task_id": "86a0t44tq", "attribute_name": "priority", "value": 1}}
Re-write the description of task sdc9ds9jc to 'a new task description' -> {{"task_id": "sdc9ds9jc", "attribute_name": "description", "value": "a new task description"}}
Forward the status of task kjnsdcjc to done -> {{"task_id": "kjnsdcjc", "attribute_name": "status", "value": "done"}}
Increase the time estimate of task sjncsd999 to 3h -> {{"task_id": "sjncsd999", "attribute_name": "time_estimate", "value": 8000}}
Archive task jnsd98sd -> {{"task_id": "jnsd98sd", "attribute_name": "archive", "value": true}}
*IMPORTANT*: Pay attention to the exact syntax above and the correct use of quotes.
For changing priority and time estimates, we expect integers (int).
For name, description and status we expect strings (str).
For archive, we expect a boolean (bool).
"""
CLICKUP_UPDATE_TASK_ASSIGNEE_PROMPT = """
This tool is a wrapper around clickup's API,
useful when you need to update the assignees of a task. Given the task id, the operation add or remove (rem), and the list of user ids. You want to create a request similar to the following dictionary:
payload = {{"task_id": "<task_id_to_update>", "operation": "<operation, (add or rem)>", "users": [<user_id_1>, <user_id_2>]}}
Here are some example queries their corresponding payloads:
Add 81928627 and 3987234 as assignees to task 21hw21jn -> {{"task_id": "21hw21jn", "operation": "add", "users": [81928627, 3987234]}}
Remove 67823487 as assignee from task jin34ji4 -> {{"task_id": "jin34ji4", "operation": "rem", "users": [67823487]}}
*IMPORTANT*: Users id should always be ints.
"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/mojeek_search/tool.py | from __future__ import annotations
from typing import Any, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from langchain_community.utilities.mojeek_search import MojeekSearchAPIWrapper
class MojeekSearch(BaseTool):  # type: ignore[override]
    """Tool that queries the Mojeek search API via ``MojeekSearchAPIWrapper``."""
    name: str = "mojeek_search"
    description: str = (
        "A wrapper around Mojeek Search. "
        "Useful for when you need to web search results. "
        "Input should be a search query."
    )
    api_wrapper: MojeekSearchAPIWrapper
    @classmethod
    def config(
        cls, api_key: str, search_kwargs: Optional[dict] = None, **kwargs: Any
    ) -> MojeekSearch:
        """Construct the tool from an API key and optional search kwargs."""
        wrapper = MojeekSearchAPIWrapper(
            api_key=api_key, search_kwargs=search_kwargs or {}
        )
        return cls(api_wrapper=wrapper, **kwargs)
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool: forward ``query`` to the Mojeek wrapper."""
        return self.api_wrapper.run(query)
    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        # No async client in the wrapper, so async use is explicitly rejected.
        raise NotImplementedError("MojeekSearch does not support async")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_lens/tool.py | """Tool for the Google Lens"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_lens import GoogleLensAPIWrapper
class GoogleLensQueryRun(BaseTool):  # type: ignore[override]
    """Tool that queries the Google Lens API."""
    name: str = "google_lens"
    description: str = (
        "A wrapper around Google Lens Search. "
        "Useful for when you need to get information related"
        "to an image from Google Lens"
        "Input should be a url to an image."
    )
    api_wrapper: GoogleLensAPIWrapper
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool: forward ``query`` (an image URL) to the API wrapper."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_lens/__init__.py | """Google Lens API Toolkit."""
from langchain_community.tools.google_lens.tool import GoogleLensQueryRun
__all__ = ["GoogleLensQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/searx_search/tool.py | """Tool for the SearxNG search API."""
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.utilities.searx_search import SearxSearchWrapper
class SearxSearchQueryInput(BaseModel):
    """Input for the SearxSearch tool."""
    # Raw query string passed to the Searx instance.
    query: str = Field(description="query to look up on searx")
class SearxSearchRun(BaseTool):  # type: ignore[override, override]
    """Tool that queries a Searx instance."""
    name: str = "searx_search"
    description: str = (
        "A meta search engine."
        "Useful for when you need to answer questions about current events."
        "Input should be a search query."
    )
    wrapper: SearxSearchWrapper
    # Extra keyword arguments forwarded verbatim to the wrapper on each call.
    kwargs: dict = Field(default_factory=dict)
    args_schema: Type[BaseModel] = SearxSearchQueryInput
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self.wrapper.run(query, **self.kwargs)
    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        return await self.wrapper.arun(query, **self.kwargs)
class SearxSearchResults(BaseTool):  # type: ignore[override, override]
    """Tool that queries a Searx instance and gets back json."""
    name: str = "searx_search_results"
    description: str = (
        "A meta search engine."
        "Useful for when you need to answer questions about current events."
        "Input should be a search query. Output is a JSON array of the query results"
    )
    wrapper: SearxSearchWrapper
    num_results: int = 4
    kwargs: dict = Field(default_factory=dict)
    args_schema: Type[BaseModel] = SearxSearchQueryInput
    model_config = ConfigDict(
        extra="allow",
    )
    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Fetch ``num_results`` results and return their string representation."""
        results = self.wrapper.results(query, self.num_results, **self.kwargs)
        return str(results)
    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Async variant of :meth:`_run` using the wrapper's async client."""
        results = await self.wrapper.aresults(query, self.num_results, **self.kwargs)
        return str(results)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/requests/tool.py | # flake8: noqa
"""Tools for making requests to an API endpoint."""
import json
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_community.utilities.requests import GenericRequestsWrapper
from langchain_core.tools import BaseTool
def _parse_input(text: str) -> Dict[str, Any]:
"""Parse the json string into a dict."""
return json.loads(text)
def _clean_url(url: str) -> str:
"""Strips quotes from the url."""
return url.strip("\"'")
class BaseRequestsTool(BaseModel):
    """Base class for requests tools."""
    requests_wrapper: GenericRequestsWrapper
    # Explicit opt-in gate: SSRF and similar attacks are possible through
    # arbitrary HTTP requests, so callers must acknowledge the risk.
    allow_dangerous_requests: bool = False
    def __init__(self, **kwargs: Any):
        """Initialize the tool.

        Raises:
            ValueError: if ``allow_dangerous_requests`` was not explicitly
                set to True by the caller.
        """
        if not kwargs.get("allow_dangerous_requests", False):
            raise ValueError(
                "You must set allow_dangerous_requests to True to use this tool. "
                "Requests can be dangerous and can lead to security vulnerabilities. "
                "For example, users can ask a server to make a request to an internal "
                "server. It's recommended to use requests through a proxy server "
                "and avoid accepting inputs from untrusted sources without proper "
                "sandboxing."
                "Please see: https://python.langchain.com/docs/security for "
                "further security information."
            )
        super().__init__(**kwargs)
class RequestsGetTool(BaseRequestsTool, BaseTool):  # type: ignore[override]
    """Tool for making a GET request to an API endpoint."""
    name: str = "requests_get"
    description: str = """A portal to the internet. Use this when you need to get specific
    content from a website. Input should be a url (i.e. https://www.google.com).
    The output will be the text response of the GET request.
    """
    def _run(
        self, url: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> Union[str, Dict[str, Any]]:
        """Run the tool."""
        # Strip stray quotes that models often wrap around the url.
        return self.requests_wrapper.get(_clean_url(url))
    async def _arun(
        self,
        url: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[str, Dict[str, Any]]:
        """Run the tool asynchronously."""
        return await self.requests_wrapper.aget(_clean_url(url))
class RequestsPostTool(BaseRequestsTool, BaseTool):  # type: ignore[override]
    """Tool for making a POST request to an API endpoint."""
    name: str = "requests_post"
    description: str = """Use this when you want to POST to a website.
    Input should be a json string with two keys: "url" and "data".
    The value of "url" should be a string, and the value of "data" should be a dictionary of
    key-value pairs you want to POST to the url.
    Be careful to always use double quotes for strings in the json string
    The output will be the text response of the POST request.
    """
    def _run(
        self, text: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> Union[str, Dict[str, Any]]:
        """Run the tool."""
        # Errors (bad JSON, missing keys, HTTP failure) are returned as a
        # string repr rather than raised, so the agent can read and recover.
        try:
            data = _parse_input(text)
            return self.requests_wrapper.post(_clean_url(data["url"]), data["data"])
        except Exception as e:
            return repr(e)
    async def _arun(
        self,
        text: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[str, Dict[str, Any]]:
        """Run the tool asynchronously."""
        try:
            data = _parse_input(text)
            return await self.requests_wrapper.apost(
                _clean_url(data["url"]), data["data"]
            )
        except Exception as e:
            return repr(e)
class RequestsPatchTool(BaseRequestsTool, BaseTool):  # type: ignore[override]
    """Issue HTTP PATCH requests built from a JSON-encoded tool input."""

    name: str = "requests_patch"
    description: str = """Use this when you want to PATCH to a website.
    Input should be a json string with two keys: "url" and "data".
    The value of "url" should be a string, and the value of "data" should be a dictionary of 
    key-value pairs you want to PATCH to the url.
    Be careful to always use double quotes for strings in the json string
    The output will be the text response of the PATCH request.
    """

    def _run(
        self, text: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> Union[str, Dict[str, Any]]:
        """PATCH synchronously; any failure is returned as the error's repr."""
        try:
            payload = _parse_input(text)
            target = _clean_url(payload["url"])
            return self.requests_wrapper.patch(target, payload["data"])
        except Exception as e:
            return repr(e)

    async def _arun(
        self,
        text: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[str, Dict[str, Any]]:
        """PATCH asynchronously; any failure is returned as the error's repr."""
        try:
            payload = _parse_input(text)
            target = _clean_url(payload["url"])
            return await self.requests_wrapper.apatch(target, payload["data"])
        except Exception as e:
            return repr(e)
class RequestsPutTool(BaseRequestsTool, BaseTool):  # type: ignore[override]
    """Issue HTTP PUT requests built from a JSON-encoded tool input."""

    name: str = "requests_put"
    description: str = """Use this when you want to PUT to a website.
    Input should be a json string with two keys: "url" and "data".
    The value of "url" should be a string, and the value of "data" should be a dictionary of 
    key-value pairs you want to PUT to the url.
    Be careful to always use double quotes for strings in the json string.
    The output will be the text response of the PUT request.
    """

    def _run(
        self, text: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> Union[str, Dict[str, Any]]:
        """PUT synchronously; any failure is returned as the error's repr."""
        try:
            payload = _parse_input(text)
            target = _clean_url(payload["url"])
            return self.requests_wrapper.put(target, payload["data"])
        except Exception as e:
            return repr(e)

    async def _arun(
        self,
        text: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[str, Dict[str, Any]]:
        """PUT asynchronously; any failure is returned as the error's repr."""
        try:
            payload = _parse_input(text)
            target = _clean_url(payload["url"])
            return await self.requests_wrapper.aput(target, payload["data"])
        except Exception as e:
            return repr(e)
class RequestsDeleteTool(BaseRequestsTool, BaseTool):  # type: ignore[override]
    """Issue HTTP DELETE requests against a user-supplied URL via the wrapped client."""

    name: str = "requests_delete"
    description: str = """A portal to the internet.
    Use this when you need to make a DELETE request to a URL.
    Input should be a specific url, and the output will be the text
    response of the DELETE request.
    """

    def _run(
        self,
        url: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[str, Dict[str, Any]]:
        """Send the DELETE synchronously and return the response body."""
        target = _clean_url(url)
        return self.requests_wrapper.delete(target)

    async def _arun(
        self,
        url: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[str, Dict[str, Any]]:
        """Send the DELETE asynchronously and return the response body."""
        target = _clean_url(url)
        return await self.requests_wrapper.adelete(target)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/requests/__init__.py | """Tools for making requests to an API endpoint."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/human/tool.py | """Tool for asking human input."""
from typing import Callable, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
def _print_func(text: str) -> None:
print("\n") # noqa: T201
print(text) # noqa: T201
class HumanInputRun(BaseTool):  # type: ignore[override]
    """Tool that pauses the agent and asks a human for input.

    ``prompt_func`` displays the question and ``input_func`` collects the
    reply; both default to the terminal but can be swapped for other UIs.
    """

    name: str = "human"
    description: str = (
        "You can ask a human for guidance when you think you "
        "got stuck or you are not sure what to do next. "
        "The input should be a question for the human."
    )
    # Lambdas defer binding so subclasses/tests can monkeypatch the defaults.
    prompt_func: Callable[[str], None] = Field(default_factory=lambda: _print_func)
    input_func: Callable = Field(default_factory=lambda: input)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Show ``query`` to the human and return whatever they type back."""
        self.prompt_func(query)
        return self.input_func()
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/human/__init__.py | """Tool for asking for human input."""
from langchain_community.tools.human.tool import HumanInputRun
__all__ = ["HumanInputRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools/openapi | lc_public_repos/langchain/libs/community/langchain_community/tools/openapi/utils/openapi_utils.py | """Utility functions for parsing an OpenAPI spec. Kept for backwards compat."""
from langchain_community.utilities.openapi import HTTPVerb, OpenAPISpec
__all__ = ["HTTPVerb", "OpenAPISpec"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools/openapi | lc_public_repos/langchain/libs/community/langchain_community/tools/openapi/utils/api_models.py | """Pydantic models for parsing an OpenAPI spec."""
from __future__ import annotations
import logging
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from pydantic import BaseModel, Field
from langchain_community.tools.openapi.utils.openapi_utils import HTTPVerb, OpenAPISpec
logger = logging.getLogger(__name__)
# Mapping from OpenAPI primitive type names to their Python equivalents.
# "null" maps to None deliberately: there is no Python type object for JSON null.
PRIMITIVE_TYPES = {
    "integer": int,
    "number": float,
    "string": str,
    "boolean": bool,
    "array": List,
    "object": Dict,
    "null": None,
}
# See https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameterIn
# for more info.
class APIPropertyLocation(Enum):
    """Where an API property is transmitted in the request."""

    QUERY = "query"
    PATH = "path"
    HEADER = "header"
    COOKIE = "cookie"  # Not yet supported

    @classmethod
    def from_str(cls, location: str) -> "APIPropertyLocation":
        """Parse an OpenAPI ``in`` value into an APIPropertyLocation.

        Raises:
            ValueError: if ``location`` is not one of the member values.
        """
        try:
            return cls(location)
        except ValueError:
            raise ValueError(
                f"Invalid APIPropertyLocation. Valid values are {cls.__members__}"
            )
# Only JSON request bodies are parsed; other media types are skipped.
_SUPPORTED_MEDIA_TYPES = ("application/json",)

# Parameter locations this parser can translate into API properties
# (cookie parameters are rejected elsewhere).
SUPPORTED_LOCATIONS = {
    APIPropertyLocation.HEADER,
    APIPropertyLocation.QUERY,
    APIPropertyLocation.PATH,
}

# Error template for a parameter declared in an unsupported location.
INVALID_LOCATION_TEMPL = (
    'Unsupported APIPropertyLocation "{location}"'
    " for parameter {name}. "
    + f"Valid values are {[loc.value for loc in SUPPORTED_LOCATIONS]}"
)

# A parsed schema type: a primitive name, a Python type, a tuple (for
# arrays), None (untyped), or a synthesized Enum.
SCHEMA_TYPE = Union[str, Type, tuple, None, Enum]
class APIPropertyBase(BaseModel):
    """Base model for an API property, shared by parameters and body fields."""

    # The name of the parameter is required and is case-sensitive.
    # If "in" is "path", the "name" field must correspond to a template expression
    # within the path field in the Paths Object.
    # If "in" is "header" and the "name" field is "Accept", "Content-Type",
    # or "Authorization", the parameter definition is ignored.
    # For all other cases, the "name" corresponds to the parameter
    # name used by the "in" property.
    name: str = Field(alias="name")
    """The name of the property."""

    required: bool = Field(alias="required")
    """Whether the property is required."""

    type: SCHEMA_TYPE = Field(alias="type")
    """The type of the property.

    Either a primitive type, a component/parameter type,
    or an array or 'object' (dict) of the above."""

    default: Optional[Any] = Field(alias="default", default=None)
    """The default value of the property."""

    description: Optional[str] = Field(alias="description", default=None)
    """The description of the property."""
if TYPE_CHECKING:
from openapi_pydantic import (
MediaType,
Parameter,
RequestBody,
Schema,
)
class APIProperty(APIPropertyBase):
    """A model for a property in the query, path, header, or cookie params."""

    location: APIPropertyLocation = Field(alias="location")
    """The path/how it's being passed to the endpoint."""

    @staticmethod
    def _cast_schema_list_type(
        schema: Schema,
    ) -> Optional[Union[str, Tuple[str, ...]]]:
        # OpenAPI 3.1 allows `type` to be a list of type names; normalize
        # that to a hashable tuple while passing scalar types through.
        type_ = schema.type
        if not isinstance(type_, list):
            return type_
        else:
            return tuple(type_)

    @staticmethod
    def _get_schema_type_for_enum(parameter: Parameter, schema: Schema) -> Enum:
        """Get the schema type when the parameter is an enum."""
        # Synthesize a dedicated Enum class named after the parameter,
        # one member per allowed value.
        param_name = f"{parameter.name}Enum"
        return Enum(param_name, {str(v): v for v in schema.enum})

    @staticmethod
    def _get_schema_type_for_array(
        schema: Schema,
    ) -> Optional[Union[str, Tuple[str, ...]]]:
        """Resolve the item type of an array schema to a 1-tuple of type names."""
        from openapi_pydantic import (
            Reference,
            Schema,
        )

        items = schema.items
        if isinstance(items, Schema):
            schema_type = APIProperty._cast_schema_list_type(items)
        elif isinstance(items, Reference):
            ref_name = items.ref.split("/")[-1]
            schema_type = ref_name  # TODO: Add ref definitions to make this valid
        else:
            raise ValueError(f"Unsupported array items: {items}")

        if isinstance(schema_type, str):
            # TODO: recurse
            schema_type = (schema_type,)

        return schema_type

    @staticmethod
    def _get_schema_type(parameter: Parameter, schema: Optional[Schema]) -> SCHEMA_TYPE:
        """Resolve the parsed type for a parameter's (dereferenced) schema."""
        if schema is None:
            return None
        schema_type: SCHEMA_TYPE = APIProperty._cast_schema_list_type(schema)
        if schema_type == "array":
            schema_type = APIProperty._get_schema_type_for_array(schema)
        elif schema_type == "object":
            # TODO: Resolve array and object types to components.
            raise NotImplementedError("Objects not yet supported")
        elif schema_type in PRIMITIVE_TYPES:
            if schema.enum:
                schema_type = APIProperty._get_schema_type_for_enum(parameter, schema)
            else:
                # Directly use the primitive type
                pass
        else:
            raise NotImplementedError(f"Unsupported type: {schema_type}")

        return schema_type

    @staticmethod
    def _validate_location(location: APIPropertyLocation, name: str) -> None:
        # Fail fast on cookie (or any other unsupported) parameter locations.
        if location not in SUPPORTED_LOCATIONS:
            raise NotImplementedError(
                INVALID_LOCATION_TEMPL.format(location=location, name=name)
            )

    @staticmethod
    def _validate_content(content: Optional[Dict[str, MediaType]]) -> None:
        # Parameters carrying `content` (media-typed payloads) are out of scope.
        if content:
            raise ValueError(
                "API Properties with media content not supported. "
                "Media content only supported within APIRequestBodyProperty's"
            )

    @staticmethod
    def _get_schema(parameter: Parameter, spec: OpenAPISpec) -> Optional[Schema]:
        """Return the parameter's schema, dereferencing a $ref if needed."""
        from openapi_pydantic import (
            Reference,
            Schema,
        )

        schema = parameter.param_schema
        if isinstance(schema, Reference):
            schema = spec.get_referenced_schema(schema)
        elif schema is None:
            return None
        elif not isinstance(schema, Schema):
            raise ValueError(f"Error dereferencing schema: {schema}")

        return schema

    @staticmethod
    def is_supported_location(location: str) -> bool:
        """Return whether the provided location is supported."""
        try:
            return APIPropertyLocation.from_str(location) in SUPPORTED_LOCATIONS
        except ValueError:
            return False

    @classmethod
    def from_parameter(cls, parameter: Parameter, spec: OpenAPISpec) -> "APIProperty":
        """Instantiate from an OpenAPI Parameter.

        Raises NotImplementedError/ValueError for unsupported locations,
        media content, or schema types (see the helpers above).
        """
        location = APIPropertyLocation.from_str(parameter.param_in)
        cls._validate_location(
            location,
            parameter.name,
        )
        cls._validate_content(parameter.content)
        schema = cls._get_schema(parameter, spec)
        schema_type = cls._get_schema_type(parameter, schema)
        default_val = schema.default if schema is not None else None
        return cls(
            name=parameter.name,
            location=location,
            default=default_val,
            description=parameter.description,
            required=parameter.required,
            type=schema_type,
        )
class APIRequestBodyProperty(APIPropertyBase):
    """A model for a request body property."""

    properties: List["APIRequestBodyProperty"] = Field(alias="properties")
    """The sub-properties of the property."""

    # This is useful for handling nested property cycles.
    # We can define separate types in that case.
    references_used: List[str] = Field(alias="references_used")
    """The references used by the property."""

    @classmethod
    def _process_object_schema(
        cls, schema: Schema, spec: OpenAPISpec, references_used: List[str]
    ) -> Tuple[Union[str, List[str], None], List["APIRequestBodyProperty"]]:
        """Flatten an object schema's properties into child property models."""
        from openapi_pydantic import (
            Reference,
        )

        properties = []
        required_props = schema.required or []
        if schema.properties is None:
            raise ValueError(
                f"No properties found when processing object schema: {schema}"
            )
        for prop_name, prop_schema in schema.properties.items():
            if isinstance(prop_schema, Reference):
                ref_name = prop_schema.ref.split("/")[-1]
                if ref_name not in references_used:
                    references_used.append(ref_name)
                    prop_schema = spec.get_referenced_schema(prop_schema)
                else:
                    # Reference already expanded higher up the tree: skip it
                    # to break the cycle instead of recursing forever.
                    continue

            properties.append(
                cls.from_schema(
                    schema=prop_schema,
                    name=prop_name,
                    required=prop_name in required_props,
                    spec=spec,
                    references_used=references_used,
                )
            )
        return schema.type, properties

    @classmethod
    def _process_array_schema(
        cls,
        schema: Schema,
        name: str,
        spec: OpenAPISpec,
        references_used: List[str],
    ) -> str:
        """Render an array schema as an ``Array<...>`` type string."""
        from openapi_pydantic import Reference, Schema

        items = schema.items
        if items is not None:
            if isinstance(items, Reference):
                ref_name = items.ref.split("/")[-1]
                if ref_name not in references_used:
                    references_used.append(ref_name)
                    items = spec.get_referenced_schema(items)
                else:
                    # Cycle: reuse the reference name without dereferencing.
                    pass
                # Referenced item types are rendered by name, not expanded.
                return f"Array<{ref_name}>"
            else:
                pass

        if isinstance(items, Schema):
            array_type = cls.from_schema(
                schema=items,
                name=f"{name}Item",
                required=True,  # TODO: Add required
                spec=spec,
                references_used=references_used,
            )
            return f"Array<{array_type.type}>"

        # No item schema at all: fall back to the bare "array" type name.
        return "array"

    @classmethod
    def from_schema(
        cls,
        schema: Schema,
        name: str,
        required: bool,
        spec: OpenAPISpec,
        references_used: Optional[List[str]] = None,
    ) -> "APIRequestBodyProperty":
        """Recursively populate from an OpenAPI Schema."""
        if references_used is None:
            references_used = []

        schema_type = schema.type
        properties: List[APIRequestBodyProperty] = []
        if schema_type == "object" and schema.properties:
            schema_type, properties = cls._process_object_schema(
                schema, spec, references_used
            )
        elif schema_type == "array":
            schema_type = cls._process_array_schema(schema, name, spec, references_used)
        elif schema_type in PRIMITIVE_TYPES:
            # Use the primitive type directly
            pass
        elif schema_type is None:
            # No typing specified/parsed. Will map to 'any'
            pass
        else:
            raise ValueError(f"Unsupported type: {schema_type}")

        return cls(
            name=name,
            required=required,
            type=schema_type,
            default=schema.default,
            description=schema.description,
            properties=properties,
            references_used=references_used,
        )
class APIRequestBody(BaseModel):
    """A model for a request body."""

    description: Optional[str] = Field(alias="description")
    """The description of the request body."""

    properties: List[APIRequestBodyProperty] = Field(alias="properties")

    # E.g., application/json - we only support JSON at the moment.
    media_type: str = Field(alias="media_type")
    """The media type of the request body."""

    @classmethod
    def _process_supported_media_type(
        cls,
        media_type_obj: MediaType,
        spec: OpenAPISpec,
    ) -> List[APIRequestBodyProperty]:
        """Process the media type of the request body."""
        from openapi_pydantic import Reference

        references_used = []
        schema = media_type_obj.media_type_schema
        if isinstance(schema, Reference):
            references_used.append(schema.ref.split("/")[-1])
            schema = spec.get_referenced_schema(schema)
        if schema is None:
            raise ValueError(
                f"Could not resolve schema for media type: {media_type_obj}"
            )
        api_request_body_properties = []
        required_properties = schema.required or []
        if schema.type == "object" and schema.properties:
            # Expand each top-level object property into its own model.
            for prop_name, prop_schema in schema.properties.items():
                if isinstance(prop_schema, Reference):
                    prop_schema = spec.get_referenced_schema(prop_schema)

                api_request_body_properties.append(
                    APIRequestBodyProperty.from_schema(
                        schema=prop_schema,
                        name=prop_name,
                        required=prop_name in required_properties,
                        spec=spec,
                    )
                )
        else:
            # Non-object bodies are wrapped in a single synthetic "body" property.
            api_request_body_properties.append(
                APIRequestBodyProperty(
                    name="body",
                    required=True,
                    type=schema.type,
                    default=schema.default,
                    description=schema.description,
                    properties=[],
                    references_used=references_used,
                )
            )

        return api_request_body_properties

    @classmethod
    def from_request_body(
        cls, request_body: RequestBody, spec: OpenAPISpec
    ) -> "APIRequestBody":
        """Instantiate from an OpenAPI RequestBody.

        Raises:
            ValueError: if the request body declares no supported media type
                (only ``application/json`` is supported).
        """
        properties = []
        # Track the media type actually processed. The previous implementation
        # reused the loop variable after the loop, which raised NameError on
        # empty content and recorded whatever media type happened to come last
        # in iteration order - even an unsupported one.
        selected_media_type: Optional[str] = None
        for media_type, media_type_obj in request_body.content.items():
            if media_type not in _SUPPORTED_MEDIA_TYPES:
                continue
            api_request_body_properties = cls._process_supported_media_type(
                media_type_obj,
                spec,
            )
            properties.extend(api_request_body_properties)
            selected_media_type = media_type
        if selected_media_type is None:
            raise ValueError(
                "No supported media types found in request body content: "
                f"{sorted(request_body.content)}"
            )
        return cls(
            description=request_body.description,
            properties=properties,
            media_type=selected_media_type,
        )
# class APIRequestBodyProperty(APIPropertyBase):
# class APIRequestBody(BaseModel):
class APIOperation(BaseModel):
    """A model for a single API operation."""

    operation_id: str = Field(alias="operation_id")
    """The unique identifier of the operation."""

    description: Optional[str] = Field(alias="description")
    """The description of the operation."""

    base_url: str = Field(alias="base_url")
    """The base URL of the operation."""

    path: str = Field(alias="path")
    """The path of the operation."""

    method: HTTPVerb = Field(alias="method")
    """The HTTP method of the operation."""

    properties: Sequence[APIProperty] = Field(alias="properties")

    # TODO: Add parse in used components to be able to specify what type of
    # referenced object it is.
    # """The properties of the operation."""
    # components: Dict[str, BaseModel] = Field(alias="components")

    request_body: Optional[APIRequestBody] = Field(alias="request_body")
    """The request body of the operation."""

    @staticmethod
    def _get_properties_from_parameters(
        parameters: List[Parameter], spec: OpenAPISpec
    ) -> List[APIProperty]:
        """Get the properties of the operation."""
        properties = []
        for param in parameters:
            if APIProperty.is_supported_location(param.param_in):
                properties.append(APIProperty.from_parameter(param, spec))
            elif param.required:
                # A required parameter in an unsupported location is fatal:
                # the operation cannot be invoked correctly without it.
                raise ValueError(
                    INVALID_LOCATION_TEMPL.format(
                        location=param.param_in, name=param.name
                    )
                )
            else:
                # Optional parameters in unsupported locations are dropped
                # with a warning.
                logger.warning(
                    INVALID_LOCATION_TEMPL.format(
                        location=param.param_in, name=param.name
                    )
                    + " Ignoring optional parameter"
                )
                pass
        return properties

    @classmethod
    def from_openapi_url(
        cls,
        spec_url: str,
        path: str,
        method: str,
    ) -> "APIOperation":
        """Create an APIOperation from an OpenAPI URL."""
        spec = OpenAPISpec.from_url(spec_url)
        return cls.from_openapi_spec(spec, path, method)

    @classmethod
    def from_openapi_spec(
        cls,
        spec: OpenAPISpec,
        path: str,
        method: str,
    ) -> "APIOperation":
        """Create an APIOperation from an OpenAPI spec."""
        operation = spec.get_operation(path, method)
        parameters = spec.get_parameters_for_operation(operation)
        properties = cls._get_properties_from_parameters(parameters, spec)
        operation_id = OpenAPISpec.get_cleaned_operation_id(operation, path, method)
        request_body = spec.get_request_body_for_operation(operation)
        api_request_body = (
            APIRequestBody.from_request_body(request_body, spec)
            if request_body is not None
            else None
        )
        # Fall back to the path-level description/summary when the operation
        # itself declares neither.
        description = operation.description or operation.summary
        if not description and spec.paths is not None:
            description = spec.paths[path].description or spec.paths[path].summary
        return cls(
            operation_id=operation_id,
            description=description or "",
            base_url=spec.base_url,
            path=path,
            method=method,  # type: ignore[arg-type]
            properties=properties,
            request_body=api_request_body,
        )

    @staticmethod
    def ts_type_from_python(type_: SCHEMA_TYPE) -> str:
        """Map a parsed schema type to its TypeScript spelling."""
        if type_ is None:
            # TODO: Handle Nones better. These often result when
            # parsing specs that are < v3
            return "any"
        elif isinstance(type_, str):
            return {
                "str": "string",
                "integer": "number",
                "float": "number",
                "date-time": "string",
            }.get(type_, type_)
        elif isinstance(type_, tuple):
            # Tuples encode array item types; render the first item's type.
            return f"Array<{APIOperation.ts_type_from_python(type_[0])}>"
        elif isinstance(type_, type) and issubclass(type_, Enum):
            # Synthesized enums become a union of string literals.
            return " | ".join([f"'{e.value}'" for e in type_])
        else:
            return str(type_)

    def _format_nested_properties(
        self, properties: List[APIRequestBodyProperty], indent: int = 2
    ) -> str:
        """Format nested properties."""
        formatted_props = []

        for prop in properties:
            prop_name = prop.name
            prop_type = self.ts_type_from_python(prop.type)
            prop_required = "" if prop.required else "?"
            prop_desc = f"/* {prop.description} */" if prop.description else ""

            if prop.properties:
                # Recurse for object-typed properties, widening the indent.
                nested_props = self._format_nested_properties(
                    prop.properties, indent + 2
                )
                prop_type = f"{{\n{nested_props}\n{' ' * indent}}}"

            formatted_props.append(
                f"{prop_desc}\n{' ' * indent}{prop_name}"
                f"{prop_required}: {prop_type},"
            )

        return "\n".join(formatted_props)

    def to_typescript(self) -> str:
        """Get typescript string representation of the operation."""
        operation_name = self.operation_id
        params = []

        if self.request_body:
            formatted_request_body_props = self._format_nested_properties(
                self.request_body.properties
            )
            params.append(formatted_request_body_props)

        for prop in self.properties:
            prop_name = prop.name
            prop_type = self.ts_type_from_python(prop.type)
            prop_required = "" if prop.required else "?"
            prop_desc = f"/* {prop.description} */" if prop.description else ""
            params.append(f"{prop_desc}\n\t\t{prop_name}{prop_required}: {prop_type},")

        formatted_params = "\n".join(params).strip()
        description_str = f"/* {self.description} */" if self.description else ""

        typescript_definition = f"""
{description_str}
type {operation_name} = (_: {{
{formatted_params}
}}) => any;
"""
        return typescript_definition.strip()

    @property
    def query_params(self) -> List[str]:
        """Names of properties passed in the query string."""
        return [
            property.name
            for property in self.properties
            if property.location == APIPropertyLocation.QUERY
        ]

    @property
    def path_params(self) -> List[str]:
        """Names of properties templated into the URL path."""
        return [
            property.name
            for property in self.properties
            if property.location == APIPropertyLocation.PATH
        ]

    @property
    def body_params(self) -> List[str]:
        """Names of the top-level request-body properties (empty if no body)."""
        if self.request_body is None:
            return []
        return [prop.name for prop in self.request_body.properties]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/eleven_labs/models.py | from enum import Enum
class ElevenLabsModel(str, Enum):
    """Models available for Eleven Labs Text2Speech."""

    # Multi-language synthesis model.
    MULTI_LINGUAL = "eleven_multilingual_v1"
    # Single-language model (presumably English - confirm against ElevenLabs docs).
    MONO_LINGUAL = "eleven_monolingual_v1"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/eleven_labs/text2speech.py | import tempfile
from enum import Enum
from typing import Any, Dict, Optional, Union
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
def _import_elevenlabs() -> Any:
try:
import elevenlabs
except ImportError as e:
raise ImportError(
"Cannot import elevenlabs, please install `pip install elevenlabs`."
) from e
return elevenlabs
class ElevenLabsModel(str, Enum):
    """Models available for Eleven Labs Text2Speech."""

    # Multi-language synthesis model.
    MULTI_LINGUAL = "eleven_multilingual_v1"
    # Single-language model (presumably English - confirm against ElevenLabs docs).
    MONO_LINGUAL = "eleven_monolingual_v1"
class ElevenLabsText2SpeechTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Eleven Labs Text2Speech API.

    In order to set this up, follow instructions at:
    https://docs.elevenlabs.io/welcome/introduction
    """

    model: Union[ElevenLabsModel, str] = ElevenLabsModel.MULTI_LINGUAL

    name: str = "eleven_labs_text2speech"
    description: str = (
        "A wrapper around Eleven Labs Text2Speech. "
        "Useful for when you need to convert text to speech. "
        "It supports multiple languages, including English, German, Polish, "
        "Spanish, Italian, French, Portuguese, and Hindi. "
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key exists in environment."""
        # Presence check only; the elevenlabs SDK reads the key itself.
        _ = get_from_dict_or_env(values, "eleven_api_key", "ELEVEN_API_KEY")
        return values

    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """Synthesize ``query`` to audio; return the path of the temp file."""
        elevenlabs = _import_elevenlabs()
        try:
            speech = elevenlabs.generate(text=query, model=self.model)
            # delete=False so the caller can play the file after we return;
            # NOTE(review): suffix is ".wav" - confirm the API's actual output
            # container matches.
            with tempfile.NamedTemporaryFile(
                mode="bx", suffix=".wav", delete=False
            ) as f:
                f.write(speech)
            return f.name
        except Exception as e:
            raise RuntimeError(f"Error while running ElevenLabsText2SpeechTool: {e}")

    def play(self, speech_file: str) -> None:
        """Play the text as speech."""
        elevenlabs = _import_elevenlabs()
        with open(speech_file, mode="rb") as f:
            speech = f.read()

        elevenlabs.play(speech)

    def stream_speech(self, query: str) -> None:
        """Stream the text as speech as it is generated.

        Play the text in your speakers."""
        elevenlabs = _import_elevenlabs()
        speech_stream = elevenlabs.generate(text=query, model=self.model, stream=True)
        elevenlabs.stream(speech_stream)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/eleven_labs/__init__.py | """Eleven Labs Services Tools."""
from langchain_community.tools.eleven_labs.text2speech import ElevenLabsText2SpeechTool
__all__ = ["ElevenLabsText2SpeechTool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/tavily_search/tool.py | """Tool for the Tavily search API."""
from typing import Dict, List, Literal, Optional, Tuple, Type, Union
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
class TavilyInput(BaseModel):
    """Input for the Tavily tool."""

    # Free-text query forwarded to the Tavily API.
    query: str = Field(description="search query to look up")
class TavilySearchResults(BaseTool):  # type: ignore[override, override]
    """Tool that queries the Tavily Search API and gets back json.

    Setup:
        Install ``langchain-openai`` and ``tavily-python``, and set environment variable ``TAVILY_API_KEY``.

        .. code-block:: bash

            pip install -U langchain-community tavily-python
            export TAVILY_API_KEY="your-api-key"

    Instantiate:

        .. code-block:: python

            from langchain_community.tools import TavilySearchResults

            tool = TavilySearchResults(
                max_results=5,
                include_answer=True,
                include_raw_content=True,
                include_images=True,
                # search_depth="advanced",
                # include_domains = []
                # exclude_domains = []
            )

    Invoke directly with args:

        .. code-block:: python

            tool.invoke({'query': 'who won the last french open'})

        .. code-block:: python

            '{\n "url": "https://www.nytimes.com...", "content": "Novak Djokovic won the last French Open by beating Casper Ruud ...'

    Invoke with tool call:

        .. code-block:: python

            tool.invoke({"args": {'query': 'who won the last french open'}, "type": "tool_call", "id": "foo", "name": "tavily"})

        .. code-block:: python

            ToolMessage(
                content='{\n "url": "https://www.nytimes.com...", "content": "Novak Djokovic won the last French Open by beating Casper Ruud ...',
                artifact={
                    'query': 'who won the last french open',
                    'follow_up_questions': None,
                    'answer': 'Novak ...',
                    'images': [
                        'https://www.amny.com/wp-content/uploads/2023/06/AP23162622181176-1200x800.jpg',
                        ...
                        ],
                    'results': [
                        {
                            'title': 'Djokovic ...',
                            'url': 'https://www.nytimes.com...',
                            'content': "Novak...",
                            'score': 0.99505633,
                            'raw_content': 'Tennis\nNovak ...'
                            },
                        ...
                        ],
                    'response_time': 2.92
                    },
                tool_call_id='1',
                name='tavily_search_results_json',
                )

    """  # noqa: E501

    name: str = "tavily_search_results_json"
    description: str = (
        "A search engine optimized for comprehensive, accurate, and trusted results. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    args_schema: Type[BaseModel] = TavilyInput
    """The input schema accepted by the tool."""
    max_results: int = 5
    """Max search results to return, default is 5"""
    search_depth: str = "advanced"
    """The depth of the search. It can be "basic" or "advanced"

    .. versionadded:: 0.2.5
    """
    include_domains: List[str] = []
    """A list of domains to specifically include in the search results.

    Default is None, which includes all domains.

    .. versionadded:: 0.2.5
    """
    exclude_domains: List[str] = []
    """A list of domains to specifically exclude from the search results.

    Default is None, which doesn't exclude any domains.

    .. versionadded:: 0.2.5
    """
    include_answer: bool = False
    """Include a short answer to original query in the search results.

    Default is False.

    .. versionadded:: 0.2.5
    """
    include_raw_content: bool = False
    """Include cleaned and parsed HTML of each site search results.

    Default is False.

    .. versionadded:: 0.2.5
    """
    include_images: bool = False
    """Include a list of query related images in the response.

    Default is False.

    .. versionadded:: 0.2.5
    """
    api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper)  # type: ignore[arg-type]
    response_format: Literal["content_and_artifact"] = "content_and_artifact"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Tuple[Union[List[Dict[str, str]], str], Dict]:
        """Use the tool.

        Returns a (cleaned results, raw response) pair; on failure returns
        (repr of the error, empty dict) instead of raising.
        """
        # TODO: remove try/except, should be handled by BaseTool
        try:
            raw_results = self.api_wrapper.raw_results(
                query,
                self.max_results,
                self.search_depth,
                self.include_domains,
                self.exclude_domains,
                self.include_answer,
                self.include_raw_content,
                self.include_images,
            )
        except Exception as e:
            return repr(e), {}
        return self.api_wrapper.clean_results(raw_results["results"]), raw_results

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Tuple[Union[List[Dict[str, str]], str], Dict]:
        """Use the tool asynchronously.

        Mirrors :meth:`_run`, including the error-as-string behavior.
        """
        try:
            raw_results = await self.api_wrapper.raw_results_async(
                query,
                self.max_results,
                self.search_depth,
                self.include_domains,
                self.exclude_domains,
                self.include_answer,
                self.include_raw_content,
                self.include_images,
            )
        except Exception as e:
            return repr(e), {}
        return self.api_wrapper.clean_results(raw_results["results"]), raw_results
class TavilyAnswer(BaseTool):  # type: ignore[override, override]
    """Tool that queries the Tavily Search API and gets back an answer."""

    name: str = "tavily_answer"
    description: str = (
        "A search engine optimized for comprehensive, accurate, and trusted results. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. "
        "This returns only the answer - not the original source data."
    )
    api_wrapper: TavilySearchAPIWrapper = Field(default_factory=TavilySearchAPIWrapper)  # type: ignore[arg-type]
    args_schema: Type[BaseModel] = TavilyInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Return Tavily's synthesized answer, or the repr of any error."""
        try:
            # Fixed options: a shallow search that asks Tavily for its own answer.
            response = self.api_wrapper.raw_results(
                query,
                max_results=5,
                include_answer=True,
                search_depth="basic",
            )
            return response["answer"]
        except Exception as e:
            return repr(e)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Async variant of :meth:`_run` with identical error handling."""
        try:
            response = await self.api_wrapper.raw_results_async(
                query,
                max_results=5,
                include_answer=True,
                search_depth="basic",
            )
            return response["answer"]
        except Exception as e:
            return repr(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/tavily_search/__init__.py | """Tavily Search API toolkit."""
from langchain_community.tools.tavily_search.tool import (
TavilyAnswer,
TavilySearchResults,
)
__all__ = ["TavilySearchResults", "TavilyAnswer"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/text_to_speech.py | from __future__ import annotations
import logging
import tempfile
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureAiServicesTextToSpeechTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure AI Services Text to Speech API.

    Synthesizes the input text into a temporary ``.wav`` file and returns
    the file path. In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/speech-service/get-started-text-to-speech?pivots=programming-language-python
    """

    name: str = "azure_ai_services_text_to_speech"
    description: str = (
        "A wrapper around Azure AI Services Text to Speech API. "
        "Useful for when you need to convert text to speech. "
    )
    return_direct: bool = True

    azure_ai_services_key: str = ""  #: :meta private:
    azure_ai_services_region: str = ""  #: :meta private:
    # Language tag used for synthesis (e.g. "en-US").
    speech_language: str = "en-US"  #: :meta private:
    # azure.cognitiveservices.speech.SpeechConfig, built by the validator below.
    speech_config: Any  #: :meta private:

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the api key and region exist and build the config.

        Reads ``azure_ai_services_key`` / ``azure_ai_services_region`` from
        constructor kwargs or the corresponding environment variables.
        """
        azure_ai_services_key = get_from_dict_or_env(
            values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
        )
        azure_ai_services_region = get_from_dict_or_env(
            values, "azure_ai_services_region", "AZURE_AI_SERVICES_REGION"
        )
        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_ai_services_key, region=azure_ai_services_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
        return values

    def _text_to_speech(self, text: str, speech_language: str) -> str:
        """Synthesize ``text`` and return the path of the resulting .wav file."""
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            # The validator normally guarantees the SDK is importable; raise a
            # clear error instead of a later NameError if it is missing here
            # (the original `except ImportError: pass` hid the real problem).
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        self.speech_config.speech_synthesis_language = speech_language
        speech_synthesizer = speechsdk.SpeechSynthesizer(
            speech_config=self.speech_config, audio_config=None
        )
        result = speech_synthesizer.speak_text(text)

        if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
            stream = speechsdk.AudioDataStream(result)
            # delete=False: the caller consumes the file after this returns.
            with tempfile.NamedTemporaryFile(
                mode="wb", suffix=".wav", delete=False
            ) as f:
                stream.save_to_wav_file(f.name)
            return f.name
        elif result.reason == speechsdk.ResultReason.Canceled:
            cancellation_details = result.cancellation_details
            logger.debug(f"Speech synthesis canceled: {cancellation_details.reason}")
            if cancellation_details.reason == speechsdk.CancellationReason.Error:
                raise RuntimeError(
                    f"Speech synthesis error: {cancellation_details.error_details}"
                )
            return "Speech synthesis canceled."
        else:
            return f"Speech synthesis failed: {result.reason}"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool: synthesize ``query`` and return the audio file path."""
        try:
            speech_file = self._text_to_speech(query, self.speech_language)
            return speech_file
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(
                f"Error while running AzureAiServicesTextToSpeechTool: {e}"
            ) from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/text_analytics_for_health.py | from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
logger = logging.getLogger(__name__)
class AzureAiServicesTextAnalyticsForHealthTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure AI Services Text Analytics for Health API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/language-service/text-analytics-for-health/quickstart?pivots=programming-language-python
    """

    azure_ai_services_key: str = ""  #: :meta private:
    azure_ai_services_endpoint: str = ""  #: :meta private:
    # azure.ai.textanalytics.TextAnalyticsClient, built by the validator below.
    text_analytics_client: Any  #: :meta private:

    name: str = "azure_ai_services_text_analytics_for_health"
    description: str = (
        "A wrapper around Azure AI Services Text Analytics for Health. "
        "Useful for when you need to identify entities in healthcare data. "
        "Input should be text."
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the api key and endpoint exist and build the client."""
        azure_ai_services_key = get_from_dict_or_env(
            values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
        )
        azure_ai_services_endpoint = get_from_dict_or_env(
            values, "azure_ai_services_endpoint", "AZURE_AI_SERVICES_ENDPOINT"
        )
        try:
            import azure.ai.textanalytics as sdk
            from azure.core.credentials import AzureKeyCredential

            values["text_analytics_client"] = sdk.TextAnalyticsClient(
                endpoint=azure_ai_services_endpoint,
                credential=AzureKeyCredential(azure_ai_services_key),
            )
        except ImportError:
            raise ImportError(
                "azure-ai-textanalytics is not installed. "
                "Run `pip install azure-ai-textanalytics` to install."
            )
        return values

    def _text_analysis(self, text: str) -> Dict:
        """Run healthcare-entity analysis on ``text``.

        Returns a dict with an ``entities`` key (human-readable descriptions)
        when at least one document analyzed successfully, otherwise an empty
        dict.
        """
        poller = self.text_analytics_client.begin_analyze_healthcare_entities(
            [{"id": "1", "language": "en", "text": text}]
        )
        # Blocks until the long-running operation completes.
        result = poller.result()

        res_dict = {}
        docs = [doc for doc in result if not doc.is_error]
        # Only report entities when there is a usable document. The original
        # `if docs is not None:` was always true (a list comprehension never
        # yields None), which produced an empty "entities" sentence for
        # error-only results.
        if docs:
            res_dict["entities"] = [
                f"{x.text} is a healthcare entity of type {x.category}"
                for y in docs
                for x in y.entities
            ]
        return res_dict

    def _format_text_analysis_result(self, text_analysis_result: Dict) -> str:
        """Format the analysis dict as a single human-readable string."""
        formatted_result = []
        if "entities" in text_analysis_result:
            formatted_result.append(
                f"""The text contains the following healthcare entities: {
                    ', '.join(text_analysis_result['entities'])
                }""".replace("\n", " ")
            )
        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text_analysis_result = self._text_analysis(query)
            return self._format_text_analysis_result(text_analysis_result)
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(
                f"Error while running AzureAiServicesTextAnalyticsForHealthTool: {e}"
            ) from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/document_intelligence.py | from __future__ import annotations
import logging
from typing import Any, Dict, List, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_ai_services.utils import (
detect_file_src_type,
)
logger = logging.getLogger(__name__)
class AzureAiServicesDocumentIntelligenceTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure AI Services Document Intelligence API.

    Extracts plain text, tables, and key-value pairs from a local or remote
    document using the "prebuilt-document" model.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/quickstarts/get-started-sdks-rest-api?view=doc-intel-4.0.0&pivots=programming-language-python
    """

    azure_ai_services_key: str = ""  #: :meta private:
    azure_ai_services_endpoint: str = ""  #: :meta private:
    # azure.ai.formrecognizer.DocumentAnalysisClient, built by the validator.
    doc_analysis_client: Any  #: :meta private:

    name: str = "azure_ai_services_document_intelligence"
    description: str = (
        "A wrapper around Azure AI Services Document Intelligence. "
        "Useful for when you need to "
        "extract text, tables, and key-value pairs from documents. "
        "Input should be a url to a document."
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        azure_ai_services_key = get_from_dict_or_env(
            values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
        )
        azure_ai_services_endpoint = get_from_dict_or_env(
            values, "azure_ai_services_endpoint", "AZURE_AI_SERVICES_ENDPOINT"
        )
        try:
            from azure.ai.formrecognizer import DocumentAnalysisClient
            from azure.core.credentials import AzureKeyCredential

            values["doc_analysis_client"] = DocumentAnalysisClient(
                endpoint=azure_ai_services_endpoint,
                credential=AzureKeyCredential(azure_ai_services_key),
            )
        except ImportError:
            raise ImportError(
                "azure-ai-formrecognizer is not installed. "
                "Run `pip install azure-ai-formrecognizer` to install."
            )
        return values

    def _parse_tables(self, tables: List[Any]) -> List[Any]:
        """Convert SDK table objects into row-major lists of cell strings."""
        result = []
        for table in tables:
            rc, cc = table.row_count, table.column_count
            # Pre-size the grid; cells may be reported in any order.
            _table = [["" for _ in range(cc)] for _ in range(rc)]
            for cell in table.cells:
                _table[cell.row_index][cell.column_index] = cell.content
            result.append(_table)
        return result

    def _parse_kv_pairs(self, kv_pairs: List[Any]) -> List[Any]:
        """Convert SDK key-value pairs into (key, value) string tuples."""
        result = []
        for kv_pair in kv_pairs:
            # Either side of a pair can be missing in the service response.
            key = kv_pair.key.content if kv_pair.key else ""
            value = kv_pair.value.content if kv_pair.value else ""
            result.append((key, value))
        return result

    def _document_analysis(self, document_path: str) -> Dict:
        """Run "prebuilt-document" analysis on a local path or remote URL.

        Returns a dict that may contain ``content``, ``tables``, and
        ``key_value_pairs`` keys, depending on the service response.
        """
        document_src_type = detect_file_src_type(document_path)
        if document_src_type == "local":
            with open(document_path, "rb") as document:
                poller = self.doc_analysis_client.begin_analyze_document(
                    "prebuilt-document", document
                )
        elif document_src_type == "remote":
            poller = self.doc_analysis_client.begin_analyze_document_from_url(
                "prebuilt-document", document_path
            )
        else:
            raise ValueError(f"Invalid document path: {document_path}")
        # Blocks until the long-running analysis operation completes.
        result = poller.result()
        res_dict = {}
        if result.content is not None:
            res_dict["content"] = result.content
        if result.tables is not None:
            res_dict["tables"] = self._parse_tables(result.tables)
        if result.key_value_pairs is not None:
            res_dict["key_value_pairs"] = self._parse_kv_pairs(result.key_value_pairs)
        return res_dict

    def _format_document_analysis_result(self, document_analysis_result: Dict) -> str:
        """Render the analysis dict as newline-separated human-readable lines."""
        formatted_result = []
        if "content" in document_analysis_result:
            formatted_result.append(
                f"Content: {document_analysis_result['content']}".replace("\n", " ")
            )
        if "tables" in document_analysis_result:
            for i, table in enumerate(document_analysis_result["tables"]):
                formatted_result.append(f"Table {i}: {table}".replace("\n", " "))
        if "key_value_pairs" in document_analysis_result:
            for kv_pair in document_analysis_result["key_value_pairs"]:
                formatted_result.append(
                    f"{kv_pair[0]}: {kv_pair[1]}".replace("\n", " ")
                )
        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            document_analysis_result = self._document_analysis(query)
            if not document_analysis_result:
                return "No good document analysis result was found"
            return self._format_document_analysis_result(document_analysis_result)
        except Exception as e:
            raise RuntimeError(
                f"Error while running AzureAiServicesDocumentIntelligenceTool: {e}"
            )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/speech_to_text.py | from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_ai_services.utils import (
detect_file_src_type,
download_audio_from_url,
)
logger = logging.getLogger(__name__)
class AzureAiServicesSpeechToTextTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure AI Services Speech to Text API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
    """

    azure_ai_services_key: str = ""  #: :meta private:
    azure_ai_services_region: str = ""  #: :meta private:
    # Language tag used for recognition (e.g. "en-US").
    speech_language: str = "en-US"  #: :meta private:
    # azure.cognitiveservices.speech.SpeechConfig, built by the validator below.
    speech_config: Any  #: :meta private:

    name: str = "azure_ai_services_speech_to_text"
    description: str = (
        "A wrapper around Azure AI Services Speech to Text. "
        "Useful for when you need to transcribe audio to text. "
        "Input should be a url to an audio file."
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that the api key and region exist and build the config."""
        azure_ai_services_key = get_from_dict_or_env(
            values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
        )
        azure_ai_services_region = get_from_dict_or_env(
            values, "azure_ai_services_region", "AZURE_AI_SERVICES_REGION"
        )
        try:
            import azure.cognitiveservices.speech as speechsdk

            values["speech_config"] = speechsdk.SpeechConfig(
                subscription=azure_ai_services_key, region=azure_ai_services_region
            )
        except ImportError:
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )
        return values

    def _continuous_recognize(self, speech_recognizer: Any) -> str:
        """Drive continuous recognition to completion; return the transcript."""
        done = False
        text = ""

        def stop_cb(evt: Any) -> None:
            """Callback that stops continuous recognition."""
            speech_recognizer.stop_continuous_recognition_async()
            nonlocal done
            done = True

        def retrieve_cb(evt: Any) -> None:
            """Callback that accumulates intermediate recognition results."""
            nonlocal text
            text += evt.result.text

        # retrieve text on recognized events
        speech_recognizer.recognized.connect(retrieve_cb)
        # stop continuous recognition on either session stopped or canceled events
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)

        # Start continuous speech recognition and poll until a terminal event
        # flips `done` (the SDK delivers events on its own worker threads).
        speech_recognizer.start_continuous_recognition_async()
        while not done:
            time.sleep(0.5)

        return text

    def _speech_to_text(self, audio_path: str, speech_language: str) -> str:
        """Transcribe a local audio file or a remote audio URL to text."""
        try:
            import azure.cognitiveservices.speech as speechsdk
        except ImportError:
            # The validator normally guarantees the SDK is importable; raise a
            # clear error instead of a later NameError if it is missing here
            # (the original `except ImportError: pass` hid the real problem).
            raise ImportError(
                "azure-cognitiveservices-speech is not installed. "
                "Run `pip install azure-cognitiveservices-speech` to install."
            )

        audio_src_type = detect_file_src_type(audio_path)
        if audio_src_type == "local":
            audio_config = speechsdk.AudioConfig(filename=audio_path)
        elif audio_src_type == "remote":
            tmp_audio_path = download_audio_from_url(audio_path)
            audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
        else:
            raise ValueError(f"Invalid audio path: {audio_path}")

        self.speech_config.speech_recognition_language = speech_language
        speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
        return self._continuous_recognize(speech_recognizer)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            text = self._speech_to_text(query, self.speech_language)
            return text
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(
                f"Error while running AzureAiServicesSpeechToTextTool: {e}"
            ) from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/utils.py | import os
import tempfile
from urllib.parse import urlparse
import requests
def detect_file_src_type(file_path: str) -> str:
    """Classify ``file_path`` as "local", "remote", or "invalid".

    A path that exists on disk is "local"; otherwise a string that parses as
    a URL with both scheme and host is "remote"; anything else is "invalid".
    """
    if os.path.isfile(file_path):
        return "local"
    parsed = urlparse(file_path)
    return "remote" if parsed.scheme and parsed.netloc else "invalid"
def download_audio_from_url(audio_url: str) -> str:
    """Download audio from url to local.

    Streams the response into a ``NamedTemporaryFile`` (kept on disk via
    ``delete=False``) and returns the temp-file path. Raises
    ``requests.HTTPError`` for non-2xx responses.
    """
    # Derive the suffix from the URL *path* so query strings or fragments
    # (e.g. "...file.wav?sig=abc") don't leak into the temp-file name, which
    # the original `audio_url.split(".")[-1]` allowed.
    ext = os.path.splitext(urlparse(audio_url).path)[1] or ".audio"
    # Use the response as a context manager so the connection is released
    # deterministically (the original never closed it).
    with requests.get(audio_url, stream=True) as response:
        response.raise_for_status()
        with tempfile.NamedTemporaryFile(mode="wb", suffix=ext, delete=False) as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
    return f.name
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/__init__.py | """Azure AI Services Tools."""
from langchain_community.tools.azure_ai_services.document_intelligence import (
AzureAiServicesDocumentIntelligenceTool,
)
from langchain_community.tools.azure_ai_services.image_analysis import (
AzureAiServicesImageAnalysisTool,
)
from langchain_community.tools.azure_ai_services.speech_to_text import (
AzureAiServicesSpeechToTextTool,
)
from langchain_community.tools.azure_ai_services.text_analytics_for_health import (
AzureAiServicesTextAnalyticsForHealthTool,
)
from langchain_community.tools.azure_ai_services.text_to_speech import (
AzureAiServicesTextToSpeechTool,
)
__all__ = [
"AzureAiServicesDocumentIntelligenceTool",
"AzureAiServicesImageAnalysisTool",
"AzureAiServicesSpeechToTextTool",
"AzureAiServicesTextToSpeechTool",
"AzureAiServicesTextAnalyticsForHealthTool",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/azure_ai_services/image_analysis.py | from __future__ import annotations
import logging
from typing import Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.azure_ai_services.utils import (
detect_file_src_type,
)
logger = logging.getLogger(__name__)
class AzureAiServicesImageAnalysisTool(BaseTool):  # type: ignore[override]
    """Tool that queries the Azure AI Services Image Analysis API.

    In order to set this up, follow instructions at:
    https://learn.microsoft.com/en-us/azure/ai-services/computer-vision/quickstarts-sdk/image-analysis-client-library-40
    """

    azure_ai_services_key: str = ""  #: :meta private:
    azure_ai_services_endpoint: str = ""  #: :meta private:
    # azure.ai.vision.imageanalysis.ImageAnalysisClient, built by the validator.
    image_analysis_client: Any  #: :meta private:
    # List of VisualFeatures requested from the service; set by the validator.
    visual_features: Any  #: :meta private:

    name: str = "azure_ai_services_image_analysis"
    description: str = (
        "A wrapper around Azure AI Services Image Analysis. "
        "Useful for when you need to analyze images. "
        "Input should be a url to an image."
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and endpoint exists in environment."""
        azure_ai_services_key = get_from_dict_or_env(
            values, "azure_ai_services_key", "AZURE_AI_SERVICES_KEY"
        )
        azure_ai_services_endpoint = get_from_dict_or_env(
            values, "azure_ai_services_endpoint", "AZURE_AI_SERVICES_ENDPOINT"
        )
        # Validate that azure-ai-vision-imageanalysis is installed.
        try:
            from azure.ai.vision.imageanalysis import ImageAnalysisClient
            from azure.ai.vision.imageanalysis.models import VisualFeatures
            from azure.core.credentials import AzureKeyCredential
        except ImportError:
            raise ImportError(
                "azure-ai-vision-imageanalysis is not installed. "
                "Run `pip install azure-ai-vision-imageanalysis` to install. "
            )
        # Validate Azure AI Vision Image Analysis client can be initialized.
        try:
            values["image_analysis_client"] = ImageAnalysisClient(
                endpoint=azure_ai_services_endpoint,
                credential=AzureKeyCredential(azure_ai_services_key),
            )
        except Exception as e:
            # Chain the original exception so the root cause stays visible.
            raise RuntimeError(
                f"Initialization of Azure AI Vision Image Analysis client failed: {e}"
            ) from e
        values["visual_features"] = [
            VisualFeatures.TAGS,
            VisualFeatures.OBJECTS,
            VisualFeatures.CAPTION,
            VisualFeatures.READ,
        ]
        return values

    def _image_analysis(self, image_path: str) -> Dict:
        """Analyze a local image file or a remote image URL.

        Returns a dict that may contain ``caption``, ``objects``, ``tags``,
        and ``text`` keys depending on what the service detected.
        """
        # The validator guarantees `image_analysis_client` is a ready
        # ImageAnalysisClient; the original re-imported the SDK here with
        # `except ImportError: pass` purely for a type annotation, which
        # would have produced a NameError if the import actually failed.
        image_src_type = detect_file_src_type(image_path)
        if image_src_type == "local":
            with open(image_path, "rb") as image_file:
                image_data = image_file.read()
            result = self.image_analysis_client.analyze(
                image_data=image_data,
                visual_features=self.visual_features,
            )
        elif image_src_type == "remote":
            result = self.image_analysis_client.analyze_from_url(
                image_url=image_path,
                visual_features=self.visual_features,
            )
        else:
            raise ValueError(f"Invalid image path: {image_path}")

        res_dict = {}
        if result:
            if result.caption is not None:
                res_dict["caption"] = result.caption.text
            if result.objects is not None:
                res_dict["objects"] = [obj.tags[0].name for obj in result.objects.list]
            if result.tags is not None:
                res_dict["tags"] = [tag.name for tag in result.tags.list]
            if result.read is not None and len(result.read.blocks) > 0:
                res_dict["text"] = [line.text for line in result.read.blocks[0].lines]
        return res_dict

    def _format_image_analysis_result(self, image_analysis_result: Dict) -> str:
        """Render the analysis dict as newline-separated human-readable lines."""
        formatted_result = []
        if "caption" in image_analysis_result:
            formatted_result.append("Caption: " + image_analysis_result["caption"])
        if (
            "objects" in image_analysis_result
            and len(image_analysis_result["objects"]) > 0
        ):
            formatted_result.append(
                "Objects: " + ", ".join(image_analysis_result["objects"])
            )
        if "tags" in image_analysis_result and len(image_analysis_result["tags"]) > 0:
            formatted_result.append("Tags: " + ", ".join(image_analysis_result["tags"]))
        if "text" in image_analysis_result and len(image_analysis_result["text"]) > 0:
            formatted_result.append("Text: " + ", ".join(image_analysis_result["text"]))
        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        try:
            image_analysis_result = self._image_analysis(query)
            if not image_analysis_result:
                return "No good image analysis result was found"
            return self._format_image_analysis_result(image_analysis_result)
        except Exception as e:
            # Fixed: the error message previously named a non-existent
            # "AzureAiImageAnalysisTool" class; also chain the cause.
            raise RuntimeError(
                f"Error while running AzureAiServicesImageAnalysisTool: {e}"
            ) from e
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/polygon/ticker_news.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
    """Inputs for Polygon's Ticker News API"""

    # Ticker symbol to fetch news for (e.g. "AAPL").
    query: str
class PolygonTickerNews(BaseTool):  # type: ignore[override, override]
    """Tool that gets the latest news for a given ticker from Polygon"""

    # Operation name dispatched to the API wrapper in `_run`.
    mode: str = "get_ticker_news"
    # Tool identifier exposed to the agent/LLM.
    name: str = "polygon_ticker_news"
    description: str = (
        "A wrapper around Polygon's Ticker News API. "
        "This tool is useful for fetching the latest news for a stock. "
        "Input should be the ticker that you want to get the latest news for."
    )
    args_schema: Type[BaseModel] = Inputs
    # Client for the Polygon REST API; must be supplied by the caller.
    api_wrapper: PolygonAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Polygon API tool.

        Args:
            query: Ticker symbol to fetch the latest news for.
            run_manager: Optional callback manager (unused).
        """
        return self.api_wrapper.run(self.mode, ticker=query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/polygon/last_quote.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
    """Inputs for Polygon's Last Quote API"""

    # Ticker symbol to fetch the last quote for (e.g. "AAPL").
    query: str
class PolygonLastQuote(BaseTool):  # type: ignore[override, override]
    """Tool that gets the last quote of a ticker from Polygon"""

    # Operation name dispatched to the API wrapper in `_run`.
    mode: str = "get_last_quote"
    # Tool identifier exposed to the agent/LLM.
    name: str = "polygon_last_quote"
    description: str = (
        "A wrapper around Polygon's Last Quote API. "
        "This tool is useful for fetching the latest price of a stock. "
        "Input should be the ticker that you want to query the last price quote for."
    )
    args_schema: Type[BaseModel] = Inputs
    # Client for the Polygon REST API; must be supplied by the caller.
    api_wrapper: PolygonAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Polygon API tool.

        Args:
            query: Ticker symbol to fetch the last quote for.
            run_manager: Optional callback manager (unused).
        """
        return self.api_wrapper.run(self.mode, ticker=query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/polygon/aggregates.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.polygon import PolygonAPIWrapper
class PolygonAggregatesSchema(BaseModel):
    """Input for PolygonAggregates."""

    # The Field descriptions below are surfaced to the LLM as the tool's
    # argument schema, so they double as user-facing documentation.
    ticker: str = Field(
        description="The ticker symbol to fetch aggregates for.",
    )
    timespan: str = Field(
        description="The size of the time window. "
        "Possible values are: "
        "second, minute, hour, day, week, month, quarter, year. "
        "Default is 'day'",
    )
    timespan_multiplier: int = Field(
        description="The number of timespans to aggregate. "
        "For example, if timespan is 'day' and "
        "timespan_multiplier is 1, the result will be daily bars. "
        "If timespan is 'day' and timespan_multiplier is 5, "
        "the result will be weekly bars. "
        "Default is 1.",
    )
    from_date: str = Field(
        description="The start of the aggregate time window. "
        "Either a date with the format YYYY-MM-DD or "
        "a millisecond timestamp.",
    )
    to_date: str = Field(
        description="The end of the aggregate time window. "
        "Either a date with the format YYYY-MM-DD or "
        "a millisecond timestamp.",
    )
class PolygonAggregates(BaseTool):  # type: ignore[override, override]
    """
    Tool that gets aggregate bars (stock prices) over a
    given date range for a given ticker from Polygon.
    """

    # Operation name dispatched to the API wrapper in `_run`.
    mode: str = "get_aggregates"
    # Tool identifier exposed to the agent/LLM.
    name: str = "polygon_aggregates"
    description: str = (
        "A wrapper around Polygon's Aggregates API. "
        "This tool is useful for fetching aggregate bars (stock prices) for a ticker. "
        "Input should be the ticker, date range, timespan, and timespan multiplier"
        " that you want to get the aggregate bars for."
    )
    args_schema: Type[PolygonAggregatesSchema] = PolygonAggregatesSchema
    # Client for the Polygon REST API; must be supplied by the caller.
    api_wrapper: PolygonAPIWrapper

    def _run(
        self,
        ticker: str,
        timespan: str,
        timespan_multiplier: int,
        from_date: str,
        to_date: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Polygon API tool.

        Args:
            ticker: Ticker symbol to fetch aggregate bars for.
            timespan: Size of the time window (see args schema).
            timespan_multiplier: Number of timespans aggregated per bar.
            from_date: Window start (YYYY-MM-DD or millisecond timestamp).
            to_date: Window end (YYYY-MM-DD or millisecond timestamp).
            run_manager: Optional callback manager (unused).
        """
        return self.api_wrapper.run(
            mode=self.mode,
            ticker=ticker,
            timespan=timespan,
            timespan_multiplier=timespan_multiplier,
            from_date=from_date,
            to_date=to_date,
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/polygon/__init__.py | """Polygon IO tools."""
from langchain_community.tools.polygon.aggregates import PolygonAggregates
from langchain_community.tools.polygon.financials import PolygonFinancials
from langchain_community.tools.polygon.last_quote import PolygonLastQuote
from langchain_community.tools.polygon.ticker_news import PolygonTickerNews
__all__ = [
"PolygonAggregates",
"PolygonFinancials",
"PolygonLastQuote",
"PolygonTickerNews",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/polygon/financials.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from langchain_community.utilities.polygon import PolygonAPIWrapper
class Inputs(BaseModel):
    """Inputs for Polygon's Financials API"""

    # Ticker symbol to fetch fundamental financial data for (e.g. "AAPL").
    query: str
class PolygonFinancials(BaseTool):  # type: ignore[override, override]
    """Tool that gets the financials of a ticker from Polygon"""

    # Operation name dispatched to the API wrapper in `_run`.
    mode: str = "get_financials"
    # Tool identifier exposed to the agent/LLM.
    name: str = "polygon_financials"
    description: str = (
        "A wrapper around Polygon's Stock Financials API. "
        "This tool is useful for fetching fundamental financials from "
        "balance sheets, income statements, and cash flow statements "
        "for a stock ticker. The input should be the ticker that you want "
        "to get the latest fundamental financial data for."
    )
    args_schema: Type[BaseModel] = Inputs
    # Client for the Polygon REST API; must be supplied by the caller.
    api_wrapper: PolygonAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Polygon API tool.

        Args:
            query: Ticker symbol to fetch financials for.
            run_manager: Optional callback manager (unused).
        """
        return self.api_wrapper.run(self.mode, ticker=query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/extract_hyperlinks.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
if TYPE_CHECKING:
pass
class ExtractHyperlinksToolInput(BaseModel):
    """Input for ExtractHyperlinksTool."""

    # When true, hrefs are resolved against the page URL before returning.
    absolute_urls: bool = Field(
        default=False,
        description="Return absolute URLs instead of relative URLs",
    )
class ExtractHyperlinksTool(BaseBrowserTool):  # type: ignore[override, override]
    """Extract all hyperlinks on the page."""

    name: str = "extract_hyperlinks"
    description: str = "Extract all hyperlinks on the current webpage"
    args_schema: Type[BaseModel] = ExtractHyperlinksToolInput

    @model_validator(mode="before")
    @classmethod
    def check_bs_import(cls, values: dict) -> Any:
        """Verify that beautifulsoup4 is importable before building the tool."""
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError:
            raise ImportError(
                "The 'beautifulsoup4' package is required to use this tool."
                " Please install it with 'pip install beautifulsoup4'."
            )
        return values

    @staticmethod
    def scrape_page(page: Any, html_content: str, absolute_urls: bool) -> str:
        """Collect the href of every anchor element in ``html_content``.

        Returns a JSON array of unique links; when ``absolute_urls`` is true,
        each href is resolved against the page's current URL.
        """
        from urllib.parse import urljoin

        from bs4 import BeautifulSoup

        soup = BeautifulSoup(html_content, "lxml")
        hrefs = (anchor.get("href", "") for anchor in soup.find_all("a"))
        if absolute_urls:
            base_url = page.url
            unique_links = {urljoin(base_url, href) for href in hrefs}
        else:
            unique_links = set(hrefs)
        # Serialize as JSON; the set guarantees each link appears only once.
        return json.dumps(list(unique_links))

    def _run(
        self,
        absolute_urls: bool = False,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        current_page = get_current_page(self.sync_browser)
        markup = current_page.content()
        return self.scrape_page(current_page, markup, absolute_urls)

    async def _arun(
        self,
        absolute_urls: bool = False,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        current_page = await aget_current_page(self.async_browser)
        markup = await current_page.content()
        return self.scrape_page(current_page, markup, absolute_urls)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/base.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Tuple, Type
from langchain_core.tools import BaseTool
from langchain_core.utils import guard_import
from pydantic import model_validator
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
else:
try:
# We do this so pydantic can resolve the types when instantiating
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
except ImportError:
pass
def lazy_import_playwright_browsers() -> Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
    """
    Lazy import playwright browsers.

    Returns:
        Tuple[Type[AsyncBrowser], Type[SyncBrowser]]:
            AsyncBrowser and SyncBrowser classes.
    """
    async_api = guard_import(module_name="playwright.async_api")
    sync_api = guard_import(module_name="playwright.sync_api")
    return async_api.Browser, sync_api.Browser
class BaseBrowserTool(BaseTool):  # type: ignore[override]
    """Base class for browser tools."""

    # Exactly one of these must be supplied; the validator below enforces it.
    sync_browser: Optional["SyncBrowser"] = None
    async_browser: Optional["AsyncBrowser"] = None

    @model_validator(mode="before")
    @classmethod
    def validate_browser_provided(cls, values: dict) -> Any:
        """Check that the arguments are valid."""
        # Importing here surfaces a missing playwright install early.
        lazy_import_playwright_browsers()
        has_sync = values.get("sync_browser") is not None
        has_async = values.get("async_browser") is not None
        if not (has_sync or has_async):
            raise ValueError("Either async_browser or sync_browser must be specified.")
        return values

    @classmethod
    def from_browser(
        cls,
        sync_browser: Optional[SyncBrowser] = None,
        async_browser: Optional[AsyncBrowser] = None,
    ) -> BaseBrowserTool:
        """Instantiate the tool from a sync and/or async playwright browser."""
        lazy_import_playwright_browsers()
        return cls(sync_browser=sync_browser, async_browser=async_browser)  # type: ignore[call-arg]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/extract_text.py | from __future__ import annotations
from typing import Any, Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ExtractTextToolInput(BaseModel):
    """Explicit no-args input for ExtractTextTool.

    The tool takes no arguments; an empty schema makes that explicit to
    schema-driven callers (e.g. function-calling LLMs).
    """
class ExtractTextTool(BaseBrowserTool):  # type: ignore[override, override]
    """Tool for extracting all the text on the current webpage."""

    name: str = "extract_text"
    description: str = "Extract all the text on the current webpage"
    args_schema: Type[BaseModel] = ExtractTextToolInput

    @model_validator(mode="before")
    @classmethod
    def check_bs_import(cls, values: dict) -> Any:
        """Check that ``beautifulsoup4`` is installed.

        Renamed from the garbled ``check_acheck_bs_importrgs``; the validator
        is only invoked via the ``model_validator`` decorator, so the rename
        is safe for callers.
        """
        try:
            from bs4 import BeautifulSoup  # noqa: F401
        except ImportError as e:
            # Chain the original error so the root cause stays visible.
            raise ImportError(
                "The 'beautifulsoup4' package is required to use this tool."
                " Please install it with 'pip install beautifulsoup4'."
            ) from e
        return values

    def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
        """Extract all visible text from the current page (sync)."""
        # Use Beautiful Soup since it's faster than looping through the elements
        from bs4 import BeautifulSoup

        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        page = get_current_page(self.sync_browser)
        html_content = page.content()
        # Parse the HTML content with BeautifulSoup
        soup = BeautifulSoup(html_content, "lxml")
        return " ".join(text for text in soup.stripped_strings)

    async def _arun(
        self, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
    ) -> str:
        """Extract all visible text from the current page (async)."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        # Use Beautiful Soup since it's faster than looping through the elements
        from bs4 import BeautifulSoup

        page = await aget_current_page(self.async_browser)
        html_content = await page.content()
        # Parse the HTML content with BeautifulSoup
        soup = BeautifulSoup(html_content, "lxml")
        return " ".join(text for text in soup.stripped_strings)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/navigate_back.py | from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateBackToolInput(BaseModel):
    """Explicit no-args input for NavigateBackTool.

    The tool takes no arguments; an empty schema makes that explicit to
    schema-driven callers (e.g. function-calling LLMs).
    """
class NavigateBackTool(BaseBrowserTool):  # type: ignore[override, override]
    """Navigate back to the previous page in the browser history."""

    name: str = "previous_webpage"
    description: str = "Navigate back to the previous page in the browser history"
    args_schema: Type[BaseModel] = NavigateBackToolInput

    def _run(self, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
        """Go back one entry in the synchronous browser's history."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        page = get_current_page(self.sync_browser)
        response = page.go_back()
        # go_back() returns None when there is no history to go back to.
        if not response:
            return "Unable to navigate back; no previous page in the history"
        return (
            f"Navigated back to the previous page with URL '{response.url}'."
            f" Status code {response.status}"
        )

    async def _arun(
        self,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Go back one entry in the asynchronous browser's history."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        page = await aget_current_page(self.async_browser)
        response = await page.go_back()
        # go_back() returns None when there is no history to go back to.
        if not response:
            return "Unable to navigate back; no previous page in the history"
        return (
            f"Navigated back to the previous page with URL '{response.url}'."
            f" Status code {response.status}"
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/click.py | from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class ClickToolInput(BaseModel):
    """Input for ClickTool."""

    # CSS selector identifying the element the tool should click.
    selector: str = Field(..., description="CSS selector for the element to click")
class ClickTool(BaseBrowserTool):  # type: ignore[override, override, override]
    """Tool for clicking on an element with the given CSS selector."""

    name: str = "click_element"
    description: str = "Click on an element with the given CSS selector"
    args_schema: Type[BaseModel] = ClickToolInput

    visible_only: bool = True
    """Whether to consider only visible elements."""
    playwright_strict: bool = False
    """Whether to employ Playwright's strict mode when clicking on elements."""
    playwright_timeout: float = 1_000
    """Timeout (in ms) for Playwright to wait for element to be ready."""

    def _selector_effective(self, selector: str) -> str:
        """Append Playwright's visibility filter when configured to do so."""
        if self.visible_only:
            return f"{selector} >> visible=1"
        return selector

    def _run(
        self,
        selector: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Click the selected element in the synchronous browser."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        from playwright.sync_api import TimeoutError as PlaywrightTimeoutError

        # The page must already be on the target URL before this tool is used.
        page = get_current_page(self.sync_browser)
        try:
            page.click(
                self._selector_effective(selector=selector),
                strict=self.playwright_strict,
                timeout=self.playwright_timeout,
            )
        except PlaywrightTimeoutError:
            return f"Unable to click on element '{selector}'"
        return f"Clicked element '{selector}'"

    async def _arun(
        self,
        selector: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Click the selected element in the asynchronous browser."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        from playwright.async_api import TimeoutError as PlaywrightTimeoutError

        # The page must already be on the target URL before this tool is used.
        page = await aget_current_page(self.async_browser)
        try:
            await page.click(
                self._selector_effective(selector=selector),
                strict=self.playwright_strict,
                timeout=self.playwright_timeout,
            )
        except PlaywrightTimeoutError:
            return f"Unable to click on element '{selector}'"
        return f"Clicked element '{selector}'"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/navigate.py | from __future__ import annotations
from typing import Optional, Type
from urllib.parse import urlparse
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field, model_validator
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class NavigateToolInput(BaseModel):
    """Input for NavigateToolInput."""

    url: str = Field(..., description="url to navigate to")

    @model_validator(mode="before")
    @classmethod
    def validate_url_scheme(cls, values: dict) -> dict:
        """Reject any URL whose scheme is not http(s)."""
        scheme = urlparse(values.get("url")).scheme
        if scheme != "http" and scheme != "https":
            raise ValueError("URL scheme must be 'http' or 'https'")
        return values
class NavigateTool(BaseBrowserTool):  # type: ignore[override, override]
    """Tool for navigating a browser to a URL.

    **Security Note**: This tool provides code to control web-browser navigation.

        This tool can navigate to any URL, including internal network URLs, and
        URLs exposed on the server itself. If exposing this tool to end-users,
        consider limiting network access to the server that hosts the agent.

        By default, the URL scheme has been limited to 'http' and 'https' to
        prevent navigation to local file system URLs (or other schemes). If
        access to the local file system is required, consider creating a custom
        tool or providing a custom args_schema that allows the desired schemes.

        See https://python.langchain.com/docs/security for more information.
    """

    name: str = "navigate_browser"
    description: str = "Navigate a browser to the specified URL"
    args_schema: Type[BaseModel] = NavigateToolInput

    def _run(
        self,
        url: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Navigate the synchronous browser to ``url``."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        response = get_current_page(self.sync_browser).goto(url)
        # goto() may return None (e.g. navigation to the same URL with a hash).
        status = "unknown" if not response else response.status
        return f"Navigating to {url} returned status code {status}"

    async def _arun(
        self,
        url: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Navigate the asynchronous browser to ``url``."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        page = await aget_current_page(self.async_browser)
        response = await page.goto(url)
        # goto() may return None (e.g. navigation to the same URL with a hash).
        status = "unknown" if not response else response.status
        return f"Navigating to {url} returned status code {status}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/utils.py | """Utilities for the Playwright browser tools."""
from __future__ import annotations
import asyncio
from typing import TYPE_CHECKING, Any, Coroutine, List, Optional, TypeVar
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Browser as SyncBrowser
from playwright.sync_api import Page as SyncPage
async def aget_current_page(browser: AsyncBrowser) -> AsyncPage:
    """
    Asynchronously get the current page of the browser.

    Args:
        browser: The browser (AsyncBrowser) to get the current page from.

    Returns:
        AsyncPage: The current page (a fresh one if none exists yet).
    """
    contexts = browser.contexts
    if not contexts:
        fresh_context = await browser.new_context()
        return await fresh_context.new_page()
    # Assume the default (first) browser context is the one in use.
    context = contexts[0]
    if not context.pages:
        return await context.new_page()
    # Treat the most recently opened page as the active one.
    return context.pages[-1]
def get_current_page(browser: SyncBrowser) -> SyncPage:
    """
    Get the current page of the browser.

    Args:
        browser: The browser to get the current page from.

    Returns:
        SyncPage: The current page (a fresh one if none exists yet).
    """
    contexts = browser.contexts
    if not contexts:
        fresh_context = browser.new_context()
        return fresh_context.new_page()
    # Assume the default (first) browser context is the one in use.
    context = contexts[0]
    if not context.pages:
        return context.new_page()
    # Treat the most recently opened page as the active one.
    return context.pages[-1]
def create_async_playwright_browser(
    headless: bool = True, args: Optional[List[str]] = None
) -> AsyncBrowser:
    """
    Create an async playwright browser.

    Args:
        headless: Whether to run the browser in headless mode. Defaults to True.
        args: arguments to pass to browser.chromium.launch

    Returns:
        AsyncBrowser: The playwright browser.
    """
    from playwright.async_api import async_playwright

    # This is the Playwright driver object (not a browser); launching
    # chromium from it yields the actual browser instance.
    playwright = run_async(async_playwright().start())
    return run_async(playwright.chromium.launch(headless=headless, args=args))
def create_sync_playwright_browser(
    headless: bool = True, args: Optional[List[str]] = None
) -> SyncBrowser:
    """
    Create a playwright browser.

    Args:
        headless: Whether to run the browser in headless mode. Defaults to True.
        args: arguments to pass to browser.chromium.launch

    Returns:
        SyncBrowser: The playwright browser.
    """
    from playwright.sync_api import sync_playwright

    # This is the Playwright driver object (not a browser); launching
    # chromium from it yields the actual browser instance.
    playwright = sync_playwright().start()
    return playwright.chromium.launch(headless=headless, args=args)
T = TypeVar("T")


def run_async(coro: Coroutine[Any, Any, T]) -> T:
    """Run an async coroutine to completion and return its result.

    Args:
        coro: The coroutine to run. Coroutine[Any, Any, T]

    Returns:
        T: The result of the coroutine.
    """
    # asyncio.get_event_loop() is deprecated when no loop exists and raises
    # RuntimeError in threads other than the main thread. Fall back to
    # creating and installing a fresh loop in that case.
    try:
        event_loop = asyncio.get_event_loop()
    except RuntimeError:
        event_loop = asyncio.new_event_loop()
        asyncio.set_event_loop(event_loop)
    return event_loop.run_until_complete(coro)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/__init__.py | """Browser tools and toolkit."""
from langchain_community.tools.playwright.click import ClickTool
from langchain_community.tools.playwright.current_page import CurrentWebPageTool
from langchain_community.tools.playwright.extract_hyperlinks import (
ExtractHyperlinksTool,
)
from langchain_community.tools.playwright.extract_text import ExtractTextTool
from langchain_community.tools.playwright.get_elements import GetElementsTool
from langchain_community.tools.playwright.navigate import NavigateTool
from langchain_community.tools.playwright.navigate_back import NavigateBackTool
__all__ = [
"NavigateTool",
"NavigateBackTool",
"ExtractTextTool",
"ExtractHyperlinksTool",
"GetElementsTool",
"ClickTool",
"CurrentWebPageTool",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/current_page.py | from __future__ import annotations
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
class CurrentWebPageToolInput(BaseModel):
    """Explicit no-args input for CurrentWebPageTool.

    The tool takes no arguments; an empty schema makes that explicit to
    schema-driven callers (e.g. function-calling LLMs).
    """
class CurrentWebPageTool(BaseBrowserTool):  # type: ignore[override, override]
    """Tool for getting the URL of the current webpage."""

    name: str = "current_webpage"
    description: str = "Returns the URL of the current page"
    args_schema: Type[BaseModel] = CurrentWebPageToolInput

    def _run(
        self,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the current URL from the synchronous browser."""
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        current = get_current_page(self.sync_browser)
        return str(current.url)

    async def _arun(
        self,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Return the current URL from the asynchronous browser."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        current = await aget_current_page(self.async_browser)
        return str(current.url)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/playwright/get_elements.py | from __future__ import annotations
import json
from typing import TYPE_CHECKING, List, Optional, Sequence, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from pydantic import BaseModel, Field
from langchain_community.tools.playwright.base import BaseBrowserTool
from langchain_community.tools.playwright.utils import (
aget_current_page,
get_current_page,
)
if TYPE_CHECKING:
from playwright.async_api import Page as AsyncPage
from playwright.sync_api import Page as SyncPage
class GetElementsToolInput(BaseModel):
    """Input for GetElementsTool."""

    # CSS selector to match elements against.
    selector: str = Field(
        ...,
        description="CSS selector, such as '*', 'div', 'p', 'a', #id, .classname",
    )
    # Attribute names to extract per element; defaults to innerText only.
    # default_factory avoids sharing one list across model instances.
    attributes: List[str] = Field(
        default_factory=lambda: ["innerText"],
        description="Set of attributes to retrieve for each element",
    )
async def _aget_elements(
page: AsyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = await page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = await element.inner_text()
else:
val = await element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
def _get_elements(
page: SyncPage, selector: str, attributes: Sequence[str]
) -> List[dict]:
"""Get elements matching the given CSS selector."""
elements = page.query_selector_all(selector)
results = []
for element in elements:
result = {}
for attribute in attributes:
if attribute == "innerText":
val: Optional[str] = element.inner_text()
else:
val = element.get_attribute(attribute)
if val is not None and val.strip() != "":
result[attribute] = val
if result:
results.append(result)
return results
class GetElementsTool(BaseBrowserTool):  # type: ignore[override, override]
    """Tool for getting elements in the current web page matching a CSS selector."""

    name: str = "get_elements"
    description: str = (
        "Retrieve elements in the current web page matching the given CSS selector"
    )
    args_schema: Type[BaseModel] = GetElementsToolInput

    def _run(
        self,
        selector: str,
        # Immutable tuple instead of a mutable list default (B006); callers
        # may still pass any Sequence[str].
        attributes: Sequence[str] = ("innerText",),
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool.

        Args:
            selector: CSS selector to match elements against.
            attributes: Attribute names to extract from each matched element.
            run_manager: Callback manager for this run (unused).

        Returns:
            A JSON array (as a string) of per-element attribute dicts.
        """
        if self.sync_browser is None:
            raise ValueError(f"Synchronous browser not provided to {self.name}")
        page = get_current_page(self.sync_browser)
        # Navigate to the desired webpage before using this tool
        results = _get_elements(page, selector, attributes)
        return json.dumps(results, ensure_ascii=False)

    async def _arun(
        self,
        selector: str,
        # Immutable tuple instead of a mutable list default (B006).
        attributes: Sequence[str] = ("innerText",),
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Async variant of :meth:`_run`; same arguments and return value."""
        if self.async_browser is None:
            raise ValueError(f"Asynchronous browser not provided to {self.name}")
        page = await aget_current_page(self.async_browser)
        # Navigate to the desired webpage before using this tool
        results = await _aget_elements(page, selector, attributes)
        return json.dumps(results, ensure_ascii=False)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/bing_search/tool.py | """Tool for the Bing search API."""
from typing import Dict, List, Literal, Optional, Tuple
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.bing_search import BingSearchAPIWrapper
class BingSearchRun(BaseTool):  # type: ignore[override]
    """Tool that queries the Bing search API."""

    name: str = "bing_search"
    description: str = (
        "A wrapper around Bing Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query."
    )
    api_wrapper: BingSearchAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Delegate the search query to the wrapped Bing API client."""
        wrapper = self.api_wrapper
        return wrapper.run(query)
class BingSearchResults(BaseTool):  # type: ignore[override, override]
    """Bing Search tool returning structured results.

    Setup:
        Install ``langchain-community`` and set environment variable
        ``BING_SUBSCRIPTION_KEY``.

        .. code-block:: bash

            pip install -U langchain-community
            export BING_SUBSCRIPTION_KEY="your-api-key"

    Instantiation:
        .. code-block:: python

            from langchain_community.tools.bing_search import BingSearchResults
            from langchain_community.utilities import BingSearchAPIWrapper

            api_wrapper = BingSearchAPIWrapper()
            tool = BingSearchResults(api_wrapper=api_wrapper)

    Invocation with args:
        .. code-block:: python

            tool.invoke({"query": "what is the weather in SF?"})

        .. code-block:: python

            "[{'snippet': '<b>San Francisco, CA</b> <b>Weather</b> Forecast, with current conditions, wind, air quality, ...', 'title': 'San Francisco, CA Weather Forecast | AccuWeather', 'link': 'https://www.accuweather.com/en/us/san-francisco/94103/weather-forecast/347629'}, ...]"

    Invocation with ToolCall:
        .. code-block:: python

            tool.invoke({"args": {"query": "what is the weather in SF?"}, "id": "1", "name": tool.name, "type": "tool_call"})

        .. code-block:: python

            ToolMessage(
                content="[{'snippet': '...', 'title': 'San Francisco, CA Weather Forecast | AccuWeather', 'link': 'https://www.accuweather.com/en/us/san-francisco/94103/weather-forecast/347629'}, ...]",
                artifact=[{'snippet': '...', 'title': '...', 'link': '...'}, ...],
                name='bing_search_results_json',
                tool_call_id='1'
            )
    """  # noqa: E501

    name: str = "bing_search_results_json"
    description: str = (
        "A wrapper around Bing Search. "
        "Useful for when you need to answer questions about current events. "
        "Input should be a search query. Output is an array of the query results."
    )
    num_results: int = 4
    """Max search results to return, default is 4."""
    api_wrapper: BingSearchAPIWrapper
    # Emit both a string rendering (content) and the raw result list
    # (artifact) so downstream consumers can use either form.
    response_format: Literal["content_and_artifact"] = "content_and_artifact"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Tuple[str, List[Dict]]:
        """Use the tool."""
        try:
            results = self.api_wrapper.results(query, self.num_results)
            return str(results), results
        except Exception as e:
            # Best-effort: surface the error text to the model instead of
            # raising, with an empty artifact list.
            return repr(e), []
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/bing_search/__init__.py | """Bing Search API toolkit."""
from langchain_community.tools.bing_search.tool import BingSearchResults, BingSearchRun
__all__ = ["BingSearchRun", "BingSearchResults"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_places/tool.py | """Tool for the Google search API."""
from typing import Optional, Type
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.google_places_api import GooglePlacesAPIWrapper
class GooglePlacesSchema(BaseModel):
    """Input for GooglePlacesTool."""

    # Free-text place/address query forwarded to the Google Maps Places API.
    query: str = Field(..., description="Query for google maps")
@deprecated(
    since="0.0.33",
    removal="1.0",
    alternative_import="langchain_google_community.GooglePlacesTool",
)
class GooglePlacesTool(BaseTool):  # type: ignore[override, override]
    """Tool that queries the Google places API.

    Deprecated: use ``langchain_google_community.GooglePlacesTool`` instead.
    """

    name: str = "google_places"
    # Fixed typo in the LLM-facing description: "addressed" -> "addresses".
    description: str = (
        "A wrapper around Google Places. "
        "Useful for when you need to validate or "
        "discover addresses from ambiguous text. "
        "Input should be a search query."
    )
    api_wrapper: GooglePlacesAPIWrapper = Field(default_factory=GooglePlacesAPIWrapper)  # type: ignore[arg-type]
    args_schema: Type[BaseModel] = GooglePlacesSchema

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Delegate the query to the Google Places API wrapper."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_places/__init__.py | """Google Places API Toolkit."""
from langchain_community.tools.google_places.tool import GooglePlacesTool
__all__ = ["GooglePlacesTool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/json/tool.py | # flake8: noqa
"""Tools for working with JSON specs."""
from __future__ import annotations
import json
import re
from pathlib import Path
from typing import Dict, List, Optional, Union
from pydantic import BaseModel
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
def _parse_input(text: str) -> List[Union[str, int]]:
"""Parse input of the form data["key1"][0]["key2"] into a list of keys."""
_res = re.findall(r"\[.*?]", text)
# strip the brackets and quotes, convert to int if possible
res = [i[1:-1].replace('"', "").replace("'", "") for i in _res]
res = [int(i) if i.isdigit() else i for i in res]
return res
class JsonSpec(BaseModel):
    """Base class for JSON spec."""

    # The parsed JSON document to explore.
    dict_: Dict
    # Values longer than this many characters are truncated by ``value``.
    max_value_length: int = 200

    @classmethod
    def from_file(cls, path: Path) -> JsonSpec:
        """Create a JsonSpec from a file.

        Raises:
            FileNotFoundError: If ``path`` does not exist.
        """
        if not path.exists():
            raise FileNotFoundError(f"File not found: {path}")
        dict_ = json.loads(path.read_text())
        return cls(dict_=dict_)

    def keys(self, text: str) -> str:
        """Return the keys of the dict at the given path.

        Args:
            text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]).
        """
        try:
            items = _parse_input(text)
            val = self.dict_
            for i in items:
                # Traverse every component, mirroring ``value``. The previous
                # ``if i:`` guard skipped falsy components (notably the list
                # index 0), silently returning keys of the wrong object.
                val = val[i]
            if not isinstance(val, dict):
                raise ValueError(
                    f"Value at path `{text}` is not a dict, get the value directly."
                )
            return str(list(val.keys()))
        except Exception as e:
            # Errors are reported back as text for the calling agent.
            return repr(e)

    def value(self, text: str) -> str:
        """Return the value of the dict at the given path.

        Args:
            text: Python representation of the path to the dict (e.g. data["key1"][0]["key2"]).
        """
        try:
            items = _parse_input(text)
            val = self.dict_
            for i in items:
                val = val[i]
            if isinstance(val, dict) and len(str(val)) > self.max_value_length:
                return "Value is a large dictionary, should explore its keys directly"
            str_val = str(val)
            if len(str_val) > self.max_value_length:
                # Truncate long values so the agent's context is not flooded.
                str_val = str_val[: self.max_value_length] + "..."
            return str_val
        except Exception as e:
            # Errors are reported back as text for the calling agent.
            return repr(e)
class JsonListKeysTool(BaseTool):  # type: ignore[override]
    """Tool for listing keys in a JSON spec."""

    name: str = "json_spec_list_keys"
    description: str = """
    Can be used to list all keys at a given path.
    Before calling this you should be SURE that the path to this exists.
    The input is a text representation of the path to the dict in Python syntax (e.g. data["key1"][0]["key2"]).
    """
    spec: JsonSpec

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """List the keys found at ``tool_input`` within the spec."""
        return self.spec.keys(tool_input)

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Async variant; the lookup is CPU-only, so reuse the sync path."""
        return self._run(tool_input)
class JsonGetValueTool(BaseTool):  # type: ignore[override]
    """Tool for getting a value in a JSON spec."""

    name: str = "json_spec_get_value"
    description: str = """
    Can be used to see value in string format at a given path.
    Before calling this you should be SURE that the path to this exists.
    The input is a text representation of the path to the dict in Python syntax (e.g. data["key1"][0]["key2"]).
    """
    spec: JsonSpec

    def _run(
        self,
        tool_input: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Render the value found at ``tool_input`` within the spec."""
        return self.spec.value(tool_input)

    async def _arun(
        self,
        tool_input: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Async variant; the lookup is CPU-only, so reuse the sync path."""
        return self._run(tool_input)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/json/__init__.py | """Tools for interacting with a JSON file."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/rememberizer.py | """Wrapper for Rememberizer APIs."""
from typing import Any, Dict, List, Optional, cast
import requests
from langchain_core.documents import Document
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, model_validator
class RememberizerAPIWrapper(BaseModel):
    """Wrapper for Rememberizer APIs."""

    # Maximum number of matched chunks to request per search.
    top_k_results: int = 10
    rememberizer_api_key: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key in environment."""
        rememberizer_api_key = get_from_dict_or_env(
            values, "rememberizer_api_key", "REMEMBERIZER_API_KEY"
        )
        values["rememberizer_api_key"] = rememberizer_api_key
        return values

    def search(self, query: str) -> List[dict]:
        """Search for a query in the Rememberizer API.

        Returns:
            The list of matched chunks. (The previous ``-> dict`` annotation
            was wrong: the method has always returned the ``matched_chunks``
            list.)

        Raises:
            ValueError: If the API responds with a non-200 status code.
        """
        # Pass the query via ``params`` so special characters are URL-encoded;
        # the previous f-string interpolation sent them unescaped.
        response = requests.get(
            "https://api.rememberizer.ai/api/v1/documents/search",
            params={"q": query, "n": self.top_k_results},
            headers={"x-api-key": cast(str, self.rememberizer_api_key)},
        )
        data = response.json()
        if response.status_code != 200:
            raise ValueError(f"API Error: {data}")
        matched_chunks = data.get("matched_chunks", [])
        return matched_chunks

    def load(self, query: str) -> List[Document]:
        """Search and wrap each matched chunk in a ``Document``."""
        matched_chunks = self.search(query)
        docs = []
        for matched_chunk in matched_chunks:
            docs.append(
                Document(
                    # Chunk text becomes the page content; the owning
                    # document's metadata dict rides along as-is.
                    page_content=matched_chunk["matched_content"],
                    metadata=matched_chunk["document"],
                )
            )
        return docs
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/redis.py | from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING, Any, List, Optional, Pattern
from urllib.parse import urlparse
import numpy as np
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from redis.client import Redis as RedisType
def _array_to_buffer(array: List[float], dtype: Any = np.float32) -> bytes:
return np.array(array).astype(dtype).tobytes()
def _buffer_to_array(buffer: bytes, dtype: Any = np.float32) -> List[float]:
return np.frombuffer(buffer, dtype=dtype).tolist()
class TokenEscaper:
    """Escape RediSearch-reserved punctuation within an input string.

    Escaping rules:
    https://redis.io/docs/stack/search/reference/escaping/#the-rules-of-text-field-tokenization
    """

    # Characters that RediSearch requires us to escape during queries.
    DEFAULT_ESCAPED_CHARS: str = r"[,.<>{}\[\]\\\"\':;!@#$%^&*()\-+=~\/ ]"

    def __init__(self, escape_chars_re: Optional[Pattern] = None):
        # Fall back to the default character class unless the caller
        # supplies a pre-compiled pattern.
        if escape_chars_re:
            self.escaped_chars_re = escape_chars_re
        else:
            self.escaped_chars_re = re.compile(self.DEFAULT_ESCAPED_CHARS)

    def escape(self, value: str) -> str:
        """Return *value* with every reserved character backslash-escaped."""
        if not isinstance(value, str):
            raise TypeError(
                "Value must be a string object for token escaping."
                f"Got type {type(value)}"
            )
        return self.escaped_chars_re.sub(lambda m: f"\\{m.group(0)}", value)
def check_redis_module_exist(client: "RedisType", required_modules: List[dict]) -> None:
    """Check if the correct Redis modules are installed.

    Args:
        client: Connected Redis client used to issue ``MODULE LIST``.
        required_modules: Dicts with ``name`` and ``ver`` keys; at least one
            must be installed at or above the given version.

    Raises:
        ValueError: if none of the required modules is installed.
    """
    installed_modules = client.module_list()
    # MODULE LIST returns byte-keyed dicts; index them by decoded module name.
    installed_modules = {
        module[b"name"].decode("utf-8"): module for module in installed_modules
    }
    for module in required_modules:
        if module["name"] in installed_modules and int(
            installed_modules[module["name"]][b"ver"]
        ) >= int(module["ver"]):
            return
    # Otherwise raise. (Message fixed: the fragments previously concatenated
    # without separating spaces, producing "...>=2.4Please head to
    # ...quick_start/to know more...".)
    error_message = (
        "Redis cannot be used as a vector database without RediSearch >=2.4. "
        "Please head to https://redis.io/docs/stack/search/quick_start/ "
        "to know more about installing the RediSearch module within Redis Stack."
    )
    logger.error(error_message)
    raise ValueError(error_message)
def get_client(redis_url: str, **kwargs: Any) -> RedisType:
    """Create a Redis client from a connection URL.

    Accepts plain Redis server URLs (TCP with/without TLS, or unix socket)
    as well as Redis Sentinel URLs. Redis Cluster deployments are detected
    after connecting and the client is swapped for a cluster client.

    For sentinel connections use the (un-official) ``redis+sentinel://``
    scheme (``rediss+sentinel://`` for TLS). The URL path selects the
    sentinel service name (default ``mymaster``) and, optionally, the db
    number::

        redis+sentinel://username:password@sentinelhost:26379/mymaster/0

    The same username/password are used for both the sentinel and the redis
    server; distinct credentials per system are not supported, and only a
    single sentinel host may be given.

    Example:
        .. code-block:: python

            from langchain_community.utilities.redis import get_client

            redis_client = get_client(
                redis_url="redis://username:password@localhost:6379"
            )

    Raises:
        ImportError: if the ``redis`` python package is not installed.
    """
    try:
        import redis
    except ImportError:
        raise ImportError(
            "Could not import redis python package. "
            "Please install it with `pip install redis>=4.1.0`."
        )

    # "rediss+sentinel" does not prefix-match "redis+sentinel", so the two
    # sentinel branches are disjoint and may be tested in either order.
    if redis_url.startswith("rediss+sentinel"):
        # Sentinel over TLS: force ssl on, relax cert checking unless the
        # caller configured it explicitly.
        kwargs["ssl"] = True
        kwargs.setdefault("ssl_cert_reqs", "none")
        return _redis_sentinel_client(redis_url, **kwargs)
    if redis_url.startswith("redis+sentinel"):
        return _redis_sentinel_client(redis_url, **kwargs)

    # Plain server URL; reconnect with a cluster client when the node
    # reports cluster mode enabled.
    client = redis.from_url(redis_url, **kwargs)
    if _check_for_cluster(client):
        client.close()
        client = _redis_cluster_client(redis_url, **kwargs)
    return client
def _redis_sentinel_client(redis_url: str, **kwargs: Any) -> RedisType:
    """Parse an (un-official) ``redis+sentinel`` URL into a redis client.

    Creates a Sentinel connection and fetches the final redis client
    connection to a replica-master for read-write operations.

    If a username and/or password is given, the same credentials are used
    for the Redis Sentinel as well as the Redis Server; with a URL alone it
    is not possible to authenticate differently against the two systems.
    """
    import redis

    parsed_url = urlparse(redis_url)
    # sentinel needs list with (host, port) tuple, use default port if none available
    sentinel_list = [(parsed_url.hostname or "localhost", parsed_url.port or 26379)]
    if parsed_url.path:
        # "/mymaster/0" first part is service name, optional second part is db number
        path_parts = parsed_url.path.split("/")
        service_name = path_parts[1] or "mymaster"
        if len(path_parts) > 2:
            kwargs["db"] = path_parts[2]
    else:
        service_name = "mymaster"

    # Credentials are duplicated: sentinel auth goes through sentinel_kwargs,
    # redis-server auth through the regular connection kwargs.
    sentinel_args = {}
    if parsed_url.password:
        sentinel_args["password"] = parsed_url.password
        kwargs["password"] = parsed_url.password
    if parsed_url.username:
        sentinel_args["username"] = parsed_url.username
        kwargs["username"] = parsed_url.username

    # check for all SSL related properties and copy them into sentinel_kwargs too,
    # add client_name also
    for arg in kwargs:
        if arg.startswith("ssl") or arg == "client_name":
            sentinel_args[arg] = kwargs[arg]

    # sentinel user/pass is part of sentinel_kwargs, user/pass for redis server
    # connection as direct parameter in kwargs
    sentinel_client = redis.sentinel.Sentinel(
        sentinel_list, sentinel_kwargs=sentinel_args, **kwargs
    )

    # redis server might have password but not sentinel - fetch this error and try
    # again without pass, everything else cannot be handled here -> user needed
    try:
        sentinel_client.execute_command("ping")
    except redis.exceptions.AuthenticationError as ae:
        if "no password is set" in ae.args[0]:
            logger.warning(
                "Redis sentinel connection configured with password but Sentinel \
answered NO PASSWORD NEEDED - Please check Sentinel configuration"
            )
            # Retry without sentinel_kwargs so no password is sent to sentinel.
            sentinel_client = redis.sentinel.Sentinel(sentinel_list, **kwargs)
        else:
            raise ae
    return sentinel_client.master_for(service_name)
def _check_for_cluster(redis_client: RedisType) -> bool:
    """Return True when the connected server reports cluster mode enabled."""
    import redis

    try:
        return redis_client.info("cluster")["cluster_enabled"] == 1
    except redis.exceptions.RedisError:
        # INFO failed; treat the server as a non-cluster deployment.
        return False
def _redis_cluster_client(redis_url: str, **kwargs: Any) -> RedisType:
    """Build a cluster-aware client for the same connection URL."""
    from redis.cluster import RedisCluster

    client = RedisCluster.from_url(redis_url, **kwargs)
    return client  # type: ignore[return-value]
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/mojeek_search.py | import json
from typing import List
import requests
from pydantic import BaseModel, Field
class MojeekSearchAPIWrapper(BaseModel):
    """Wrapper around the Mojeek Search API.

    Requires an ``api_key``; extra query parameters can be supplied via
    ``search_kwargs``.
    """

    api_key: str
    # Extra query parameters merged into every request.
    search_kwargs: dict = Field(default_factory=dict)
    api_url: str = "https://api.mojeek.com/search"

    def run(self, query: str) -> str:
        """Search for ``query`` and return a JSON string of result summaries."""
        search_results = self._search(query)
        # Keep only the fields useful to downstream consumers.
        results = [
            {
                "title": result.get("title", ""),
                "url": result.get("url", ""),
                "desc": result.get("desc", ""),
            }
            for result in search_results
        ]
        return json.dumps(results)

    def _search(self, query: str) -> List[dict]:
        """Perform the HTTP request and return the raw result dicts.

        Raises:
            ValueError: if URL preparation unexpectedly fails.
            Exception: on a non-2xx HTTP response.
        """
        headers = {
            "Accept": "application/json",
        }
        # Use a PreparedRequest purely for safe query-string encoding.
        req = requests.PreparedRequest()
        request = {
            **self.search_kwargs,
            **{"q": query, "fmt": "json", "api_key": self.api_key},
        }
        req.prepare_url(self.api_url, request)
        if req.url is None:
            raise ValueError("prepared url is None, this should not happen")

        response = requests.get(req.url, headers=headers)
        if not response.ok:
            raise Exception(f"HTTP error {response.status_code}")
        return response.json().get("response", {}).get("results", [])
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/polygon.py | """
Util that calls several of Polygon's stock market REST APIs.
Docs: https://polygon.io/docs/stocks/getting-started
"""
import json
from typing import Any, Dict, Optional
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, model_validator
POLYGON_BASE_URL = "https://api.polygon.io/"
class PolygonAPIWrapper(BaseModel):
    """Wrapper for Polygon API.

    Requires ``POLYGON_API_KEY`` in the environment, or pass
    ``polygon_api_key`` to the constructor.
    """

    polygon_api_key: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key in environment."""
        polygon_api_key = get_from_dict_or_env(
            values, "polygon_api_key", "POLYGON_API_KEY"
        )
        values["polygon_api_key"] = polygon_api_key
        return values

    def _get_results(self, url: str) -> Optional[dict]:
        """GET ``url`` and return its ``results`` payload.

        Shared by all endpoint helpers so the request/status handling lives
        in one place instead of being duplicated per endpoint.

        Raises:
            ValueError: if the API reports a non-success status.
        """
        response = requests.get(url)
        data = response.json()
        status = data.get("status", None)
        if status not in ("OK", "STOCKBUSINESS", "STOCKSBUSINESS"):
            raise ValueError(f"API Error: {data}")
        return data.get("results", None)

    def get_financials(self, ticker: str) -> Optional[dict]:
        """
        Get fundamental financial data, which is found in balance sheets,
        income statements, and cash flow statements for a given ticker.

        /vX/reference/financials
        """
        url = (
            f"{POLYGON_BASE_URL}vX/reference/financials?"
            f"ticker={ticker}&"
            f"apiKey={self.polygon_api_key}"
        )
        return self._get_results(url)

    def get_last_quote(self, ticker: str) -> Optional[dict]:
        """
        Get the most recent National Best Bid and Offer (Quote) for a ticker.

        /v2/last/nbbo/{ticker}
        """
        url = f"{POLYGON_BASE_URL}v2/last/nbbo/{ticker}?apiKey={self.polygon_api_key}"
        return self._get_results(url)

    def get_ticker_news(self, ticker: str) -> Optional[dict]:
        """
        Get the most recent news articles relating to a stock ticker symbol,
        including a summary of the article and a link to the original source.

        /v2/reference/news
        """
        url = (
            f"{POLYGON_BASE_URL}v2/reference/news?"
            f"ticker={ticker}&"
            f"apiKey={self.polygon_api_key}"
        )
        return self._get_results(url)

    def get_aggregates(self, ticker: str, **kwargs: Any) -> Optional[dict]:
        """
        Get aggregate bars for a stock over a given date range
        in custom time window sizes.

        /v2/aggs/ticker/{ticker}/range/{multiplier}/{timespan}/{from_date}/{to_date}
        """
        timespan = kwargs.get("timespan", "day")
        multiplier = kwargs.get("timespan_multiplier", 1)
        from_date = kwargs.get("from_date", None)
        to_date = kwargs.get("to_date", None)
        adjusted = kwargs.get("adjusted", True)
        sort = kwargs.get("sort", "asc")

        url = (
            f"{POLYGON_BASE_URL}v2/aggs"
            f"/ticker/{ticker}"
            f"/range/{multiplier}"
            f"/{timespan}"
            f"/{from_date}"
            f"/{to_date}"
            f"?apiKey={self.polygon_api_key}"
            f"&adjusted={adjusted}"
            f"&sort={sort}"
        )
        return self._get_results(url)

    def run(self, mode: str, ticker: str, **kwargs: Any) -> str:
        """Dispatch to the endpoint named by ``mode`` and JSON-encode the result.

        Raises:
            ValueError: if ``mode`` is not one of the supported endpoints.
        """
        if mode == "get_financials":
            return json.dumps(self.get_financials(ticker))
        elif mode == "get_last_quote":
            return json.dumps(self.get_last_quote(ticker))
        elif mode == "get_ticker_news":
            return json.dumps(self.get_ticker_news(ticker))
        elif mode == "get_aggregates":
            return json.dumps(self.get_aggregates(ticker, **kwargs))
        else:
            raise ValueError(f"Invalid mode {mode} for Polygon API.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/google_scholar.py | """Util that calls Google Scholar Search."""
from typing import Any, Dict, Optional
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, model_validator
class GoogleScholarAPIWrapper(BaseModel):
    """Wrapper for Google Scholar API

    You can create serpapi key by signing up at: https://serpapi.com/users/sign_up.

    The wrapper uses the serpapi python package:
    https://serpapi.com/integrations/python#search-google-scholar

    To use, you should have the environment variable ``SERP_API_KEY``
    set with your API key, or pass `serp_api_key` as a named parameter
    to the constructor.

    Attributes:
        top_k_results: number of results to return from google-scholar query search.
            By default it returns top 10 results.
        hl: attribute defines the language to use for the Google Scholar search.
            It's a two-letter language code.
            (e.g., en for English, es for Spanish, or fr for French). Head to the
            Google languages page for a full list of supported Google languages:
            https://serpapi.com/google-languages

        lr: attribute defines one or multiple languages to limit the search to.
            It uses lang_{two-letter language code} to specify languages
            and | as a delimiter. (e.g., lang_fr|lang_de will only search French
            and German pages). Head to the Google lr languages for a full
            list of supported languages: https://serpapi.com/google-lr-languages

    Example:
        .. code-block:: python

        from langchain_community.utilities import GoogleScholarAPIWrapper
        google_scholar = GoogleScholarAPIWrapper()
        google_scholar.run('langchain')
    """

    # Total number of organic results to collect (fetched up to 20 per page).
    top_k_results: int = 10
    # Interface language (two-letter code).
    hl: str = "en"
    # Language restriction(s), e.g. "lang_en" or "lang_fr|lang_de".
    lr: str = "lang_en"
    serp_api_key: Optional[str] = None

    model_config = ConfigDict(
        extra="forbid",
    )

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        serp_api_key = get_from_dict_or_env(values, "serp_api_key", "SERP_API_KEY")
        # NOTE(review): the key is written back under the upper-case name
        # "SERP_API_KEY" rather than the declared field name "serp_api_key";
        # looks inconsistent with the field declaration - confirm before changing.
        values["SERP_API_KEY"] = serp_api_key

        try:
            from serpapi import GoogleScholarSearch

        except ImportError:
            raise ImportError(
                "google-search-results is not installed. "
                "Please install it with `pip install google-search-results"
                ">=2.4.2`"
            )
        # The engine *class* is stored; run() instantiates it per request.
        GoogleScholarSearch.SERP_API_KEY = serp_api_key
        values["google_scholar_engine"] = GoogleScholarSearch

        return values

    def run(self, query: str) -> str:
        """Run query through GoogleSearchScholar and parse result"""
        total_results = []
        page = 0
        # ``page`` is a result *offset* (0, 20, 40, ...). Fetch full pages of
        # 20 while more than 20 results are still needed.
        while page < max((self.top_k_results - 20), 1):
            # We are getting 20 results from every page
            # which is the max in order to reduce the number of API CALLS.
            # 0 is the first page of results, 20 is the 2nd page of results,
            # 40 is the 3rd page of results, etc.
            results = (
                self.google_scholar_engine(  # type: ignore
                    {
                        "q": query,
                        "start": page,
                        "hl": self.hl,
                        "num": min(
                            self.top_k_results, 20
                        ),  # if top_k_result is less than 20.
                        "lr": self.lr,
                    }
                )
                .get_dict()
                .get("organic_results", [])
            )
            total_results.extend(results)
            if not results:  # No need to search for more pages if current page
                # has returned no results
                break
            page += 20
        # Final partial page when top_k_results is not a multiple of 20.
        if (
            self.top_k_results % 20 != 0 and page > 20 and total_results
        ):  # From the last page we would only need top_k_results%20 results
            # if k is not divisible by 20.
            results = (
                self.google_scholar_engine(  # type: ignore
                    {
                        "q": query,
                        "start": page,
                        "num": self.top_k_results % 20,
                        "hl": self.hl,
                        "lr": self.lr,
                    }
                )
                .get_dict()
                .get("organic_results", [])
            )
            total_results.extend(results)
        if not total_results:
            return "No good Google Scholar Result was found"
        # Render each result as a small plain-text card, joined by blank lines.
        docs = [
            f"Title: {result.get('title','')}\n"
            f"Authors: {','.join([author.get('name') for author in result.get('publication_info',{}).get('authors',[])])}\n"  # noqa: E501
            f"Summary: {result.get('publication_info',{}).get('summary','')}\n"
            f"Total-Citations: {result.get('inline_links',{}).get('cited_by',{}).get('total','')}"  # noqa: E501
            for result in total_results
        ]
        return "\n\n".join(docs)
|
0 | lc_public_repos/langchain/libs/community/langchain_community | lc_public_repos/langchain/libs/community/langchain_community/utilities/dataforseo_api_search.py | import base64
from typing import Any, Dict, Optional
from urllib.parse import quote
import aiohttp
import requests
from langchain_core.utils import get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, Field, model_validator
class DataForSeoAPIWrapper(BaseModel):
    """Wrapper around the DataForSeo API.

    Credentials come from ``DATAFORSEO_LOGIN`` / ``DATAFORSEO_PASSWORD``
    environment variables, or the ``api_login`` / ``api_password`` fields.
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="forbid",
    )

    default_params: dict = Field(
        default={
            "location_name": "United States",
            "language_code": "en",
            "depth": 10,
            "se_name": "google",
            "se_type": "organic",
        }
    )
    """Default parameters to use for the DataForSEO SERP API."""
    params: dict = Field(default={})
    """Additional parameters to pass to the DataForSEO SERP API."""
    api_login: Optional[str] = None
    """The API login to use for the DataForSEO SERP API."""
    api_password: Optional[str] = None
    """The API password to use for the DataForSEO SERP API."""
    json_result_types: Optional[list] = None
    """The JSON result types."""
    json_result_fields: Optional[list] = None
    """The JSON result fields."""
    top_count: Optional[int] = None
    """The number of top results to return."""
    aiosession: Optional[aiohttp.ClientSession] = None
    """The aiohttp session to use for the DataForSEO SERP API."""

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that login and password exists in environment."""
        login = get_from_dict_or_env(values, "api_login", "DATAFORSEO_LOGIN")
        password = get_from_dict_or_env(values, "api_password", "DATAFORSEO_PASSWORD")
        values["api_login"] = login
        values["api_password"] = password
        return values

    async def arun(self, url: str) -> str:
        """Run request to DataForSEO SERP API and parse result async."""
        return self._process_response(await self._aresponse_json(url))

    def run(self, url: str) -> str:
        """Run request to DataForSEO SERP API and parse result."""
        return self._process_response(self._response_json(url))

    def results(self, url: str) -> list:
        """Return filtered raw result items for the query (synchronous)."""
        res = self._response_json(url)
        return self._filter_results(res)

    async def aresults(self, url: str) -> list:
        """Return filtered raw result items for the query (asynchronous)."""
        res = await self._aresponse_json(url)
        return self._filter_results(res)

    def _prepare_request(self, keyword: str) -> dict:
        """Prepare the request details for the DataForSEO SERP API.

        Raises:
            ValueError: if either credential is missing.
        """
        if self.api_login is None or self.api_password is None:
            raise ValueError("api_login or api_password is not provided")
        # DataForSEO uses HTTP Basic auth.
        cred = base64.b64encode(
            f"{self.api_login}:{self.api_password}".encode("utf-8")
        ).decode("utf-8")
        headers = {"Authorization": f"Basic {cred}", "Content-Type": "application/json"}
        obj = {"keyword": quote(keyword)}
        # Caller-supplied ``params`` override ``default_params``.
        obj = {**obj, **self.default_params, **self.params}
        data = [obj]
        _url = (
            f"https://api.dataforseo.com/v3/serp/{obj['se_name']}"
            f"/{obj['se_type']}/live/advanced"
        )
        return {
            "url": _url,
            "headers": headers,
            "data": data,
        }

    def _check_response(self, response: dict) -> dict:
        """Check the response from the DataForSEO SERP API for errors.

        Raises:
            ValueError: if the API-level status code is not 20000 ("Ok.").
        """
        if response.get("status_code") != 20000:
            raise ValueError(
                f"Got error from DataForSEO SERP API: {response.get('status_message')}"
            )
        return response

    def _response_json(self, url: str) -> dict:
        """Use requests to run request to DataForSEO SERP API and return results."""
        request_details = self._prepare_request(url)
        response = requests.post(
            request_details["url"],
            headers=request_details["headers"],
            json=request_details["data"],
        )
        response.raise_for_status()
        return self._check_response(response.json())

    async def _aresponse_json(self, url: str) -> dict:
        """Use aiohttp to request DataForSEO SERP API and return results async."""
        request_details = self._prepare_request(url)
        if not self.aiosession:
            # No shared session configured; open a one-shot session.
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    request_details["url"],
                    headers=request_details["headers"],
                    json=request_details["data"],
                ) as response:
                    res = await response.json()
        else:
            async with self.aiosession.post(
                request_details["url"],
                headers=request_details["headers"],
                json=request_details["data"],
            ) as response:
                res = await response.json()
        return self._check_response(res)

    def _filter_results(self, res: dict) -> list:
        """Flatten task results into a single list of items.

        Keeps only items matching ``json_result_types`` (when set) and caps
        the output at exactly ``top_count`` items. (The previous version
        only broke out of the innermost loop, so later results/tasks could
        push the output past the configured cap.)
        """
        output: list = []
        types = self.json_result_types if self.json_result_types is not None else []
        for task in res.get("tasks", []):
            for result in task.get("result", []):
                for item in result.get("items", []):
                    if (
                        self.top_count is not None
                        and len(output) >= self.top_count
                    ):
                        return output
                    if len(types) == 0 or item.get("type", "") in types:
                        self._cleanup_unnecessary_items(item)
                        if len(item) != 0:
                            output.append(item)
        return output

    def _cleanup_unnecessary_items(self, d: dict) -> dict:
        """Recursively strip fields not listed in ``json_result_fields``.

        Layout-only keys (``xpath``, ``position``, ``rectangle``) are always
        removed regardless of configuration. Mutates ``d`` in place and
        returns it.
        """
        fields = self.json_result_fields if self.json_result_fields is not None else []
        if len(fields) > 0:
            for k, v in list(d.items()):
                if isinstance(v, dict):
                    self._cleanup_unnecessary_items(v)
                    # Drop nested dicts that became empty after cleanup.
                    if len(v) == 0:
                        del d[k]
                elif k not in fields:
                    del d[k]

        if "xpath" in d:
            del d["xpath"]
        if "position" in d:
            del d["position"]
        if "rectangle" in d:
            del d["rectangle"]
        for k, v in list(d.items()):
            if isinstance(v, dict):
                self._cleanup_unnecessary_items(v)
        return d

    def _process_response(self, res: dict) -> str:
        """Process response from DataForSEO SERP API.

        Picks the most useful snippet per result, preferring answer boxes,
        then knowledge graph, featured snippet, shopping price, and finally
        the first organic description.
        """
        toret = "No good search result found"
        for task in res.get("tasks", []):
            for result in task.get("result", []):
                item_types = result.get("item_types")
                items = result.get("items", [])
                if "answer_box" in item_types:
                    toret = next(
                        item for item in items if item.get("type") == "answer_box"
                    ).get("text")
                elif "knowledge_graph" in item_types:
                    toret = next(
                        item for item in items if item.get("type") == "knowledge_graph"
                    ).get("description")
                elif "featured_snippet" in item_types:
                    toret = next(
                        item for item in items if item.get("type") == "featured_snippet"
                    ).get("description")
                elif "shopping" in item_types:
                    toret = next(
                        item for item in items if item.get("type") == "shopping"
                    ).get("price")
                elif "organic" in item_types:
                    toret = next(
                        item for item in items if item.get("type") == "organic"
                    ).get("description")
                if toret:
                    break
        return toret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.