index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/financial_datasets/balance_sheets.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class BalanceSheetsSchema(BaseModel):
    """Input for BalanceSheets."""

    # All three fields are required here; the "Default is ..." wording in the
    # descriptions refers to the remote API's behavior.
    # NOTE(review): confirm the API applies those defaults server-side.
    ticker: str = Field(
        description="The ticker symbol to fetch balance sheets for.",
    )
    period: str = Field(
        description=(
            "The period of the balance sheets. "
            "Possible values are: "
            "annual, quarterly, ttm. "
            "Default is 'annual'."
        ),
    )
    limit: int = Field(
        description="The number of balance sheets to return. Default is 10.",
    )
class BalanceSheets(BaseTool):  # type: ignore[override, override]
    """Tool that gets balance sheets for a given ticker over a given period."""

    # Mode key dispatched to FinancialDatasetsAPIWrapper.run().
    mode: str = "get_balance_sheets"
    name: str = "balance_sheets"
    description: str = (
        "A wrapper around financial datasets's Balance Sheets API. "
        "This tool is useful for fetching balance sheets for a given ticker."
        "The tool fetches balance sheets for a given ticker over a given period."
        "The period can be annual, quarterly, or trailing twelve months (ttm)."
        "The number of balance sheets to return can also be "
        "specified using the limit parameter."
    )
    args_schema: Type[BalanceSheetsSchema] = BalanceSheetsSchema
    # Holds credentials/session state; excluded from serialization.
    api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)

    def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
        super().__init__(api_wrapper=api_wrapper)

    def _run(
        self,
        ticker: str,
        period: str,
        limit: Optional[int],
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Fetch balance sheets through the Financial Datasets API wrapper."""
        request = {
            "mode": self.mode,
            "ticker": ticker,
            "period": period,
            "limit": limit,
        }
        return self.api_wrapper.run(**request)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/financial_datasets/income_statements.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class IncomeStatementsSchema(BaseModel):
    """Input for IncomeStatements."""

    # All three fields are required here; the "Default is ..." wording in the
    # descriptions refers to the remote API's behavior.
    # NOTE(review): confirm the API applies those defaults server-side.
    ticker: str = Field(
        description="The ticker symbol to fetch income statements for.",
    )
    period: str = Field(
        description=(
            "The period of the income statement. "
            "Possible values are: "
            "annual, quarterly, ttm. "
            "Default is 'annual'."
        ),
    )
    limit: int = Field(
        description="The number of income statements to return. Default is 10.",
    )
class IncomeStatements(BaseTool):  # type: ignore[override, override]
    """Tool that gets income statements for a given ticker over a given period."""

    # Mode key dispatched to FinancialDatasetsAPIWrapper.run().
    mode: str = "get_income_statements"
    name: str = "income_statements"
    description: str = (
        "A wrapper around financial datasets's Income Statements API. "
        "This tool is useful for fetching income statements for a given ticker."
        "The tool fetches income statements for a given ticker over a given period."
        "The period can be annual, quarterly, or trailing twelve months (ttm)."
        "The number of income statements to return can also be "
        "specified using the limit parameter."
    )
    args_schema: Type[IncomeStatementsSchema] = IncomeStatementsSchema
    # Holds credentials/session state; excluded from serialization.
    api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)

    def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
        super().__init__(api_wrapper=api_wrapper)

    def _run(
        self,
        ticker: str,
        period: str,
        limit: Optional[int],
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Fetch income statements through the Financial Datasets API wrapper."""
        request = {
            "mode": self.mode,
            "ticker": ticker,
            "period": period,
            "limit": limit,
        }
        return self.api_wrapper.run(**request)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/financial_datasets/cash_flow_statements.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.financial_datasets import FinancialDatasetsAPIWrapper
class CashFlowStatementsSchema(BaseModel):
    """Input for CashFlowStatements."""

    # All three fields are required here; the "Default is ..." wording in the
    # descriptions refers to the remote API's behavior.
    # NOTE(review): confirm the API applies those defaults server-side.
    ticker: str = Field(
        description="The ticker symbol to fetch cash flow statements for.",
    )
    period: str = Field(
        description=(
            "The period of the cash flow statement. "
            "Possible values are: "
            "annual, quarterly, ttm. "
            "Default is 'annual'."
        ),
    )
    limit: int = Field(
        description="The number of cash flow statements to return. Default is 10.",
    )
class CashFlowStatements(BaseTool):  # type: ignore[override, override]
    """Tool that gets cash flow statements for a given ticker over a given period."""

    # Mode key dispatched to FinancialDatasetsAPIWrapper.run().
    mode: str = "get_cash_flow_statements"
    name: str = "cash_flow_statements"
    description: str = (
        "A wrapper around financial datasets's Cash Flow Statements API. "
        "This tool is useful for fetching cash flow statements for a given ticker."
        "The tool fetches cash flow statements for a given ticker over a given period."
        "The period can be annual, quarterly, or trailing twelve months (ttm)."
        "The number of cash flow statements to return can also be "
        "specified using the limit parameter."
    )
    args_schema: Type[CashFlowStatementsSchema] = CashFlowStatementsSchema
    # Holds credentials/session state; excluded from serialization.
    api_wrapper: FinancialDatasetsAPIWrapper = Field(..., exclude=True)

    def __init__(self, api_wrapper: FinancialDatasetsAPIWrapper):
        super().__init__(api_wrapper=api_wrapper)

    def _run(
        self,
        ticker: str,
        period: str,
        limit: Optional[int],
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Fetch cash flow statements through the Financial Datasets API wrapper."""
        request = {
            "mode": self.mode,
            "ticker": ticker,
            "period": period,
            "limit": limit,
        }
        return self.api_wrapper.run(**request)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/financial_datasets/__init__.py | """financial datasets tools."""
from langchain_community.tools.financial_datasets.balance_sheets import (
BalanceSheets,
)
from langchain_community.tools.financial_datasets.cash_flow_statements import (
CashFlowStatements,
)
from langchain_community.tools.financial_datasets.income_statements import (
IncomeStatements,
)
__all__ = [
"BalanceSheets",
"CashFlowStatements",
"IncomeStatements",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/cogniswitch/tool.py | from __future__ import annotations
from typing import Any, Dict, Optional
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
class CogniswitchKnowledgeRequest(BaseTool):  # type: ignore[override]
    """Tool that uses the Cogniswitch service to answer questions.

    Requires a Cogniswitch platform token (``cs_token``), an OpenAI token
    (``OAI_token``) and a Cogniswitch OAuth token (``apiKey``).
    Input should be a natural-language search query.
    """

    name: str = "cogniswitch_knowledge_request"
    description: str = """A wrapper around cogniswitch service to
    answer the question from the knowledge base."""
    cs_token: str  # Cogniswitch platform token
    OAI_token: str  # OpenAI token forwarded to the service
    apiKey: str  # Cogniswitch OAuth token
    api_url: str = "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict[str, Any]:
        """
        Use the tool to answer a query.

        Args:
            query (str): Natural language query,
                that you would like to ask to your knowledge graph.
            run_manager (Optional[CallbackManagerForChainRun]):
                Manager for chain run callbacks.

        Returns:
            Dict[str, Any]: Output dictionary containing
                the 'response' from the service.
        """
        response = self.answer_cs(self.cs_token, self.OAI_token, query, self.apiKey)
        return response

    def answer_cs(self, cs_token: str, OAI_token: str, query: str, apiKey: str) -> dict:
        """
        Send a query to the Cogniswitch service and retrieve the response.

        Args:
            cs_token (str): Cogniswitch token.
            OAI_token (str): OpenAI token.
            apiKey (str): OAuth token.
            query (str): Query to be answered.

        Returns:
            dict: Response JSON from the Cogniswitch service.

        Raises:
            ValueError: If any token or the query is empty.
        """
        if not cs_token:
            raise ValueError("Missing cs_token")
        if not OAI_token:
            raise ValueError("Missing OpenAI token")
        if not apiKey:
            raise ValueError("Missing cogniswitch OAuth token")
        if not query:
            raise ValueError("Missing input query")
        # Tokens travel in headers; only the query goes in the form body.
        headers = {
            "apiKey": apiKey,
            "platformToken": cs_token,
            "openAIToken": OAI_token,
        }
        data = {"query": query}
        # SECURITY NOTE(review): verify=False disables TLS certificate
        # verification for this request — confirm this is intentional.
        response = requests.post(self.api_url, headers=headers, verify=False, data=data)
        return response.json()
class CogniswitchKnowledgeStatus(BaseTool):  # type: ignore[override]
    """Tool that uses the Cogniswitch services to get the
    status of the document or url uploaded.

    Input should be a file name or the url link.
    """

    name: str = "cogniswitch_knowledge_status"
    description: str = """A wrapper around cogniswitch services to know
    the status of the document uploaded from a url or a file."""
    cs_token: str  # Cogniswitch platform token
    OAI_token: str  # OpenAI token
    apiKey: str  # Cogniswitch OAuth token
    knowledge_status_url: str = (
        "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeSource/status"
    )

    def _run(
        self,
        document_name: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict[str, Any]:
        """
        Use the tool to know the status of the document uploaded.

        Args:
            document_name (str): name of the document or
                the url uploaded
            run_manager (Optional[CallbackManagerForChainRun]):
                Manager for chain run callbacks.

        Returns:
            Dict[str, Any]: Output dictionary containing
                the 'response' from the service.
        """
        response = self.knowledge_status(document_name)
        return response

    def knowledge_status(self, document_name: str) -> dict:
        """
        Use this function to know the status of the document or the URL uploaded.

        Args:
            document_name (str): The document name or the url that is uploaded.

        Returns:
            dict: Status payload with a human-readable 'status' label on
                success, or ``{"message": <http status code>}`` on failure.
        """
        params = {"docName": document_name, "platformToken": self.cs_token}
        headers = {
            "apiKey": self.apiKey,
            "openAIToken": self.OAI_token,
            "platformToken": self.cs_token,
        }
        # SECURITY NOTE(review): verify=False disables TLS certificate
        # verification — confirm this is intentional.
        response = requests.get(
            self.knowledge_status_url,
            headers=headers,
            params=params,
            verify=False,
        )
        if response.status_code != 200:
            # error_message = response.json()["message"]
            return {
                "message": response.status_code,
            }
        source_info = response.json()
        # The service returns a list of entries; only the last one is reported.
        source_data = dict(source_info[-1])
        # Map the numeric status code to a human-readable label
        # (replaces the original if/elif ladder).
        status_labels = {
            0: "SUCCESS",
            1: "PROCESSING",
            2: "UPLOADED",
            3: "FAILURE",
            4: "UPLOAD_FAILURE",
            5: "REJECTED",
        }
        status = source_data.get("status")
        if status in status_labels:
            source_data["status"] = status_labels[status]
        # Strip internal bookkeeping fields from the user-facing payload;
        # pop(key, None) replaces the repeated `if key in .keys()` checks.
        for internal_key in (
            "filePath",
            "savedFileName",
            "integrationConfigId",
            "metaData",
            "docEntryId",
        ):
            source_data.pop(internal_key, None)
        return source_data
class CogniswitchKnowledgeSourceFile(BaseTool):  # type: ignore[override]
    """Tool that uses the Cogniswitch services to store data from a file.

    If the input looks like a file path, assign that string value to the file
    key. Assign document name & description only if provided in input.
    """

    name: str = "cogniswitch_knowledge_source_file"
    description: str = """
    This calls the CogniSwitch services to analyze & store data from a file.
    If the input looks like a file path, assign that string value to file key.
    Assign document name & description only if provided in input.
    """
    cs_token: str  # Cogniswitch platform token
    OAI_token: str  # OpenAI token
    apiKey: str  # Cogniswitch OAuth token
    knowledgesource_file: str = (
        "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeSource/file"
    )

    def _run(
        self,
        file: Optional[str] = None,
        document_name: Optional[str] = None,
        document_description: Optional[str] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict[str, Any]:
        """
        Execute the tool to store the data given from a file.

        Args:
            file Optional[str]: The file path of your knowledge
            document_name Optional[str]: Name of your knowledge document
            document_description Optional[str]: Description of your knowledge document
            run_manager (Optional[CallbackManagerForChainRun]):
                Manager for chain run callbacks.

        Returns:
            Dict[str, Any]: Output dictionary containing
                the 'response' from the service.
        """
        if not file:
            return {
                "message": "No input provided",
            }
        return self.store_data(
            file=file,
            document_name=document_name,
            document_description=document_description,
        )

    def store_data(
        self,
        file: Optional[str],
        document_name: Optional[str],
        document_description: Optional[str],
    ) -> dict:
        """
        Store data using the Cogniswitch service.

        Args:
            file (Optional[str]): file path of your file.
                the current files supported by the files are
                .txt, .pdf, .docx, .doc, .html
            document_name (Optional[str]): Name of the document you are uploading.
            document_description (Optional[str]): Description of the document.

        Returns:
            dict: Response JSON from the Cogniswitch service, or
                ``{"message": "Bad Request"}`` on a non-200 response.
        """
        headers = {
            "apiKey": self.apiKey,
            "openAIToken": self.OAI_token,
            "platformToken": self.cs_token,
        }
        data: Dict[str, Any] = {
            "documentName": document_name or "",
            "documentDescription": document_description or "",
        }
        # BUG FIX: open the file with a context manager so the handle is
        # always closed — the original `open(file, "rb")` was never closed,
        # leaking a file descriptor per upload.
        # SECURITY NOTE(review): verify=False disables TLS certificate
        # verification — confirm this is intentional.
        with open(file, "rb") as file_handle:  # type: ignore[arg-type]
            response = requests.post(
                self.knowledgesource_file,
                headers=headers,
                verify=False,
                data=data,
                files={"file": file_handle},
            )
        if response.status_code == 200:
            return response.json()
        else:
            return {"message": "Bad Request"}
class CogniswitchKnowledgeSourceURL(BaseTool):  # type: ignore[override]
    """Tool that uses the Cogniswitch services to store data from a URL.

    If the URL is provided in input, assign that value to the url key.
    Assign document name & description only if provided in input.
    """

    name: str = "cogniswitch_knowledge_source_url"
    description: str = """
    This calls the CogniSwitch services to analyze & store data from a url.
    the URL is provided in input, assign that value to the url key.
    Assign document name & description only if provided in input"""
    cs_token: str  # Cogniswitch platform token
    OAI_token: str  # OpenAI token
    apiKey: str  # Cogniswitch OAuth token
    knowledgesource_url: str = (
        "https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeSource/url"
    )

    def _run(
        self,
        url: Optional[str] = None,
        document_name: Optional[str] = None,
        document_description: Optional[str] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict[str, Any]:
        """
        Execute the tool to store the data given from a url.

        Args:
            url Optional[str]: The website/url link of your knowledge
            document_name Optional[str]: Name of your knowledge document
            document_description Optional[str]: Description of your knowledge document
            run_manager (Optional[CallbackManagerForChainRun]):
                Manager for chain run callbacks.

        Returns:
            Dict[str, Any]: Output dictionary containing
                the 'response' from the service.
        """
        # Guard clause: nothing to store without a URL.
        if not url:
            return {
                "message": "No input provided",
            }
        return self.store_data(
            url=url,
            document_name=document_name,
            document_description=document_description,
        )

    def store_data(
        self,
        url: Optional[str],
        document_name: Optional[str],
        document_description: Optional[str],
    ) -> dict:
        """
        Store data using the Cogniswitch service.

        Args:
            url (Optional[str]): URL link.
            document_name (Optional[str]): Name of the document you are uploading.
            document_description (Optional[str]): Description of the document.

        Returns:
            dict: Response JSON from the Cogniswitch service.
        """
        headers = {
            "apiKey": self.apiKey,
            "openAIToken": self.OAI_token,
            "platformToken": self.cs_token,
        }
        data: Dict[str, Any]
        # NOTE(review): the normalized name/description are not sent in the
        # request below — only the url is; kept for behavioral parity.
        if not document_name:
            document_name = ""
        if not document_description:
            document_description = ""
        if not url:
            return {
                "message": "No input provided",
            }
        data = {"url": url}
        # NOTE(review): verify=False disables TLS certificate verification.
        response = requests.post(
            self.knowledgesource_url,
            headers=headers,
            verify=False,
            data=data,
        )
        if response.status_code != 200:
            return {"message": "Bad Request"}
        return response.json()
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/cogniswitch/__init__.py | "Cogniswitch Tools"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/shell/tool.py | import logging
import platform
import warnings
from typing import Any, List, Optional, Type, Union
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, model_validator
logger = logging.getLogger(__name__)
class ShellInput(BaseModel):
    """Commands for the Bash Shell tool."""

    commands: Union[str, List[str]] = Field(
        ...,
        description="List of shell commands to run. Deserialized using json.loads",
    )
    """List of shell commands to run."""

    @model_validator(mode="before")
    @classmethod
    def _validate_commands(cls, values: dict) -> Any:
        """Coerce a single command string into a one-element list and warn."""
        # TODO: Add real validators
        if not isinstance(values.get("commands"), list):
            values["commands"] = [values.get("commands")]
        # Warn that the bash tool is not safe
        warnings.warn(
            "The shell tool has no safeguards by default. Use at your own risk."
        )
        return values
def _get_default_bash_process() -> Any:
"""Get default bash process."""
try:
from langchain_experimental.llm_bash.bash import BashProcess
except ImportError:
raise ImportError(
"BashProcess has been moved to langchain experimental."
"To use this tool, install langchain-experimental "
"with `pip install langchain-experimental`."
)
return BashProcess(return_err_output=True)
def _get_platform() -> str:
"""Get platform."""
system = platform.system()
if system == "Darwin":
return "MacOS"
return system
class ShellTool(BaseTool):  # type: ignore[override, override]
    """Tool to run shell commands."""

    process: Any = Field(default_factory=_get_default_bash_process)
    """Bash process to run commands."""

    name: str = "terminal"
    """Name of tool."""

    description: str = f"Run shell commands on this {_get_platform()} machine."
    """Description of tool."""

    args_schema: Type[BaseModel] = ShellInput
    """Schema for input arguments."""

    ask_human_input: bool = False
    """
    If True, prompts the user for confirmation (y/n) before executing
    a command generated by the language model in the bash shell.
    """

    def _run(
        self,
        commands: Union[str, List[str]],
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run commands, optionally gated on user confirmation."""
        print(f"Executing command:\n {commands}")  # noqa: T201
        try:
            if not self.ask_human_input:
                return self.process.run(commands)
            answer = input("Proceed with command execution? (y/n): ").lower()
            if answer == "y":
                return self.process.run(commands)
            logger.info("Invalid input. User aborted command execution.")
            return None  # type: ignore[return-value]
        except Exception as e:
            logger.error(f"Error during command execution: {e}")
            return None  # type: ignore[return-value]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/shell/__init__.py | """Shell tool."""
from langchain_community.tools.shell.tool import ShellTool
__all__ = ["ShellTool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/create_draft.py | import base64
from email.message import EmailMessage
from typing import List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
class CreateDraftSchema(BaseModel):
    """Input for CreateDraftTool."""

    # Required fields.
    message: str = Field(
        ...,
        description="The message to include in the draft.",
    )
    to: List[str] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    # Optional carbon-copy fields.
    cc: Optional[List[str]] = Field(
        default=None,
        description="The list of CC recipients.",
    )
    bcc: Optional[List[str]] = Field(
        default=None,
        description="The list of BCC recipients.",
    )
class GmailCreateDraft(GmailBaseTool):  # type: ignore[override, override]
    """Tool that creates a draft email for Gmail."""

    name: str = "create_gmail_draft"
    description: str = (
        "Use this tool to create a draft email with the provided message fields."
    )
    args_schema: Type[CreateDraftSchema] = CreateDraftSchema

    def _prepare_draft_message(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
    ) -> dict:
        """Build the Gmail API draft payload (base64url-encoded MIME)."""
        mime = EmailMessage()
        mime.set_content(message)
        mime["To"] = ", ".join(to)
        mime["Subject"] = subject
        # Cc/Bcc headers are only added when recipients were supplied.
        for header, recipients in (("Cc", cc), ("Bcc", bcc)):
            if recipients is not None:
                mime[header] = ", ".join(recipients)
        raw = base64.urlsafe_b64encode(mime.as_bytes()).decode()
        return {"message": {"raw": raw}}

    def _run(
        self,
        message: str,
        to: List[str],
        subject: str,
        cc: Optional[List[str]] = None,
        bcc: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Create the draft via the Gmail API and report its id."""
        try:
            create_message = self._prepare_draft_message(message, to, subject, cc, bcc)
            drafts = self.api_resource.users().drafts()
            draft = drafts.create(userId="me", body=create_message).execute()
            return f'Draft created. Draft Id: {draft["id"]}'
        except Exception as e:
            raise Exception(f"An error occurred: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/base.py | """Base class for Gmail tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.gmail.utils import build_resource_service
if TYPE_CHECKING:
# This is for linting and IDE typehints
from googleapiclient.discovery import Resource
else:
try:
# We do this so pydantic can resolve the types when instantiating
from googleapiclient.discovery import Resource
except ImportError:
pass
class GmailBaseTool(BaseTool):  # type: ignore[override]
    """Base class for Gmail tools."""

    # Authenticated googleapiclient ``Resource``; built from local credential
    # files by default (triggers the OAuth flow on first use).
    api_resource: Resource = Field(default_factory=build_resource_service)

    @classmethod
    def from_api_resource(cls, api_resource: Resource) -> "GmailBaseTool":
        """Create a tool from an api resource.

        Args:
            api_resource: The api resource to use.

        Returns:
            A tool.
        """
        # BUG FIX: the keyword must match the ``api_resource`` field declared
        # above — the original passed ``service=api_resource``, so the
        # supplied resource never populated ``api_resource`` and the
        # default_factory ran instead.
        return cls(api_resource=api_resource)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/search.py | import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body
class Resource(str, Enum):
    """Gmail resource kinds that can be searched (threads or messages)."""

    THREADS = "threads"
    MESSAGES = "messages"
class SearchArgsSchema(BaseModel):
    """Input for SearchGmailTool."""

    # Query syntax documented at
    # https://support.google.com/mail/answer/7190?hl=en
    query: str = Field(
        ...,
        description="The Gmail query. Example filters include from:sender,"
        " to:recipient, subject:subject, -filtered_term,"
        " in:folder, is:important|read|starred, after:year/mo/date, "
        "before:year/mo/date, label:label_name"
        ' "exact phrase".'
        " Search newer/older than using d (day), m (month), and y (year): "
        "newer_than:2d, older_than:1y."
        " Attachments with extension example: filename:pdf. Multiple term"
        " matching example: from:amy OR from:david.",
    )
    # Defaults to searching individual messages rather than whole threads.
    resource: Resource = Field(
        default=Resource.MESSAGES,
        description="Whether to search for threads or messages.",
    )
    max_results: int = Field(
        default=10,
        description="The maximum number of results to return.",
    )
class GmailSearch(GmailBaseTool):  # type: ignore[override, override]
    """Tool that searches for messages or threads in Gmail."""

    name: str = "search_gmail"
    description: str = (
        "Use this tool to search for email messages or threads."
        " The input must be a valid Gmail query."
        " The output is a JSON list of the requested resource."
    )
    args_schema: Type[SearchArgsSchema] = SearchArgsSchema

    def _parse_threads(self, threads: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Expand each thread stub with the snippets of its messages."""
        # Add the thread message snippets to the thread results
        results = []
        for thread in threads:
            thread_id = thread["id"]
            thread_data = (
                self.api_resource.users()
                .threads()
                .get(userId="me", id=thread_id)
                .execute()
            )
            messages = thread_data["messages"]
            thread["messages"] = []
            for message in messages:
                snippet = message["snippet"]
                thread["messages"].append({"snippet": snippet, "id": message["id"]})
            results.append(thread)
        return results

    def _parse_messages(self, messages: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Fetch each message in raw form and decode subject, sender and body."""
        results = []
        for message in messages:
            message_id = message["id"]
            message_data = (
                self.api_resource.users()
                .messages()
                .get(userId="me", format="raw", id=message_id)
                .execute()
            )
            raw_message = base64.urlsafe_b64decode(message_data["raw"])
            email_msg = email.message_from_bytes(raw_message)
            subject = email_msg["Subject"]
            sender = email_msg["From"]
            message_body = ""
            if email_msg.is_multipart():
                # Use the first text/plain part that is not an attachment.
                for part in email_msg.walk():
                    ctype = part.get_content_type()
                    cdispo = str(part.get("Content-Disposition"))
                    if ctype == "text/plain" and "attachment" not in cdispo:
                        try:
                            message_body = part.get_payload(decode=True).decode("utf-8")  # type: ignore[union-attr]
                        except UnicodeDecodeError:
                            # Fall back to latin-1 for non-UTF-8 payloads.
                            message_body = part.get_payload(decode=True).decode(  # type: ignore[union-attr]
                                "latin-1"
                            )
                        break
            else:
                message_body = email_msg.get_payload(decode=True).decode("utf-8")  # type: ignore[union-attr]
            body = clean_email_body(message_body)
            results.append(
                {
                    "id": message["id"],
                    "threadId": message_data["threadId"],
                    "snippet": message_data["snippet"],
                    "body": body,
                    "subject": subject,
                    "sender": sender,
                    "from": email_msg["From"],
                    "date": email_msg["Date"],
                    "to": email_msg["To"],
                    "cc": email_msg["Cc"],
                }
            )
        return results

    def _run(
        self,
        query: str,
        resource: Resource = Resource.MESSAGES,
        max_results: int = 10,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> List[Dict[str, Any]]:
        """Run the Gmail search against the requested resource collection.

        Args:
            query: A valid Gmail search query.
            resource: Whether to search threads or messages.
            max_results: Maximum number of results to request.

        Raises:
            NotImplementedError: For an unknown resource kind.
        """
        if resource == Resource.THREADS:
            # BUG FIX: thread searches must hit the threads() collection.
            # The original always called messages().list(), whose response
            # contains no "threads" key, so THREADS searches silently
            # returned an empty list.
            results = (
                self.api_resource.users()
                .threads()
                .list(userId="me", q=query, maxResults=max_results)
                .execute()
                .get(resource.value, [])
            )
            return self._parse_threads(results)
        elif resource == Resource.MESSAGES:
            results = (
                self.api_resource.users()
                .messages()
                .list(userId="me", q=query, maxResults=max_results)
                .execute()
                .get(resource.value, [])
            )
            return self._parse_messages(results)
        else:
            raise NotImplementedError(f"Resource of type {resource} not implemented.")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/get_thread.py | from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
class GetThreadSchema(BaseModel):
    """Input for GetMessageTool."""

    # Thread ids come from search results; query syntax reference:
    # https://support.google.com/mail/answer/7190?hl=en
    thread_id: str = Field(
        ...,
        description="The thread ID.",
    )
class GmailGetThread(GmailBaseTool):  # type: ignore[override, override]
    """Tool that gets a thread by ID from Gmail."""

    name: str = "get_gmail_thread"
    description: str = (
        "Use this tool to search for email messages."
        " The input must be a valid Gmail query."
        " The output is a JSON list of messages."
    )
    args_schema: Type[GetThreadSchema] = GetThreadSchema

    def _run(
        self,
        thread_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict:
        """Fetch a Gmail thread and trim each message to id + snippet.

        Args:
            thread_id: The Gmail thread ID.

        Raises:
            ValueError: If the API response is not a dict.
        """
        query = self.api_resource.users().threads().get(userId="me", id=thread_id)
        thread_data = query.execute()
        if not isinstance(thread_data, dict):
            # BUG FIX: the message previously said "must be a list",
            # contradicting the dict check above.
            raise ValueError("The output of the query must be a dict.")
        messages = thread_data["messages"]
        thread_data["messages"] = []
        # BUG FIX: removed the duplicated "snippet" entry.
        keys_to_keep = ["id", "snippet"]
        # TODO: Parse body.
        for message in messages:
            thread_data["messages"].append(
                {k: message[k] for k in keys_to_keep if k in message}
            )
        return thread_data
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/utils.py | """Gmail tool utils."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING, List, Optional, Tuple
from langchain_core.utils import guard_import
if TYPE_CHECKING:
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import Resource
from googleapiclient.discovery import build as build_resource
logger = logging.getLogger(__name__)
def import_google() -> Tuple[Request, Credentials]:
    """Import google auth libraries.

    Returns:
        Tuple[Request, Credentials]: Request and Credentials classes.
    """
    request_cls = guard_import(
        module_name="google.auth.transport.requests",
        pip_name="google-auth-httplib2",
    ).Request
    credentials_cls = guard_import(
        module_name="google.oauth2.credentials", pip_name="google-auth-httplib2"
    ).Credentials
    return request_cls, credentials_cls
def import_installed_app_flow() -> InstalledAppFlow:
    """Import the InstalledAppFlow class.

    Returns:
        InstalledAppFlow: InstalledAppFlow class.
    """
    flow_module = guard_import(
        module_name="google_auth_oauthlib.flow", pip_name="google-auth-oauthlib"
    )
    return flow_module.InstalledAppFlow
def import_googleapiclient_resource_builder() -> build_resource:
    """Import the googleapiclient.discovery.build function.

    Returns:
        build_resource: googleapiclient.discovery.build function.
    """
    discovery = guard_import(
        module_name="googleapiclient.discovery", pip_name="google-api-python-client"
    )
    return discovery.build
# Full-access Gmail OAuth scope requested when none is supplied.
DEFAULT_SCOPES = ["https://mail.google.com/"]
# Cached user token file, written after a successful auth flow.
DEFAULT_CREDS_TOKEN_FILE = "token.json"
# OAuth client secrets file (downloaded from Google Cloud Console).
DEFAULT_CLIENT_SECRETS_FILE = "credentials.json"
def get_gmail_credentials(
    token_file: Optional[str] = None,
    client_secrets_file: Optional[str] = None,
    scopes: Optional[List[str]] = None,
) -> Credentials:
    """Load cached Gmail OAuth credentials, refreshing or re-authorizing as needed.

    Args:
        token_file: Path to the cached token JSON. Defaults to "token.json".
        client_secrets_file: Path to the OAuth client secrets JSON.
            Defaults to "credentials.json".
        scopes: OAuth scopes to request. Defaults to full Gmail access.

    Returns:
        Credentials: Valid Google OAuth2 credentials. The token file is
        (re)written whenever a refresh or a new interactive login happens.
    """
    # From https://developers.google.com/gmail/api/quickstart/python
    Request, Credentials = import_google()
    InstalledAppFlow = import_installed_app_flow()
    creds = None
    scopes = scopes or DEFAULT_SCOPES
    token_file = token_file or DEFAULT_CREDS_TOKEN_FILE
    client_secrets_file = client_secrets_file or DEFAULT_CLIENT_SECRETS_FILE
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists(token_file):
        creds = Credentials.from_authorized_user_file(token_file, scopes)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Silent refresh using the stored refresh token.
            creds.refresh(Request())  # type: ignore[call-arg]
        else:
            # https://developers.google.com/gmail/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa
            # Interactive login: opens a browser and a temporary local server.
            flow = InstalledAppFlow.from_client_secrets_file(
                client_secrets_file, scopes
            )
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open(token_file, "w") as token:
            token.write(creds.to_json())
    return creds
def build_resource_service(
    credentials: Optional[Credentials] = None,
    service_name: str = "gmail",
    service_version: str = "v1",
) -> Resource:
    """Build a Gmail service."""
    # Obtain credentials lazily when the caller did not provide any.
    creds = credentials or get_gmail_credentials()
    service_builder = import_googleapiclient_resource_builder()
    return service_builder(service_name, service_version, credentials=creds)
def clean_email_body(body: str) -> str:
    """Clean email body."""
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        # Without bs4 we cannot strip HTML; return the text unchanged.
        logger.warning("BeautifulSoup not installed. Skipping cleaning.")
        return str(body)
    try:
        plain_text = BeautifulSoup(str(body), "html.parser").get_text()
        return str(plain_text)
    except Exception as e:
        # Parsing failed; log and fall back to the raw body.
        logger.error(e)
        return str(body)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/__init__.py | """Gmail tools."""
from langchain_community.tools.gmail.create_draft import GmailCreateDraft
from langchain_community.tools.gmail.get_message import GmailGetMessage
from langchain_community.tools.gmail.get_thread import GmailGetThread
from langchain_community.tools.gmail.search import GmailSearch
from langchain_community.tools.gmail.send_message import GmailSendMessage
from langchain_community.tools.gmail.utils import get_gmail_credentials
__all__ = [
"GmailCreateDraft",
"GmailSendMessage",
"GmailSearch",
"GmailGetMessage",
"GmailGetThread",
"get_gmail_credentials",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/send_message.py | """Send Gmail messages."""
import base64
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Any, Dict, List, Optional, Type, Union
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
class SendMessageSchema(BaseModel):
    """Input for SendMessageTool."""

    # Message body; sent as HTML by GmailSendMessage._prepare_message.
    message: str = Field(
        ...,
        description="The message to send.",
    )
    # A single address or a list of addresses is accepted.
    to: Union[str, List[str]] = Field(
        ...,
        description="The list of recipients.",
    )
    subject: str = Field(
        ...,
        description="The subject of the message.",
    )
    cc: Optional[Union[str, List[str]]] = Field(
        None,
        description="The list of CC recipients.",
    )
    bcc: Optional[Union[str, List[str]]] = Field(
        None,
        description="The list of BCC recipients.",
    )
class GmailSendMessage(GmailBaseTool):  # type: ignore[override, override]
    """Tool that sends a message to Gmail."""

    name: str = "send_gmail_message"
    description: str = (
        "Use this tool to send email messages." " The input is the message, recipients"
    )
    args_schema: Type[SendMessageSchema] = SendMessageSchema

    def _prepare_message(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
    ) -> Dict[str, Any]:
        """Create a message for an email.

        Builds a multipart MIME message (body attached as HTML) and returns
        it base64url-encoded in the shape the Gmail API expects:
        ``{"raw": <encoded message>}``.
        """
        mime_message = MIMEMultipart()
        # Plain text is valid HTML, so the body is always attached as HTML.
        mime_message.attach(MIMEText(message, "html"))
        # Header values are comma-separated strings; normalize a single
        # address to a one-element list before joining.
        mime_message["To"] = ", ".join(to if isinstance(to, list) else [to])
        mime_message["Subject"] = subject
        if cc is not None:
            mime_message["Cc"] = ", ".join(cc if isinstance(cc, list) else [cc])
        if bcc is not None:
            mime_message["Bcc"] = ", ".join(bcc if isinstance(bcc, list) else [bcc])
        encoded_message = base64.urlsafe_b64encode(mime_message.as_bytes()).decode()
        return {"raw": encoded_message}

    def _run(
        self,
        message: str,
        to: Union[str, List[str]],
        subject: str,
        cc: Optional[Union[str, List[str]]] = None,
        bcc: Optional[Union[str, List[str]]] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Send the message via the Gmail API and report the new message id."""
        try:
            create_message = self._prepare_message(message, to, subject, cc=cc, bcc=bcc)
            send_message = (
                self.api_resource.users()
                .messages()
                .send(userId="me", body=create_message)
            )
            sent_message = send_message.execute()
            return f'Message sent. Message Id: {sent_message["id"]}'
        except Exception as error:
            # Chain the original exception so the root cause and traceback
            # are preserved for debugging.
            raise Exception(f"An error occurred: {error}") from error
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/gmail/get_message.py | import base64
import email
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.gmail.base import GmailBaseTool
from langchain_community.tools.gmail.utils import clean_email_body
class SearchArgsSchema(BaseModel):
    """Input for GetMessageTool."""

    # Gmail message id, e.g. as returned by the search tool's results.
    message_id: str = Field(
        ...,
        description="The unique ID of the email message, retrieved from a search.",
    )
class GmailGetMessage(GmailBaseTool):  # type: ignore[override, override]
    """Tool that gets a message by ID from Gmail."""

    name: str = "get_gmail_message"
    description: str = (
        "Use this tool to fetch an email by message ID."
        " Returns the thread ID, snippet, body, subject, and sender."
    )
    args_schema: Type[SearchArgsSchema] = SearchArgsSchema

    def _run(
        self,
        message_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Dict:
        """Fetch and parse the message.

        Returns a dict with keys: id, threadId, snippet, body, subject, sender.
        """
        query = (
            self.api_resource.users()
            .messages()
            .get(userId="me", format="raw", id=message_id)
        )
        message_data = query.execute()
        # format="raw" returns the full RFC 2822 message, base64url-encoded.
        raw_message = base64.urlsafe_b64decode(message_data["raw"])
        email_msg = email.message_from_bytes(raw_message)
        subject = email_msg["Subject"]
        sender = email_msg["From"]
        message_body = ""
        if email_msg.is_multipart():
            # Use the first text/plain part that is not an attachment.
            for part in email_msg.walk():
                ctype = part.get_content_type()
                cdispo = str(part.get("Content-Disposition"))
                if ctype == "text/plain" and "attachment" not in cdispo:
                    payload = part.get_payload(decode=True)
                    if payload is None:
                        # No decodable payload for this part; keep looking.
                        continue
                    # Honor the part's declared charset instead of assuming
                    # UTF-8, and replace undecodable bytes rather than raising.
                    charset = part.get_content_charset() or "utf-8"
                    message_body = payload.decode(charset, errors="replace")
                    break
        else:
            payload = email_msg.get_payload(decode=True)
            if payload is not None:
                charset = email_msg.get_content_charset() or "utf-8"
                message_body = payload.decode(charset, errors="replace")
        body = clean_email_body(message_body)
        return {
            "id": message_id,
            "threadId": message_data["threadId"],
            "snippet": message_data["snippet"],
            "body": body,
            "subject": subject,
            "sender": sender,
        }
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_scholar/tool.py | """Tool for the Google Scholar"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_scholar import GoogleScholarAPIWrapper
class GoogleScholarQueryRun(BaseTool):  # type: ignore[override]
    """Tool that queries the Google search API."""

    name: str = "google_scholar"
    description: str = (
        "A wrapper around Google Scholar Search. "
        "Useful for when you need to get information about"
        "research papers from Google Scholar"
        "Input should be a search query."
    )
    # Wrapper object that performs the actual Google Scholar query.
    api_wrapper: GoogleScholarAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool: delegate the search query to the API wrapper."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_scholar/__init__.py | """Google Scholar API Toolkit."""
from langchain_community.tools.google_scholar.tool import GoogleScholarQueryRun
__all__ = ["GoogleScholarQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/bearly/tool.py | import base64
import itertools
import json
import re
from pathlib import Path
from typing import Dict, List, Type
import requests
from langchain_core.tools import Tool
from pydantic import BaseModel, Field
def strip_markdown_code(md_string: str) -> str:
    """Strip markdown code from a string."""
    # Drop an opening fence (1-3 backticks plus the rest of its line) at the
    # very start, then any closing backticks at the very end.
    opening_fence = re.compile(r"^`{1,3}.*?\n", re.DOTALL)
    closing_fence = re.compile(r"`{1,3}$")
    without_opening = opening_fence.sub("", md_string)
    return closing_fence.sub("", without_opening)
def head_file(path: str, n: int) -> List[str]:
    """Get the first n lines of a file."""
    try:
        with open(path, "r") as fh:
            first_lines: List[str] = []
            for idx, line in enumerate(fh):
                if idx >= n:
                    break
                first_lines.append(str(line))
            return first_lines
    except Exception:
        # Best-effort helper: unreadable/missing files yield an empty preview.
        return []
def file_to_base64(path: str) -> str:
    """Convert a file to base64."""
    with open(path, "rb") as fh:
        raw_bytes = fh.read()
    return base64.b64encode(raw_bytes).decode()
class BearlyInterpreterToolArguments(BaseModel):
    """Arguments for the BearlyInterpreterTool."""

    # Raw Python source; markdown fences are stripped before execution anyway.
    python_code: str = Field(
        ...,
        examples=["print('Hello World')"],
        description=(
            "The pure python script to be evaluated. "
            "The contents will be in main.py. "
            "It should not be in markdown format."
        ),
    )
# Static portion of the tool description; BearlyInterpreterTool.description
# appends the dynamic uploaded-file listing at runtime.
base_description = """Evaluates python code in a sandbox environment. \
The environment resets on every execution. \
You must send the whole script every time and print your outputs. \
Script should be pure python code that can be evaluated. \
It should be in python format NOT markdown. \
The code should NOT be wrapped in backticks. \
All python packages including requests, matplotlib, scipy, numpy, pandas, \
etc are available. \
If you have any files outputted write them to "output/" relative to the execution \
path. Output can only be read from the directory, stdout, and stdin. \
Do not use things like plot.show() as it will \
not work instead write them out `output/` and a link to the file will be returned. \
print() any output and results so you can capture the output."""
class FileInfo(BaseModel):
    """Information about a file to be uploaded."""

    # Local path of the file on disk.
    source_path: str
    # Human-readable description surfaced in the tool description.
    description: str
    # Path the file will be available at inside the sandbox.
    target_path: str
class BearlyInterpreterTool:
    """Tool for evaluating python code in a sandbox environment."""

    api_key: str
    endpoint: str = "https://exec.bearly.ai/v1/interpreter"
    name: str = "bearly_interpreter"
    args_schema: Type[BaseModel] = BearlyInterpreterToolArguments

    def __init__(self, api_key: str):
        """Initialize the tool with a Bearly API key."""
        self.api_key = api_key
        # Create the file registry per instance. As a class-level attribute
        # the dict was shared by every BearlyInterpreterTool, so files added
        # to one instance leaked into all others.
        self.files: Dict[str, FileInfo] = {}

    @property
    def file_description(self) -> str:
        """Describe the uploaded files for inclusion in the tool description."""
        if len(self.files) == 0:
            return ""
        lines = ["The following files available in the evaluation environment:"]
        for target_path, file_info in self.files.items():
            peek_content = head_file(file_info.source_path, 4)
            lines.append(
                f"- path: `{target_path}` \n first four lines: {peek_content}"
                f" \n description: `{file_info.description}`"
            )
        return "\n".join(lines)

    @property
    def description(self) -> str:
        """Full tool description: static base text plus the file summary."""
        return (base_description + "\n\n" + self.file_description).strip()

    def make_input_files(self) -> List[dict]:
        """Serialize registered files into the API's input-file payload."""
        files = []
        for target_path, file_info in self.files.items():
            files.append(
                {
                    "pathname": target_path,
                    "contentsBasesixtyfour": file_to_base64(file_info.source_path),
                }
            )
        return files

    def _run(self, python_code: str) -> dict:
        """Execute ``python_code`` in the sandbox.

        Returns a dict with decoded "stdout" and "stderr", plus "fileLinks"
        and "exitCode" from the API response.
        """
        script = strip_markdown_code(python_code)
        # Use the configured endpoint attribute so a customized ``endpoint``
        # is honored (previously the URL was duplicated and hard-coded here).
        resp = requests.post(
            self.endpoint,
            data=json.dumps(
                {
                    "fileContents": script,
                    "inputFiles": self.make_input_files(),
                    "outputDir": "output/",
                    "outputAsLinks": True,
                }
            ),
            headers={"Authorization": self.api_key},
        ).json()
        return {
            "stdout": (
                base64.b64decode(resp["stdoutBasesixtyfour"]).decode()
                if resp["stdoutBasesixtyfour"]
                else ""
            ),
            "stderr": (
                base64.b64decode(resp["stderrBasesixtyfour"]).decode()
                if resp["stderrBasesixtyfour"]
                else ""
            ),
            "fileLinks": resp["fileLinks"],
            "exitCode": resp["exitCode"],
        }

    async def _arun(self, query: str) -> str:
        """Use the tool asynchronously."""
        raise NotImplementedError("custom_search does not support async")

    def add_file(self, source_path: str, target_path: str, description: str) -> None:
        """Register a local file to be uploaded into the sandbox.

        Raises:
            ValueError: If ``target_path`` is already registered or
                ``source_path`` does not exist on disk.
        """
        if target_path in self.files:
            raise ValueError("target_path already exists")
        if not Path(source_path).exists():
            raise ValueError("source_path does not exist")
        self.files[target_path] = FileInfo(
            target_path=target_path, source_path=source_path, description=description
        )

    def clear_files(self) -> None:
        """Remove all registered files."""
        self.files = {}

    # TODO: this is because we can't have a dynamic description
    # because of the base pydantic class
    def as_tool(self) -> Tool:
        """Wrap this object as a LangChain Tool with a frozen description."""
        return Tool.from_function(
            func=self._run,
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/multion/close_session.py | from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class CloseSessionSchema(BaseModel):
    """Input for CloseSessionTool."""

    sessionId: str = Field(
        ...,
        description="""The sessionId, received from one of the createSessions
or updateSessions run before""",
    )
class MultionCloseSession(BaseTool):  # type: ignore[override, override]
    """Tool that closes an existing Multion Browser Window with provided fields.

    Attributes:
        name: The name of the tool. Default: "close_multion_session"
        description: The description of the tool.
        args_schema: The schema for the tool's arguments. Default: UpdateSessionSchema
    """

    name: str = "close_multion_session"
    description: str = """Use this tool to close \
an existing corresponding Multion Browser Window with provided fields. \
Note: SessionId must be received from previous Browser window creation."""
    args_schema: Type[CloseSessionSchema] = CloseSessionSchema
    sessionId: str = ""

    def _run(
        self,
        sessionId: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> None:
        """Close the Multion session identified by ``sessionId``."""
        try:
            try:
                multion.close_session(sessionId)
            except Exception as e:
                # NOTE(review): despite the message, no retry actually
                # happens here — the failure is printed and swallowed.
                print(f"{e}, retrying...")  # noqa: T201
        except Exception as e:
            raise Exception(f"An error occurred: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/multion/update_session.py | from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class UpdateSessionSchema(BaseModel):
    """Input for UpdateSessionTool."""

    sessionId: str = Field(
        ...,
        description="""The sessionID,
received from one of the createSessions run before""",
    )
    query: str = Field(
        ...,
        description="The query to run in multion agent.",
    )
    # Defaults to Google when the model does not supply a better URL.
    url: str = Field(
        "https://www.google.com/",
        description="""The Url to run the agent at. \
Note: accepts only secure links having https://""",
    )
class MultionUpdateSession(BaseTool):  # type: ignore[override, override]
    """Tool that updates an existing Multion Browser Window with provided fields.

    Attributes:
        name: The name of the tool. Default: "update_multion_session"
        description: The description of the tool.
        args_schema: The schema for the tool's arguments. Default: UpdateSessionSchema
    """

    name: str = "update_multion_session"
    description: str = """Use this tool to update \
an existing corresponding Multion Browser Window with provided fields. \
Note: sessionId must be received from previous Browser window creation."""
    args_schema: Type[UpdateSessionSchema] = UpdateSessionSchema
    # Last session id successfully updated by this tool instance.
    sessionId: str = ""

    def _run(
        self,
        sessionId: str,
        query: str,
        url: Optional[str] = "https://www.google.com/",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> dict:
        """Run ``query`` in the existing session and return the agent response."""
        try:
            try:
                response = multion.update_session(
                    sessionId, {"input": query, "url": url}
                )
                content = {"sessionId": sessionId, "Response": response["message"]}
                self.sessionId = sessionId
                return content
            except Exception as e:
                # NOTE(review): no retry actually happens — the error is
                # reported back to the caller in the returned dict instead.
                print(f"{e}, retrying...")  # noqa: T201
                return {"error": f"{e}", "Response": "retrying..."}
        except Exception as e:
            raise Exception(f"An error occurred: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/multion/create_session.py | from typing import TYPE_CHECKING, Optional, Type
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
# This is for linting and IDE typehints
import multion
else:
try:
# We do this so pydantic can resolve the types when instantiating
import multion
except ImportError:
pass
class CreateSessionSchema(BaseModel):
    """Input for CreateSessionTool."""

    query: str = Field(
        ...,
        description="The query to run in multion agent.",
    )
    # Defaults to Google when the model does not supply a better URL.
    url: str = Field(
        "https://www.google.com/",
        description="""The Url to run the agent at. Note: accepts only secure \
links having https://""",
    )
class MultionCreateSession(BaseTool):  # type: ignore[override]
    """Tool that creates a new Multion Browser Window with provided fields.

    Attributes:
        name: The name of the tool. Default: "create_multion_session"
        description: The description of the tool.
        args_schema: The schema for the tool's arguments.
    """

    name: str = "create_multion_session"
    description: str = """
Create a new web browsing session based on a user's command or request. \
The command should include the full info required for the session. \
Also include an url (defaults to google.com if no better option) \
to start the session. \
Use this tool to create a new Browser Window with provided fields. \
Always the first step to run any activities that can be done using browser.
"""
    args_schema: Type[CreateSessionSchema] = CreateSessionSchema

    def _run(
        self,
        query: str,
        url: Optional[str] = "https://www.google.com/",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> dict:
        """Create the session and return its id plus the agent response."""
        try:
            response = multion.new_session({"input": query, "url": url})
            return {
                "sessionId": response["session_id"],
                "Response": response["message"],
            }
        except Exception as e:
            raise Exception(f"An error occurred: {e}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/multion/__init__.py | """MutliOn Client API tools."""
from langchain_community.tools.multion.close_session import MultionCloseSession
from langchain_community.tools.multion.create_session import MultionCreateSession
from langchain_community.tools.multion.update_session import MultionUpdateSession
__all__ = ["MultionCreateSession", "MultionUpdateSession", "MultionCloseSession"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/zenguard/tool.py | import os
from enum import Enum
from typing import Any, Dict, List, Optional, Type
import requests
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, ValidationError, validator
class Detector(str, Enum):
    """Detector kinds accepted by the ZenGuard API; values are API identifiers."""

    ALLOWED_TOPICS = "allowed_subjects"
    BANNED_TOPICS = "banned_subjects"
    PROMPT_INJECTION = "prompt_injection"
    KEYWORDS = "keywords"
    PII = "pii"
    SECRETS = "secrets"
    TOXICITY = "toxicity"
class DetectorAPI(str, Enum):
    """Per-detector endpoint paths; member names mirror ``Detector`` for lookup."""

    ALLOWED_TOPICS = "v1/detect/topics/allowed"
    BANNED_TOPICS = "v1/detect/topics/banned"
    PROMPT_INJECTION = "v1/detect/prompt_injection"
    KEYWORDS = "v1/detect/keywords"
    PII = "v1/detect/pii"
    SECRETS = "v1/detect/secrets"
    TOXICITY = "v1/detect/toxicity"
class ZenGuardInput(BaseModel):
    """Input schema for ZenGuardTool."""

    # At least one prompt is required (min_length applies to the list).
    prompts: List[str] = Field(
        ...,
        min_length=1,
        description="Prompt to check",
    )
    detectors: List[Detector] = Field(
        ...,
        min_length=1,
        description="List of detectors by which you want to check the prompt",
    )
    # Only meaningful when more than one detector is requested.
    in_parallel: bool = Field(
        default=True,
        description="Run prompt detection by the detector in parallel or sequentially",
    )
class ZenGuardTool(BaseTool):  # type: ignore[override, override]
    """Tool that screens prompts with the ZenGuard AI guardrails API."""

    name: str = "ZenGuard"
    description: str = (
        "ZenGuard AI integration package. ZenGuard AI - the fastest GenAI guardrails."
    )
    args_schema: Type[BaseModel] = ZenGuardInput
    return_direct: bool = True
    zenguard_api_key: Optional[str] = Field(default=None)
    _ZENGUARD_API_URL_ROOT: str = "https://api.zenguard.ai/"
    _ZENGUARD_API_KEY_ENV_NAME: str = "ZENGUARD_API_KEY"

    @validator("zenguard_api_key", pre=True, always=True, check_fields=False)
    def set_api_key(cls, v: str) -> str:
        """Resolve the API key, falling back to the environment variable."""
        if v is None:
            v = os.getenv(cls._ZENGUARD_API_KEY_ENV_NAME)
        if v is None:
            # Raise ValueError rather than ValidationError: pydantic's
            # ValidationError cannot be constructed from a bare message
            # (attempting to raised a TypeError instead of the intended
            # error), and validators are expected to raise ValueError so
            # pydantic can wrap it properly.
            raise ValueError(
                "The zenguard_api_key tool option must be set either "
                "by passing zenguard_api_key to the tool or by setting "
                f"the {cls._ZENGUARD_API_KEY_ENV_NAME} environment variable"
            )
        return v

    @property
    def _api_key(self) -> str:
        """Return the resolved API key, or raise with setup instructions."""
        if self.zenguard_api_key is None:
            raise ValueError(
                "API key is required for the ZenGuardTool. "
                "Please provide the API key by either:\n"
                "1. Manually specifying it when initializing the tool: "
                "ZenGuardTool(zenguard_api_key='your_api_key')\n"
                "2. Setting it as an environment variable:"
                f" {self._ZENGUARD_API_KEY_ENV_NAME}"
            )
        return self.zenguard_api_key

    def _run(
        self,
        prompts: List[str],
        detectors: List[Detector],
        in_parallel: bool = True,
    ) -> Dict[str, Any]:
        """Send the prompts to the appropriate detection endpoint.

        Returns the API's JSON response, or ``{"error": ...}`` on HTTP
        failure or timeout.
        """
        try:
            postfix = None
            json: Optional[Dict[str, Any]] = None
            # A single detector hits its dedicated endpoint; multiple
            # detectors go through the combined v1/detect endpoint.
            if len(detectors) == 1:
                postfix = self._convert_detector_to_api(detectors[0])
                json = {"messages": prompts}
            else:
                postfix = "v1/detect"
                json = {
                    "messages": prompts,
                    "in_parallel": in_parallel,
                    "detectors": detectors,
                }
            response = requests.post(
                self._ZENGUARD_API_URL_ROOT + postfix,
                json=json,
                headers={"x-api-key": self._api_key},
                timeout=5,
            )
            response.raise_for_status()
            return response.json()
        except (requests.HTTPError, requests.Timeout) as e:
            return {"error": str(e)}

    def _convert_detector_to_api(self, detector: Detector) -> str:
        """Map a ``Detector`` member to its API endpoint path."""
        return DetectorAPI[detector.name].value
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/zenguard/__init__.py | from langchain_community.tools.zenguard.tool import (
Detector,
ZenGuardInput,
ZenGuardTool,
)
__all__ = [
"ZenGuardTool",
"Detector",
"ZenGuardInput",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/databricks/_execution.py | import inspect
import json
import logging
import os
import time
from dataclasses import dataclass
from io import StringIO
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional
if TYPE_CHECKING:
from databricks.sdk import WorkspaceClient
from databricks.sdk.service.catalog import FunctionInfo
from databricks.sdk.service.sql import StatementParameterListItem, StatementState
# Reserved key in tool-call arguments used to pass options through to
# statement_execution.execute_statement (it is never forwarded to the
# UC function itself).
EXECUTE_FUNCTION_ARG_NAME = "__execution_args__"
# Defaults for execute_statement; individual keys can be overridden via
# the reserved argument above.
DEFAULT_EXECUTE_FUNCTION_ARGS = {
    "wait_timeout": "30s",
    "row_limit": 100,
    "byte_limit": 4096,
}
# Environment variable (seconds, as a string) bounding client-side polling
# for statement results.
UC_TOOL_CLIENT_EXECUTION_TIMEOUT = "UC_TOOL_CLIENT_EXECUTION_TIMEOUT"
DEFAULT_UC_TOOL_CLIENT_EXECUTION_TIMEOUT = "120"

_logger = logging.getLogger(__name__)
def is_scalar(function: "FunctionInfo") -> bool:
    """Return True if the UC function returns a scalar value (not a table)."""
    from databricks.sdk.service.catalog import ColumnTypeName

    return function.data_type != ColumnTypeName.TABLE_TYPE
@dataclass
class ParameterizedStatement:
    """A SQL statement plus the parameters to bind when executing it."""

    # SQL text containing :name placeholders.
    statement: str
    # Bound values for the placeholders above.
    parameters: List["StatementParameterListItem"]
@dataclass
class FunctionExecutionResult:
    """
    Result of executing a function.
    We always use a string to present the result value for AI model to consume.
    """

    # Set when execution failed; the other fields stay None in that case.
    error: Optional[str] = None
    format: Optional[Literal["SCALAR", "CSV"]] = None
    value: Optional[str] = None
    truncated: Optional[bool] = None

    def to_json(self) -> str:
        """Serialize only the fields that are set, as a JSON object string."""
        populated = {
            name: val for name, val in self.__dict__.items() if val is not None
        }
        return json.dumps(populated)
def get_execute_function_sql_stmt(
    function: "FunctionInfo", json_params: Dict[str, Any]
) -> ParameterizedStatement:
    """Build a parameterized SELECT statement that invokes a UC function.

    Args:
        function: Metadata of the Unity Catalog function to call.
        json_params: JSON-decoded argument values keyed by parameter name.

    Returns:
        ParameterizedStatement: The SQL text plus the parameters to bind.

    Raises:
        ValueError: If a required parameter is missing from ``json_params``.
    """
    from databricks.sdk.service.catalog import ColumnTypeName
    from databricks.sdk.service.sql import StatementParameterListItem

    parts = []
    output_params = []
    if is_scalar(function):
        # TODO: IDENTIFIER(:function) did not work
        parts.append(f"SELECT {function.full_name}(")
    else:
        # Table-valued functions are invoked via SELECT * FROM.
        parts.append(f"SELECT * FROM {function.full_name}(")
    if function.input_params is None or function.input_params.parameters is None:
        assert (
            not json_params
        ), "Function has no parameters but parameters were provided."
    else:
        args = []
        use_named_args = False
        for p in function.input_params.parameters:
            if p.name not in json_params:
                if p.parameter_default is not None:
                    # A defaulted parameter was omitted: every subsequent
                    # argument must be passed by name to keep positions
                    # unambiguous.
                    use_named_args = True
                else:
                    raise ValueError(
                        f"Parameter {p.name} is required but not provided."
                    )
            else:
                arg_clause = ""
                if use_named_args:
                    arg_clause += f"{p.name} => "
                json_value = json_params[p.name]
                if p.type_name in (
                    ColumnTypeName.ARRAY,
                    ColumnTypeName.MAP,
                    ColumnTypeName.STRUCT,
                ):
                    # Use from_json to restore values of complex types.
                    json_value_str = json.dumps(json_value)
                    # TODO: parametrize type
                    arg_clause += f"from_json(:{p.name}, '{p.type_text}')"
                    output_params.append(
                        StatementParameterListItem(name=p.name, value=json_value_str)
                    )
                elif p.type_name == ColumnTypeName.BINARY:
                    # Use unbase64 to restore binary values.
                    arg_clause += f"unbase64(:{p.name})"
                    output_params.append(
                        StatementParameterListItem(name=p.name, value=json_value)
                    )
                else:
                    # Simple types bind directly with an explicit SQL type.
                    arg_clause += f":{p.name}"
                    output_params.append(
                        StatementParameterListItem(
                            name=p.name, value=json_value, type=p.type_text
                        )
                    )
                args.append(arg_clause)
        parts.append(",".join(args))
    parts.append(")")
    # TODO: check extra params in kwargs
    statement = "".join(parts)
    return ParameterizedStatement(statement=statement, parameters=output_params)
def execute_function(
    ws: "WorkspaceClient",
    warehouse_id: str,
    function: "FunctionInfo",
    parameters: Dict[str, Any],
) -> FunctionExecutionResult:
    """
    Execute a function with the given arguments and return the result.
    """
    try:
        import pandas as pd
    except ImportError as e:
        raise ImportError(
            "Could not import pandas python package. "
            "Please install it with `pip install pandas`."
        ) from e
    from databricks.sdk.service.sql import StatementState

    # The reserved __execution_args__ key would collide with a real function
    # parameter of the same name, so reject such functions up front.
    if (
        function.input_params
        and function.input_params.parameters
        and any(
            p.name == EXECUTE_FUNCTION_ARG_NAME
            for p in function.input_params.parameters
        )
    ):
        raise ValueError(
            "Parameter name conflicts with the reserved argument name for executing "
            f"functions: {EXECUTE_FUNCTION_ARG_NAME}. "
            f"Please rename the parameter {EXECUTE_FUNCTION_ARG_NAME}."
        )
    # avoid modifying the original dict
    execute_statement_args = {**DEFAULT_EXECUTE_FUNCTION_ARGS}
    allowed_execute_statement_args = inspect.signature(
        ws.statement_execution.execute_statement
    ).parameters
    # Only validate caller overrides when execute_statement does not accept
    # *args/**kwargs (otherwise any keyword is potentially legal).
    if not any(
        p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
        for p in allowed_execute_statement_args.values()
    ):
        invalid_params = set()
        # Pop the reserved key so it is never forwarded to the UC function.
        passed_execute_statement_args = parameters.pop(EXECUTE_FUNCTION_ARG_NAME, {})
        for k, v in passed_execute_statement_args.items():
            if k in allowed_execute_statement_args:
                execute_statement_args[k] = v
            else:
                invalid_params.add(k)
        if invalid_params:
            raise ValueError(
                f"Invalid parameters for executing functions: {invalid_params}. "
                f"Allowed parameters are: {allowed_execute_statement_args.keys()}."
            )
    # TODO: async so we can run functions in parallel
    parametrized_statement = get_execute_function_sql_stmt(function, parameters)
    response = ws.statement_execution.execute_statement(
        statement=parametrized_statement.statement,
        warehouse_id=warehouse_id,
        parameters=parametrized_statement.parameters,
        **execute_statement_args,  # type: ignore
    )
    if response.status and job_pending(response.status.state) and response.statement_id:
        # The statement is still running after the server-side wait: poll
        # with exponential backoff (capped by the remaining budget) until it
        # finishes or the client-side timeout elapses.
        statement_id = response.statement_id
        wait_time = 0
        retry_cnt = 0
        client_execution_timeout = int(
            os.environ.get(
                UC_TOOL_CLIENT_EXECUTION_TIMEOUT,
                DEFAULT_UC_TOOL_CLIENT_EXECUTION_TIMEOUT,
            )
        )
        while wait_time < client_execution_timeout:
            wait = min(2**retry_cnt, client_execution_timeout - wait_time)
            _logger.debug(
                f"Retrying {retry_cnt} time to get statement execution "
                f"status after {wait} seconds."
            )
            time.sleep(wait)
            response = ws.statement_execution.get_statement(statement_id)  # type: ignore
            if response.status is None or not job_pending(response.status.state):
                break
            wait_time += wait
            retry_cnt += 1
        if response.status and job_pending(response.status.state):
            # Budget exhausted: return a readable error string rather than
            # raising, so the AI model consuming the result can react.
            return FunctionExecutionResult(
                error=f"Statement execution is still pending after {wait_time} "
                "seconds. Please increase the wait_timeout argument for executing "
                f"the function or increase {UC_TOOL_CLIENT_EXECUTION_TIMEOUT} "
                "environment variable for increasing retrying time, default is "
                f"{DEFAULT_UC_TOOL_CLIENT_EXECUTION_TIMEOUT} seconds."
            )
    assert response.status is not None, f"Statement execution failed: {response}"
    if response.status.state != StatementState.SUCCEEDED:
        error = response.status.error
        assert (
            error is not None
        ), f"Statement execution failed but no error message was provided: {response}"
        return FunctionExecutionResult(error=f"{error.error_code}: {error.message}")
    manifest = response.manifest
    assert manifest is not None
    truncated = manifest.truncated
    result = response.result
    assert (
        result is not None
    ), "Statement execution succeeded but no result was provided."
    data_array = result.data_array
    if is_scalar(function):
        # Scalar functions: the single value sits at row 0, column 0.
        value = None
        if data_array and len(data_array) > 0 and len(data_array[0]) > 0:
            value = str(data_array[0][0])  # type: ignore
        return FunctionExecutionResult(
            format="SCALAR", value=value, truncated=truncated
        )
    else:
        # Table functions: render all rows as CSV for the model to consume.
        schema = manifest.schema
        assert (
            schema is not None and schema.columns is not None
        ), "Statement execution succeeded but no schema was provided."
        columns = [c.name for c in schema.columns]
        if data_array is None:
            data_array = []
        pdf = pd.DataFrame.from_records(data_array, columns=columns)
        csv_buffer = StringIO()
        pdf.to_csv(csv_buffer, index=False)
        return FunctionExecutionResult(
            format="CSV", value=csv_buffer.getvalue(), truncated=truncated
        )
def job_pending(state: Optional["StatementState"]) -> bool:
    """Return True while a statement execution has not yet finished."""
    from databricks.sdk.service.sql import StatementState

    in_flight = (StatementState.PENDING, StatementState.RUNNING)
    return state in in_flight
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/databricks/tool.py | import json
from datetime import date, datetime
from decimal import Decimal
from hashlib import md5
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
from langchain_core.tools import BaseTool, StructuredTool
from langchain_core.tools.base import BaseToolkit
from pydantic import BaseModel, Field, create_model
from typing_extensions import Self
if TYPE_CHECKING:
from databricks.sdk.service.catalog import FunctionInfo
from pydantic import ConfigDict
from langchain_community.tools.databricks._execution import execute_function
def _uc_type_to_pydantic_type(uc_type_json: Union[str, Dict[str, Any]]) -> Type:
mapping = {
"long": int,
"binary": bytes,
"boolean": bool,
"date": date,
"double": float,
"float": float,
"integer": int,
"short": int,
"string": str,
"timestamp": datetime,
"timestamp_ntz": datetime,
"byte": int,
}
if isinstance(uc_type_json, str):
if uc_type_json in mapping:
return mapping[uc_type_json]
else:
if uc_type_json.startswith("decimal"):
return Decimal
elif uc_type_json == "void" or uc_type_json.startswith("interval"):
raise TypeError(f"Type {uc_type_json} is not supported.")
else:
raise TypeError(
f"Unknown type {uc_type_json}. Try upgrading this package."
)
else:
assert isinstance(uc_type_json, dict)
tpe = uc_type_json["type"]
if tpe == "array":
element_type = _uc_type_to_pydantic_type(uc_type_json["elementType"])
if uc_type_json["containsNull"]:
element_type = Optional[element_type] # type: ignore
return List[element_type] # type: ignore
elif tpe == "map":
key_type = uc_type_json["keyType"]
assert key_type == "string", TypeError(
f"Only support STRING key type for MAP but got {key_type}."
)
value_type = _uc_type_to_pydantic_type(uc_type_json["valueType"])
if uc_type_json["valueContainsNull"]:
value_type: Type = Optional[value_type] # type: ignore
return Dict[str, value_type] # type: ignore
elif tpe == "struct":
fields = {}
for field in uc_type_json["fields"]:
field_type = _uc_type_to_pydantic_type(field["type"])
if field.get("nullable"):
field_type = Optional[field_type] # type: ignore
comment = (
uc_type_json["metadata"].get("comment")
if "metadata" in uc_type_json
else None
)
fields[field["name"]] = (field_type, Field(..., description=comment))
uc_type_json_str = json.dumps(uc_type_json, sort_keys=True)
type_hash = md5(uc_type_json_str.encode()).hexdigest()[:8]
return create_model(f"Struct_{type_hash}", **fields) # type: ignore
else:
raise TypeError(f"Unknown type {uc_type_json}. Try upgrading this package.")
def _generate_args_schema(function: "FunctionInfo") -> Type[BaseModel]:
    """Build a pydantic model describing the UC function's input parameters.

    Returns the bare ``BaseModel`` when the function takes no parameters.
    Parameter comments become field descriptions; parameters with a default
    are made Optional with ``None`` as the pydantic default.
    """
    if function.input_params is None:
        # No parameters: an empty schema is sufficient.
        return BaseModel
    params = function.input_params.parameters
    assert params is not None
    fields = {}
    for p in params:
        assert p.type_json is not None
        # p.type_json is the full parameter JSON; the Spark type sits under "type".
        type_json = json.loads(p.type_json)["type"]
        pydantic_type = _uc_type_to_pydantic_type(type_json)
        description = p.comment
        default: Any = ...
        if p.parameter_default:
            # Defaulted parameters become optional; the actual default value
            # is only surfaced in the description (see TODO below).
            pydantic_type = Optional[pydantic_type]  # type: ignore
            default = None
            # TODO: Convert default value string to the correct type.
            # We might need to use statement execution API
            # to get the JSON representation of the value.
            default_description = f"(Default: {p.parameter_default})"
            if description:
                description += f" {default_description}"
            else:
                description = default_description
        fields[p.name] = (
            pydantic_type,
            Field(default=default, description=description),
        )
    # Model name encodes the fully qualified function name for debuggability.
    return create_model(
        f"{function.catalog_name}__{function.schema_name}__{function.name}__params",
        **fields,  # type: ignore
    )
def _get_tool_name(function: "FunctionInfo") -> str:
tool_name = f"{function.catalog_name}__{function.schema_name}__{function.name}"[
-64:
]
return tool_name
def _get_default_workspace_client() -> Any:
    """Create a WorkspaceClient configured from the ambient environment.

    Raises:
        ImportError: If the ``databricks-sdk`` package is not installed.
    """
    try:
        from databricks.sdk import WorkspaceClient
    except ImportError as e:
        message = (
            "Could not import databricks-sdk python package. "
            "Please install it with `pip install databricks-sdk`."
        )
        raise ImportError(message) from e
    return WorkspaceClient()
class UCFunctionToolkit(BaseToolkit):
    """Toolkit exposing Unity Catalog (UC) functions as LangChain tools.

    Each included function is wrapped as a ``StructuredTool`` whose invocation
    executes the UC function on a Databricks SQL warehouse via
    ``execute_function``.
    """

    warehouse_id: str = Field(
        description="The ID of a Databricks SQL Warehouse to execute functions."
    )
    workspace_client: Any = Field(
        default_factory=_get_default_workspace_client,
        description="Databricks workspace client.",
    )
    # Maps a UC function's fully qualified name to its wrapped tool.
    tools: Dict[str, BaseTool] = Field(default_factory=dict)
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    def include(self, *function_names: str, **kwargs: Any) -> Self:
        """
        Includes UC functions to the toolkit.

        Args:
            function_names: A list of UC function names in the format
                "catalog_name.schema_name.function_name" or
                "catalog_name.schema_name.*".
                If the function name ends with ".*",
                all functions in the schema will be added.
            kwargs: Extra arguments to pass to StructuredTool, e.g., `return_direct`.

        Returns:
            This toolkit, to allow chained calls.
        """
        for name in function_names:
            if name.endswith(".*"):
                catalog_name, schema_name = name[:-2].split(".")
                # TODO: handle pagination, warn and truncate if too many
                functions = self.workspace_client.functions.list(
                    catalog_name=catalog_name, schema_name=schema_name
                )
                for f in functions:
                    assert f.full_name is not None
                    # Recurse with the concrete function name; the else
                    # branch below de-duplicates.
                    self.include(f.full_name, **kwargs)
            else:
                # Skip names that were already turned into tools.
                if name not in self.tools:
                    self.tools[name] = self._make_tool(name, **kwargs)
        return self

    def _make_tool(self, function_name: str, **kwargs: Any) -> BaseTool:
        """Fetch the UC function's metadata and wrap it as a StructuredTool."""
        function = self.workspace_client.functions.get(function_name)
        name = _get_tool_name(function)
        description = function.comment or ""
        args_schema = _generate_args_schema(function)

        def func(*args: Any, **kwargs: Any) -> str:
            # TODO: We expect all named args and ignore args.
            # Non-empty args show up when the function has no parameters.
            # Round-trip through JSON (default=str) so non-JSON-native values
            # such as dates become strings before execution.
            args_json = json.loads(json.dumps(kwargs, default=str))
            result = execute_function(
                ws=self.workspace_client,
                warehouse_id=self.warehouse_id,
                function=function,
                parameters=args_json,
            )
            return result.to_json()

        return StructuredTool(
            name=name,
            description=description,
            args_schema=args_schema,
            func=func,
            **kwargs,
        )

    def get_tools(self) -> List[BaseTool]:
        """Return all tools added via ``include``."""
        return list(self.tools.values())
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/databricks/__init__.py | from langchain_community.tools.databricks.tool import UCFunctionToolkit
__all__ = ["UCFunctionToolkit"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/reddit_search/tool.py | """Tool for the Reddit search API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.reddit_search import RedditSearchAPIWrapper
class RedditSearchSchema(BaseModel):
    """Input for Reddit search.

    All fields are strings as supplied by the model; ``limit`` is converted
    to an int by RedditSearchRun._run before calling the API wrapper.
    """

    query: str = Field(
        description="should be query string that post title should \
    contain, or '*' if anything is allowed."
    )
    sort: str = Field(
        description='should be sort method, which is one of: "relevance" \
    , "hot", "top", "new", or "comments".'
    )
    time_filter: str = Field(
        description='should be time period to filter by, which is \
    one of "all", "day", "hour", "month", "week", or "year"'
    )
    subreddit: str = Field(
        description='should be name of subreddit, like "all" for \
    r/all'
    )
    limit: str = Field(
        description="a positive integer indicating the maximum number \
    of results to return"
    )
class RedditSearchRun(BaseTool):  # type: ignore[override, override]
    """Tool that queries for posts on a subreddit."""

    name: str = "reddit_search"
    description: str = (
        "A tool that searches for posts on Reddit."
        "Useful when you need to know post information on a subreddit."
    )
    api_wrapper: RedditSearchAPIWrapper = Field(default_factory=RedditSearchAPIWrapper)  # type: ignore[arg-type]
    args_schema: Type[BaseModel] = RedditSearchSchema

    def _run(
        self,
        query: str,
        sort: str,
        time_filter: str,
        subreddit: str,
        limit: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool.

        Raises:
            ValueError: If ``limit`` is not a valid integer string.
        """
        # `limit` arrives as a string (see RedditSearchSchema) and is cast
        # to int for the API wrapper.
        return self.api_wrapper.run(
            query=query,
            sort=sort,
            time_filter=time_filter,
            subreddit=subreddit,
            limit=int(limit),
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/steamship_image_generation/tool.py | """This tool allows agents to generate images using Steamship.
Steamship offers access to different third party image generation APIs
using a single API key.
Today the following models are supported:
- Dall-E
- Stable Diffusion
To use this tool, you must first set as environment variables:
STEAMSHIP_API_KEY
```
"""
from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import get_from_dict_or_env
from pydantic import model_validator
from langchain_community.tools.steamship_image_generation.utils import make_image_public
if TYPE_CHECKING:
from steamship import Steamship
class ModelName(str, Enum):
    """Supported Image Models for generation."""

    # Values double as Steamship plugin handles (passed to use_plugin in
    # SteamshipImageGenerationTool._run).
    DALL_E = "dall-e"
    STABLE_DIFFUSION = "stable-diffusion"
# Image sizes accepted by each model; enforced by
# SteamshipImageGenerationTool.validate_size.
SUPPORTED_IMAGE_SIZES = {
    ModelName.DALL_E: ("256x256", "512x512", "1024x1024"),
    ModelName.STABLE_DIFFUSION: ("512x512", "768x768"),
}
class SteamshipImageGenerationTool(BaseTool):  # type: ignore[override]
    """Tool used to generate images from a text-prompt."""

    # Which third-party model to run; its value is the Steamship plugin handle.
    model_name: ModelName
    # Output resolution; must appear in SUPPORTED_IMAGE_SIZES[model_name].
    size: Optional[str] = "512x512"
    # Authenticated Steamship client, injected by validate_environment.
    steamship: Steamship
    # If True, _run returns a public URL; otherwise the generated block's UUID.
    return_urls: Optional[bool] = False
    name: str = "generate_image"
    description: str = (
        "Useful for when you need to generate an image."
        "Input: A detailed text-2-image prompt describing an image"
        "Output: the UUID of a generated image"
    )

    @model_validator(mode="before")
    @classmethod
    def validate_size(cls, values: Dict) -> Any:
        """Reject sizes the selected model does not support."""
        if "size" in values:
            size = values["size"]
            model_name = values["model_name"]
            if size not in SUPPORTED_IMAGE_SIZES[model_name]:
                raise RuntimeError(f"size {size} is not supported by {model_name}")
        return values

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        # The key may come from the `steamship_api_key` input or the
        # STEAMSHIP_API_KEY environment variable.
        steamship_api_key = get_from_dict_or_env(
            values, "steamship_api_key", "STEAMSHIP_API_KEY"
        )
        try:
            from steamship import Steamship
        except ImportError:
            raise ImportError(
                "steamship is not installed. "
                "Please install it with `pip install steamship`"
            )
        steamship = Steamship(
            api_key=steamship_api_key,
        )
        values["steamship"] = steamship
        # Drop the raw key so it is not retained on the model instance.
        if "steamship_api_key" in values:
            del values["steamship_api_key"]
        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        image_generator = self.steamship.use_plugin(
            plugin_handle=self.model_name.value, config={"n": 1, "size": self.size}
        )
        # Generation runs as an async task; block until it completes.
        task = image_generator.generate(text=query, append_output_to_file=True)
        task.wait()
        blocks = task.output.blocks
        if len(blocks) > 0:
            if self.return_urls:
                return make_image_public(self.steamship, blocks[0])
            else:
                return blocks[0].id
        raise RuntimeError(f"[{self.name}] Tool unable to generate image!")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/steamship_image_generation/utils.py | """Steamship Utils."""
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from steamship import Block, Steamship
def make_image_public(client: Steamship, block: Block) -> str:
    """Upload a block to a signed URL and return the public URL."""
    try:
        from steamship.data.workspace import SignedUrl
        from steamship.utils.signed_urls import upload_to_signed_url
    except ImportError:
        raise ImportError(
            "The make_image_public function requires the steamship"
            " package to be installed. Please install steamship"
            " with `pip install --upgrade steamship`"
        )

    # Both URLs target the same random filepath in the plugin-data bucket.
    filepath = str(uuid.uuid4())

    def _signed_url(operation: "SignedUrl.Operation") -> str:
        return (
            client.get_workspace()
            .create_signed_url(
                SignedUrl.Request(
                    bucket=SignedUrl.Bucket.PLUGIN_DATA,
                    filepath=filepath,
                    operation=operation,
                )
            )
            .signed_url
        )

    write_url = _signed_url(SignedUrl.Operation.WRITE)
    read_url = _signed_url(SignedUrl.Operation.READ)
    upload_to_signed_url(write_url, block.raw())
    return read_url
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/steamship_image_generation/__init__.py | """Tool to generate an image."""
from langchain_community.tools.steamship_image_generation.tool import (
SteamshipImageGenerationTool,
)
__all__ = ["SteamshipImageGenerationTool"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/sleep/tool.py | """Tool for agent to sleep."""
from asyncio import sleep as asleep
from time import sleep
from typing import Optional, Type
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
class SleepInput(BaseModel):
    """Input for SleepTool."""

    # Duration of the sleep; whole seconds only.
    sleep_time: int = Field(..., description="Time to sleep in seconds")
class SleepTool(BaseTool):  # type: ignore[override]
    """Tool that adds the capability to sleep."""

    name: str = "sleep"
    args_schema: Type[BaseModel] = SleepInput
    description: str = "Make agent sleep for a specified number of seconds."

    @staticmethod
    def _sleep_message(sleep_time: int) -> str:
        # Single source of truth for the result string of both code paths.
        return f"Agent slept for {sleep_time} seconds."

    def _run(
        self,
        sleep_time: int,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Block for ``sleep_time`` seconds, then report how long was slept."""
        sleep(sleep_time)
        return self._sleep_message(sleep_time)

    async def _arun(
        self,
        sleep_time: int,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Await ``sleep_time`` seconds without blocking the event loop."""
        await asleep(sleep_time)
        return self._sleep_message(sleep_time)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/sleep/__init__.py | """Sleep tool."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/e2b_data_analysis/unparse.py | # mypy: disable-error-code=no-untyped-def
# Because Python >3.9 doesn't support ast.unparse,
# we copied the unparse functionality from here:
# https://github.com/python/cpython/blob/3.8/Tools/parser/unparse.py
"Usage: unparse.py <path to source file>"
import ast
import io
import sys
import tokenize
# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR.
INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
def interleave(inter, f, seq):
    """Call f on each item in seq, calling inter() in between."""
    for position, item in enumerate(seq):
        # inter() runs before every element except the first.
        if position:
            inter()
        f(item)
class Unparser:
"""Traverse an AST and
output source code for the abstract syntax; original formatting
is disregarded."""
def __init__(self, tree, file=sys.stdout):
"""Unparser(tree, file=sys.stdout) -> None.
Print the source for tree to file."""
self.f = file
self._indent = 0
self.dispatch(tree)
self.f.flush()
def fill(self, text=""):
"Indent a piece of text, according to the current indentation level"
self.f.write("\n" + " " * self._indent + text)
def write(self, text):
"Append a piece of text to the current line."
self.f.write(text)
def enter(self):
"Print ':', and increase the indentation."
self.write(":")
self._indent += 1
def leave(self):
"Decrease the indentation level."
self._indent -= 1
def dispatch(self, tree):
"Dispatcher function, dispatching tree type T to method _T."
if isinstance(tree, list):
for t in tree:
self.dispatch(t)
return
meth = getattr(self, "_" + tree.__class__.__name__)
meth(tree)
############### Unparsing methods ######################
# There should be one method per concrete grammar type #
# Constructors should be grouped by sum type. Ideally, #
# this would follow the order in the grammar, but #
# currently doesn't. #
########################################################
def _Module(self, tree):
for stmt in tree.body:
self.dispatch(stmt)
# stmt
def _Expr(self, tree):
self.fill()
self.dispatch(tree.value)
def _NamedExpr(self, tree):
self.write("(")
self.dispatch(tree.target)
self.write(" := ")
self.dispatch(tree.value)
self.write(")")
def _Import(self, t):
self.fill("import ")
interleave(lambda: self.write(", "), self.dispatch, t.names)
def _ImportFrom(self, t):
self.fill("from ")
self.write("." * t.level)
if t.module:
self.write(t.module)
self.write(" import ")
interleave(lambda: self.write(", "), self.dispatch, t.names)
def _Assign(self, t):
self.fill()
for target in t.targets:
self.dispatch(target)
self.write(" = ")
self.dispatch(t.value)
def _AugAssign(self, t):
self.fill()
self.dispatch(t.target)
self.write(" " + self.binop[t.op.__class__.__name__] + "= ")
self.dispatch(t.value)
def _AnnAssign(self, t):
self.fill()
if not t.simple and isinstance(t.target, ast.Name):
self.write("(")
self.dispatch(t.target)
if not t.simple and isinstance(t.target, ast.Name):
self.write(")")
self.write(": ")
self.dispatch(t.annotation)
if t.value:
self.write(" = ")
self.dispatch(t.value)
def _Return(self, t):
self.fill("return")
if t.value:
self.write(" ")
self.dispatch(t.value)
def _Pass(self, t):
self.fill("pass")
def _Break(self, t):
self.fill("break")
def _Continue(self, t):
self.fill("continue")
def _Delete(self, t):
self.fill("del ")
interleave(lambda: self.write(", "), self.dispatch, t.targets)
def _Assert(self, t):
self.fill("assert ")
self.dispatch(t.test)
if t.msg:
self.write(", ")
self.dispatch(t.msg)
def _Global(self, t):
self.fill("global ")
interleave(lambda: self.write(", "), self.write, t.names)
def _Nonlocal(self, t):
self.fill("nonlocal ")
interleave(lambda: self.write(", "), self.write, t.names)
def _Await(self, t):
self.write("(")
self.write("await")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _Yield(self, t):
self.write("(")
self.write("yield")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _YieldFrom(self, t):
self.write("(")
self.write("yield from")
if t.value:
self.write(" ")
self.dispatch(t.value)
self.write(")")
def _Raise(self, t):
self.fill("raise")
if not t.exc:
assert not t.cause
return
self.write(" ")
self.dispatch(t.exc)
if t.cause:
self.write(" from ")
self.dispatch(t.cause)
def _Try(self, t):
self.fill("try")
self.enter()
self.dispatch(t.body)
self.leave()
for ex in t.handlers:
self.dispatch(ex)
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
if t.finalbody:
self.fill("finally")
self.enter()
self.dispatch(t.finalbody)
self.leave()
def _ExceptHandler(self, t):
self.fill("except")
if t.type:
self.write(" ")
self.dispatch(t.type)
if t.name:
self.write(" as ")
self.write(t.name)
self.enter()
self.dispatch(t.body)
self.leave()
def _ClassDef(self, t):
self.write("\n")
for deco in t.decorator_list:
self.fill("@")
self.dispatch(deco)
self.fill("class " + t.name)
self.write("(")
comma = False
for e in t.bases:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
for e in t.keywords:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
self.write(")")
self.enter()
self.dispatch(t.body)
self.leave()
def _FunctionDef(self, t):
self.__FunctionDef_helper(t, "def")
def _AsyncFunctionDef(self, t):
self.__FunctionDef_helper(t, "async def")
def __FunctionDef_helper(self, t, fill_suffix):
self.write("\n")
for deco in t.decorator_list:
self.fill("@")
self.dispatch(deco)
def_str = fill_suffix + " " + t.name + "("
self.fill(def_str)
self.dispatch(t.args)
self.write(")")
if t.returns:
self.write(" -> ")
self.dispatch(t.returns)
self.enter()
self.dispatch(t.body)
self.leave()
def _For(self, t):
self.__For_helper("for ", t)
def _AsyncFor(self, t):
self.__For_helper("async for ", t)
def __For_helper(self, fill, t):
self.fill(fill)
self.dispatch(t.target)
self.write(" in ")
self.dispatch(t.iter)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _If(self, t):
self.fill("if ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
# collapse nested ifs into equivalent elifs.
while t.orelse and len(t.orelse) == 1 and isinstance(t.orelse[0], ast.If):
t = t.orelse[0]
self.fill("elif ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
# final else
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _While(self, t):
self.fill("while ")
self.dispatch(t.test)
self.enter()
self.dispatch(t.body)
self.leave()
if t.orelse:
self.fill("else")
self.enter()
self.dispatch(t.orelse)
self.leave()
def _With(self, t):
self.fill("with ")
interleave(lambda: self.write(", "), self.dispatch, t.items)
self.enter()
self.dispatch(t.body)
self.leave()
def _AsyncWith(self, t):
self.fill("async with ")
interleave(lambda: self.write(", "), self.dispatch, t.items)
self.enter()
self.dispatch(t.body)
self.leave()
# expr
def _JoinedStr(self, t):
self.write("f")
string = io.StringIO()
self._fstring_JoinedStr(t, string.write)
self.write(repr(string.getvalue()))
def _FormattedValue(self, t):
self.write("f")
string = io.StringIO()
self._fstring_FormattedValue(t, string.write)
self.write(repr(string.getvalue()))
def _fstring_JoinedStr(self, t, write):
for value in t.values:
meth = getattr(self, "_fstring_" + type(value).__name__)
meth(value, write)
def _fstring_Constant(self, t, write):
assert isinstance(t.value, str)
value = t.value.replace("{", "{{").replace("}", "}}")
write(value)
def _fstring_FormattedValue(self, t, write):
write("{")
expr = io.StringIO()
Unparser(t.value, expr)
expr = expr.getvalue().rstrip("\n")
if expr.startswith("{"):
write(" ") # Separate pair of opening brackets as "{ {"
write(expr)
if t.conversion != -1:
conversion = chr(t.conversion)
assert conversion in "sra"
write(f"!{conversion}")
if t.format_spec:
write(":")
meth = getattr(self, "_fstring_" + type(t.format_spec).__name__)
meth(t.format_spec, write)
write("}")
def _Name(self, t):
self.write(t.id)
def _write_constant(self, value):
if isinstance(value, (float, complex)):
# Substitute overflowing decimal literal for AST infinities.
self.write(repr(value).replace("inf", INFSTR))
else:
self.write(repr(value))
def _Constant(self, t):
value = t.value
if isinstance(value, tuple):
self.write("(")
if len(value) == 1:
self._write_constant(value[0])
self.write(",")
else:
interleave(lambda: self.write(", "), self._write_constant, value)
self.write(")")
elif value is ...:
self.write("...")
else:
if t.kind == "u":
self.write("u")
self._write_constant(t.value)
def _List(self, t):
self.write("[")
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write("]")
def _ListComp(self, t):
self.write("[")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write("]")
def _GeneratorExp(self, t):
self.write("(")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write(")")
def _SetComp(self, t):
self.write("{")
self.dispatch(t.elt)
for gen in t.generators:
self.dispatch(gen)
self.write("}")
def _DictComp(self, t):
self.write("{")
self.dispatch(t.key)
self.write(": ")
self.dispatch(t.value)
for gen in t.generators:
self.dispatch(gen)
self.write("}")
def _comprehension(self, t):
if t.is_async:
self.write(" async for ")
else:
self.write(" for ")
self.dispatch(t.target)
self.write(" in ")
self.dispatch(t.iter)
for if_clause in t.ifs:
self.write(" if ")
self.dispatch(if_clause)
def _IfExp(self, t):
self.write("(")
self.dispatch(t.body)
self.write(" if ")
self.dispatch(t.test)
self.write(" else ")
self.dispatch(t.orelse)
self.write(")")
def _Set(self, t):
assert t.elts # should be at least one element
self.write("{")
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write("}")
def _Dict(self, t):
self.write("{")
def write_key_value_pair(k, v):
self.dispatch(k)
self.write(": ")
self.dispatch(v)
def write_item(item):
k, v = item
if k is None:
# for dictionary unpacking operator in dicts {**{'y': 2}}
# see PEP 448 for details
self.write("**")
self.dispatch(v)
else:
write_key_value_pair(k, v)
interleave(lambda: self.write(", "), write_item, zip(t.keys, t.values))
self.write("}")
def _Tuple(self, t):
self.write("(")
if len(t.elts) == 1:
elt = t.elts[0]
self.dispatch(elt)
self.write(",")
else:
interleave(lambda: self.write(", "), self.dispatch, t.elts)
self.write(")")
unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}
def _UnaryOp(self, t):
self.write("(")
self.write(self.unop[t.op.__class__.__name__])
self.write(" ")
self.dispatch(t.operand)
self.write(")")
binop = {
"Add": "+",
"Sub": "-",
"Mult": "*",
"MatMult": "@",
"Div": "/",
"Mod": "%",
"LShift": "<<",
"RShift": ">>",
"BitOr": "|",
"BitXor": "^",
"BitAnd": "&",
"FloorDiv": "//",
"Pow": "**",
}
def _BinOp(self, t):
self.write("(")
self.dispatch(t.left)
self.write(" " + self.binop[t.op.__class__.__name__] + " ")
self.dispatch(t.right)
self.write(")")
cmpops = {
"Eq": "==",
"NotEq": "!=",
"Lt": "<",
"LtE": "<=",
"Gt": ">",
"GtE": ">=",
"Is": "is",
"IsNot": "is not",
"In": "in",
"NotIn": "not in",
}
def _Compare(self, t):
self.write("(")
self.dispatch(t.left)
for o, e in zip(t.ops, t.comparators):
self.write(" " + self.cmpops[o.__class__.__name__] + " ")
self.dispatch(e)
self.write(")")
boolops = {ast.And: "and", ast.Or: "or"}
def _BoolOp(self, t):
self.write("(")
s = " %s " % self.boolops[t.op.__class__]
interleave(lambda: self.write(s), self.dispatch, t.values)
self.write(")")
def _Attribute(self, t):
self.dispatch(t.value)
# Special case: 3.__abs__() is a syntax error, so if t.value
# is an integer literal then we need to either parenthesize
# it or add an extra space to get 3 .__abs__().
if isinstance(t.value, ast.Constant) and isinstance(t.value.value, int):
self.write(" ")
self.write(".")
self.write(t.attr)
def _Call(self, t):
self.dispatch(t.func)
self.write("(")
comma = False
for e in t.args:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
for e in t.keywords:
if comma:
self.write(", ")
else:
comma = True
self.dispatch(e)
self.write(")")
def _Subscript(self, t):
self.dispatch(t.value)
self.write("[")
if (
isinstance(t.slice, ast.Index)
and isinstance(t.slice.value, ast.Tuple)
and t.slice.value.elts
):
if len(t.slice.value.elts) == 1:
elt = t.slice.value.elts[0]
self.dispatch(elt)
self.write(",")
else:
interleave(lambda: self.write(", "), self.dispatch, t.slice.value.elts)
else:
self.dispatch(t.slice)
self.write("]")
def _Starred(self, t):
self.write("*")
self.dispatch(t.value)
# slice
def _Ellipsis(self, t):
self.write("...")
def _Index(self, t):
self.dispatch(t.value)
def _Slice(self, t):
if t.lower:
self.dispatch(t.lower)
self.write(":")
if t.upper:
self.dispatch(t.upper)
if t.step:
self.write(":")
self.dispatch(t.step)
def _ExtSlice(self, t):
if len(t.dims) == 1:
elt = t.dims[0]
self.dispatch(elt)
self.write(",")
else:
interleave(lambda: self.write(", "), self.dispatch, t.dims)
# argument
def _arg(self, t):
self.write(t.arg)
if t.annotation:
self.write(": ")
self.dispatch(t.annotation)
# others
def _arguments(self, t):
first = True
# normal arguments
all_args = t.posonlyargs + t.args
defaults = [None] * (len(all_args) - len(t.defaults)) + t.defaults
for index, elements in enumerate(zip(all_args, defaults), 1):
a, d = elements
if first:
first = False
else:
self.write(", ")
self.dispatch(a)
if d:
self.write("=")
self.dispatch(d)
if index == len(t.posonlyargs):
self.write(", /")
# varargs, or bare '*' if no varargs but keyword-only arguments present
if t.vararg or t.kwonlyargs:
if first:
first = False
else:
self.write(", ")
self.write("*")
if t.vararg:
self.write(t.vararg.arg)
if t.vararg.annotation:
self.write(": ")
self.dispatch(t.vararg.annotation)
# keyword-only arguments
if t.kwonlyargs:
for a, d in zip(t.kwonlyargs, t.kw_defaults):
if first:
first = False
else:
self.write(", ")
self.dispatch(a)
if d:
self.write("=")
self.dispatch(d)
# kwargs
if t.kwarg:
if first:
first = False
else:
self.write(", ")
self.write("**" + t.kwarg.arg)
if t.kwarg.annotation:
self.write(": ")
self.dispatch(t.kwarg.annotation)
def _keyword(self, t):
if t.arg is None:
self.write("**")
else:
self.write(t.arg)
self.write("=")
self.dispatch(t.value)
def _Lambda(self, t):
self.write("(")
self.write("lambda ")
self.dispatch(t.args)
self.write(": ")
self.dispatch(t.body)
self.write(")")
def _alias(self, t):
self.write(t.name)
if t.asname:
self.write(" as " + t.asname)
def _withitem(self, t):
self.dispatch(t.context_expr)
if t.optional_vars:
self.write(" as ")
self.dispatch(t.optional_vars)
def roundtrip(filename, output=sys.stdout):
    """Parse a file and pretty-print it to output.

    The output is formatted as valid Python source code.

    Args:
        filename: The name of the file to parse.
        output: The output stream to write to.
    """
    # First pass: detect the declared source encoding (coding cookie / BOM).
    with open(filename, "rb") as pyfile:
        encoding = tokenize.detect_encoding(pyfile.readline)[0]
    with open(filename, "r", encoding=encoding) as pyfile:
        source = pyfile.read()
    # PyCF_ONLY_AST makes compile() return the AST instead of a code object.
    tree = compile(source, filename, "exec", ast.PyCF_ONLY_AST)
    Unparser(tree, output)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/e2b_data_analysis/tool.py | from __future__ import annotations
import ast
import json
import os
from io import StringIO
from sys import version_info
from typing import IO, TYPE_CHECKING, Any, Callable, List, Optional, Type, Union
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManager,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool, Tool
from pydantic import BaseModel, Field, PrivateAttr
from langchain_community.tools.e2b_data_analysis.unparse import Unparser
if TYPE_CHECKING:
from e2b import EnvVars
from e2b.templates.data_analysis import Artifact
# Tool description shown to the LLM; passed as `description` to BaseTool in
# E2BDataAnalysisTool.__init__.
base_description = """Evaluates python code in a sandbox environment. \
The environment is long running and exists across multiple executions. \
You must send the whole script every time and print your outputs. \
Script should be pure python code that can be evaluated. \
It should be in python format NOT markdown. \
The code should NOT be wrapped in backticks. \
All python packages including requests, matplotlib, scipy, numpy, pandas, \
etc are available. Create and display chart using `plt.show()`."""
def _unparse(tree: ast.AST) -> str:
"""Unparse the AST."""
if version_info.minor < 9:
s = StringIO()
Unparser(tree, file=s)
source_code = s.getvalue()
s.close()
else:
source_code = ast.unparse(tree) # type: ignore[attr-defined]
return source_code
def add_last_line_print(code: str) -> str:
    """Add print statement to the last line if it's missing.

    Sometimes, the LLM-generated code doesn't have `print(variable_name)`,
    instead the LLM tries to print the variable only by writing
    `variable_name` (as you would in a REPL, for example).

    This method checks the AST of the generated Python code and adds the
    print statement to the last line if it's missing.
    """
    tree = ast.parse(code)
    # Bug fix: an empty script (whitespace or comments only) has no statements,
    # so tree.body[-1] below would raise IndexError; return it unchanged.
    if not tree.body:
        return code
    node = tree.body[-1]
    if isinstance(node, ast.Expr) and isinstance(node.value, ast.Call):
        if isinstance(node.value.func, ast.Name) and node.value.func.id == "print":
            # Already ends with a print(...) call; keep the code as-is.
            return _unparse(tree)
    if isinstance(node, ast.Expr):
        # Wrap the trailing bare expression in print(...) so its value
        # appears on stdout when the script runs.
        tree.body[-1] = ast.Expr(
            value=ast.Call(
                func=ast.Name(id="print", ctx=ast.Load()),
                args=[node.value],
                keywords=[],
            )
        )
    return _unparse(tree)
class UploadedFile(BaseModel):
    """Description of the uploaded path with its remote path."""

    # Display name of the file — presumably the local file name; TODO confirm
    # against the (unseen) upload call site.
    name: str
    # Path of the file inside the sandbox.
    remote_path: str
    # Free-form description surfaced in uploaded_files_description.
    description: str
class E2BDataAnalysisToolArguments(BaseModel):
    """Arguments for the E2BDataAnalysisTool."""

    # Validated input for E2BDataAnalysisTool._run's `python_code` parameter.
    python_code: str = Field(
        ...,
        examples=["print('Hello World')"],
        description=(
            "The python script to be evaluated. "
            "The contents will be in main.py. "
            "It should not be in markdown format."
        ),
    )
class E2BDataAnalysisTool(BaseTool): # type: ignore[override, override]
"""Tool for running python code in a sandboxed environment for data analysis."""
name: str = "e2b_data_analysis"
args_schema: Type[BaseModel] = E2BDataAnalysisToolArguments
session: Any
description: str
_uploaded_files: List[UploadedFile] = PrivateAttr(default_factory=list)
def __init__(
    self,
    api_key: Optional[str] = None,
    cwd: Optional[str] = None,
    env_vars: Optional[EnvVars] = None,
    on_stdout: Optional[Callable[[str], Any]] = None,
    on_stderr: Optional[Callable[[str], Any]] = None,
    on_artifact: Optional[Callable[[Artifact], Any]] = None,
    on_exit: Optional[Callable[[int], Any]] = None,
    **kwargs: Any,
):
    """Create the tool and start an e2b DataAnalysis cloud sandbox.

    Args:
        api_key: e2b API key; if None, e2b reads the E2B_API_KEY
            environment variable.
        cwd: Working directory inside the sandbox.
        env_vars: Environment variables to set in the sandbox.
        on_stdout: Callback for the process's stdout stream.
        on_stderr: Callback for the process's stderr stream.
        on_artifact: Callback invoked when the sandbox produces an artifact.
        on_exit: Callback invoked with the process exit code.
        kwargs: Forwarded to BaseTool.

    Raises:
        ImportError: If the ``e2b`` package is not installed.
    """
    try:
        from e2b import DataAnalysis
    except ImportError as e:
        raise ImportError(
            "Unable to import e2b, please install with `pip install e2b`."
        ) from e

    # If no API key is provided, E2B will try to read it from the environment
    # variable E2B_API_KEY
    super().__init__(description=base_description, **kwargs)
    self.session = DataAnalysis(
        api_key=api_key,
        cwd=cwd,
        env_vars=env_vars,
        on_stdout=on_stdout,
        on_stderr=on_stderr,
        on_exit=on_exit,
        on_artifact=on_artifact,
    )
def close(self) -> None:
"""Close the cloud sandbox."""
self._uploaded_files = []
self.session.close()
@property
def uploaded_files_description(self) -> str:
if len(self._uploaded_files) == 0:
return ""
lines = ["The following files available in the sandbox:"]
for f in self._uploaded_files:
if f.description == "":
lines.append(f"- path: `{f.remote_path}`")
else:
lines.append(
f"- path: `{f.remote_path}` \n description: `{f.description}`"
)
return "\n".join(lines)
def _run(
self,
python_code: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
callbacks: Optional[CallbackManager] = None,
) -> str:
python_code = add_last_line_print(python_code)
if callbacks is not None:
on_artifact = getattr(callbacks.metadata, "on_artifact", None)
else:
on_artifact = None
stdout, stderr, artifacts = self.session.run_python(
python_code, on_artifact=on_artifact
)
out = {
"stdout": stdout,
"stderr": stderr,
"artifacts": list(map(lambda artifact: artifact.name, artifacts)),
}
return json.dumps(out)
async def _arun(
self,
python_code: str,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
) -> str:
raise NotImplementedError("e2b_data_analysis does not support async")
def run_command(
self,
cmd: str,
) -> dict:
"""Run shell command in the sandbox."""
proc = self.session.process.start(cmd)
output = proc.wait()
return {
"stdout": output.stdout,
"stderr": output.stderr,
"exit_code": output.exit_code,
}
def install_python_packages(self, package_names: Union[str, List[str]]) -> None:
"""Install python packages in the sandbox."""
self.session.install_python_packages(package_names)
def install_system_packages(self, package_names: Union[str, List[str]]) -> None:
"""Install system packages (via apt) in the sandbox."""
self.session.install_system_packages(package_names)
def download_file(self, remote_path: str) -> bytes:
"""Download file from the sandbox."""
return self.session.download_file(remote_path)
def upload_file(self, file: IO, description: str) -> UploadedFile:
"""Upload file to the sandbox.
The file is uploaded to the '/home/user/<filename>' path."""
remote_path = self.session.upload_file(file)
f = UploadedFile(
name=os.path.basename(file.name),
remote_path=remote_path,
description=description,
)
self._uploaded_files.append(f)
self.description = self.description + "\n" + self.uploaded_files_description
return f
def remove_uploaded_file(self, uploaded_file: UploadedFile) -> None:
"""Remove uploaded file from the sandbox."""
self.session.filesystem.remove(uploaded_file.remote_path)
self._uploaded_files = [
f
for f in self._uploaded_files
if f.remote_path != uploaded_file.remote_path
]
self.description = self.description + "\n" + self.uploaded_files_description
def as_tool(self) -> Tool: # type: ignore[override]
return Tool.from_function(
func=self._run,
name=self.name,
description=self.description,
args_schema=self.args_schema,
)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/base.py | """Base class for Slack tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.slack.utils import login
if TYPE_CHECKING:
from slack_sdk import WebClient
class SlackBaseTool(BaseTool):  # type: ignore[override]
    """Base class for Slack tools."""

    # Authenticated slack_sdk client, built by `login()` by default
    # (reads SLACK_BOT_TOKEN / SLACK_USER_TOKEN from the environment).
    client: WebClient = Field(default_factory=login)
    """The WebClient object."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/schedule_message.py | import logging
from datetime import datetime as dt
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
from langchain_community.tools.slack.utils import UTC_FORMAT
logger = logging.getLogger(__name__)
class ScheduleMessageSchema(BaseModel):
    """Input for ScheduleMessageTool."""

    # Message body to post.
    message: str = Field(
        ...,
        description="The message to be sent.",
    )
    # Target conversation id (channel, private group, or IM channel).
    channel: str = Field(
        ...,
        description="The channel, private group, or IM channel to send message to.",
    )
    # ISO-8601 timestamp with UTC offset; parsed with UTC_FORMAT by the tool.
    timestamp: str = Field(
        ...,
        description="The datetime for when the message should be sent in the "
        ' following format: YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date '
        " and time components, and the time zone offset is specified as ±hh:mm. "
        ' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
        " 2023, at 10:30 AM in a time zone with a positive offset of 3 "
        " hours from Coordinated Universal Time (UTC).",
    )
class SlackScheduleMessage(SlackBaseTool):  # type: ignore[override, override]
    """Tool for scheduling a message in Slack."""

    name: str = "schedule_message"
    description: str = (
        "Use this tool to schedule a message to be sent on a specific date and time."
    )
    args_schema: Type[ScheduleMessageSchema] = ScheduleMessageSchema

    def _run(
        self,
        message: str,
        channel: str,
        timestamp: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Schedule `message` for delivery to `channel` at `timestamp`.

        The timestamp string is parsed with UTC_FORMAT and converted to a
        Unix epoch, which the Slack API expects for `post_at`.
        """
        try:
            post_at = dt.strptime(timestamp, UTC_FORMAT).timestamp()
            response = self.client.chat_scheduleMessage(
                channel=channel, text=message, post_at=post_at
            )
            return "Message scheduled: " + str(response)
        except Exception as e:
            return "Error scheduling message: {}".format(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/get_channel.py | import json
import logging
from typing import Any, Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetChannel(SlackBaseTool):  # type: ignore[override]
    """Tool that gets Slack channel information."""

    name: str = "get_channelid_name_dict"
    description: str = (
        "Use this tool to get channelid-name dict. There is no input to this tool"
    )

    def _run(
        self, *args: Any, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        """List the workspace's channels.

        Returns:
            JSON-encoded list of {"id", "name", "created", "num_members"}
            dicts, or an error string on failure.
        """
        try:
            result = self.client.conversations_list()
            channels = result["channels"]
            # Keep only channels that expose all four fields of interest.
            filtered_result = [
                {key: channel[key] for key in ("id", "name", "created", "num_members")}
                for channel in channels
                if "id" in channel
                and "name" in channel
                and "created" in channel
                and "num_members" in channel
            ]
            return json.dumps(filtered_result, ensure_ascii=False)
        except Exception as e:
            # Error text fixed: previous message ("Error creating conversation")
            # was copy-pasted from the message-sending tool.
            return "Error getting channels: {}".format(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/utils.py | """Slack tool utils."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from slack_sdk import WebClient
logger = logging.getLogger(__name__)
def login() -> WebClient:
    """Authenticate using the Slack API.

    Reads SLACK_BOT_TOKEN (preferred) or SLACK_USER_TOKEN from the
    environment and builds an authenticated client.

    Returns:
        An authenticated ``WebClient``.

    Raises:
        ImportError: If ``slack_sdk`` is not installed.
        KeyError: If neither token environment variable is set.
    """
    try:
        from slack_sdk import WebClient
    except ImportError as e:
        raise ImportError(
            "Cannot import slack_sdk. Please install the package with \
            `pip install slack_sdk`."
        ) from e

    if "SLACK_BOT_TOKEN" in os.environ:
        token = os.environ["SLACK_BOT_TOKEN"]
    elif "SLACK_USER_TOKEN" in os.environ:
        token = os.environ["SLACK_USER_TOKEN"]
    else:
        logger.error(
            "Error: The SLACK_BOT_TOKEN or SLACK_USER_TOKEN \
            environment variable have not been set."
        )
        # The function is annotated to return WebClient; previously it fell
        # through and returned None silently, deferring the failure to the
        # first API call. Fail loudly here instead.
        raise KeyError(
            "Either the SLACK_BOT_TOKEN or SLACK_USER_TOKEN environment "
            "variable must be set."
        )

    client = WebClient(token=token)
    logger.info("slack login success")
    return client
UTC_FORMAT = "%Y-%m-%dT%H:%M:%S%z"
"""UTC format for datetime objects."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/__init__.py | """Slack tools."""
from langchain_community.tools.slack.get_channel import SlackGetChannel
from langchain_community.tools.slack.get_message import SlackGetMessage
from langchain_community.tools.slack.schedule_message import SlackScheduleMessage
from langchain_community.tools.slack.send_message import SlackSendMessage
from langchain_community.tools.slack.utils import login
__all__ = [
"SlackGetChannel",
"SlackGetMessage",
"SlackScheduleMessage",
"SlackSendMessage",
"login",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/send_message.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SendMessageSchema(BaseModel):
    """Input for SendMessageTool."""

    # Message body to post.
    message: str = Field(
        ...,
        description="The message to be sent.",
    )
    # Target conversation id (channel, private group, or IM channel).
    channel: str = Field(
        ...,
        description="The channel, private group, or IM channel to send message to.",
    )
class SlackSendMessage(SlackBaseTool):  # type: ignore[override, override]
    """Tool for sending a message in Slack."""

    name: str = "send_message"
    description: str = (
        "Use this tool to send a message with the provided message fields."
    )
    args_schema: Type[SendMessageSchema] = SendMessageSchema

    def _run(
        self,
        message: str,
        channel: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Post `message` to `channel`.

        Returns:
            A confirmation string with the API response, or an error string.
        """
        try:
            result = self.client.chat_postMessage(channel=channel, text=message)
            return "Message sent: " + str(result)
        except Exception as e:
            # Error text fixed: previous message ("Error creating conversation")
            # did not describe this tool's action.
            return "Error sending message: {}".format(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/slack/get_message.py | import json
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetMessageSchema(BaseModel):
    """Input schema for SlackGetMessages."""

    channel_id: str = Field(
        ...,
        # Description fixed: it was copy-pasted from the send-message schema
        # ("to send message to") although this tool reads history.
        description="The channel id, private group, or IM channel "
        "to get messages from.",
    )
class SlackGetMessage(SlackBaseTool):  # type: ignore[override, override]
    """Tool that gets Slack messages."""

    name: str = "get_messages"
    description: str = "Use this tool to get messages from a channel."
    args_schema: Type[SlackGetMessageSchema] = SlackGetMessageSchema

    def _run(
        self,
        channel_id: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Fetch the conversation history of `channel_id`.

        Returns:
            JSON-encoded list of {"user", "text", "ts"} dicts, or an error
            string on failure.
        """
        try:
            result = self.client.conversations_history(channel=channel_id)
            messages = result["messages"]
            # Keep only complete user messages; system events may lack keys.
            filtered_messages = [
                {key: message[key] for key in ("user", "text", "ts")}
                for message in messages
                if "user" in message and "text" in message and "ts" in message
            ]
            return json.dumps(filtered_messages, ensure_ascii=False)
        except Exception as e:
            # Error text fixed: previous message ("Error creating conversation")
            # was copy-pasted; also dropped the no-op logging.getLogger() call.
            return "Error getting messages: {}".format(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_serper/tool.py | """Tool for the Serper.dev Google Search API."""
from typing import Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.google_serper import GoogleSerperAPIWrapper
class GoogleSerperRun(BaseTool):  # type: ignore[override]
    """Tool that queries the Serper.dev Google search API."""

    name: str = "google_serper"
    description: str = (
        "A low-cost Google Search API."
        "Useful for when you need to answer questions about current events."
        "Input should be a search query."
    )
    api_wrapper: GoogleSerperAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.run(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        # str(...) instead of a direct __str__() dunder call: same result,
        # idiomatic, and consistent with _run above.
        return str(await self.api_wrapper.arun(query))
class GoogleSerperResults(BaseTool):  # type: ignore[override]
    """Tool that queries the Serper.dev Google Search API
    and get back json."""

    name: str = "google_serper_results_json"
    description: str = (
        "A low-cost Google Search API."
        "Useful for when you need to answer questions about current events."
        "Input should be a search query. Output is a JSON object of the query results"
    )
    api_wrapper: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return str(self.api_wrapper.results(query))

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        # str(...) instead of a direct __str__() dunder call: same result,
        # idiomatic, and consistent with _run above.
        return str(await self.api_wrapper.aresults(query))
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_serper/__init__.py | from langchain_community.tools.google_serper.tool import (
GoogleSerperResults,
GoogleSerperRun,
)
"""Google Serper API Toolkit."""
"""Tool for the Serer.dev Google Search API."""
__all__ = ["GoogleSerperRun", "GoogleSerperResults"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/metaphor_search/tool.py | """Tool for the Metaphor search API."""
from typing import Dict, List, Optional, Union
from langchain_core._api.deprecation import deprecated
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from langchain_community.utilities.metaphor_search import MetaphorSearchAPIWrapper
@deprecated(
    since="0.0.15",
    removal="1.0",
    alternative="langchain_exa.ExaSearchResults",
)
class MetaphorSearchResults(BaseTool):  # type: ignore[override]
    """Tool that queries the Metaphor Search API and gets back json."""

    name: str = "metaphor_search_results_json"
    description: str = (
        "A wrapper around Metaphor Search. "
        "Input should be a Metaphor-optimized query. "
        "Output is a JSON array of the query results"
    )
    api_wrapper: MetaphorSearchAPIWrapper

    def _run(
        self,
        query: str,
        num_results: int,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool.

        Args:
            query: The search query.
            num_results: Maximum number of results to return.
            include_domains: Optional list of domains to restrict results to.
            exclude_domains: Optional list of domains to exclude.
            start_crawl_date: Optional lower bound on crawl date.
            end_crawl_date: Optional upper bound on crawl date.
            start_published_date: Optional lower bound on publish date.
            end_published_date: Optional upper bound on publish date.
            use_autoprompt: Whether to let Metaphor rewrite the query.

        Returns:
            The wrapper's result list, or ``repr`` of the exception on failure.
        """
        try:
            # Arguments are forwarded positionally; their order must match the
            # wrapper's `results` signature.
            return self.api_wrapper.results(
                query,
                num_results,
                include_domains,
                exclude_domains,
                start_crawl_date,
                end_crawl_date,
                start_published_date,
                end_published_date,
                use_autoprompt,
            )
        except Exception as e:
            # Errors are returned as text so the agent can react to them.
            return repr(e)

    async def _arun(
        self,
        query: str,
        num_results: int,
        include_domains: Optional[List[str]] = None,
        exclude_domains: Optional[List[str]] = None,
        start_crawl_date: Optional[str] = None,
        end_crawl_date: Optional[str] = None,
        start_published_date: Optional[str] = None,
        end_published_date: Optional[str] = None,
        use_autoprompt: Optional[bool] = None,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> Union[List[Dict], str]:
        """Use the tool asynchronously. Parameters mirror `_run`."""
        try:
            return await self.api_wrapper.results_async(
                query,
                num_results,
                include_domains,
                exclude_domains,
                start_crawl_date,
                end_crawl_date,
                start_published_date,
                end_published_date,
                use_autoprompt,
            )
        except Exception as e:
            # Errors are returned as text so the agent can react to them.
            return repr(e)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/metaphor_search/__init__.py | """Metaphor Search API toolkit."""
from langchain_community.tools.metaphor_search.tool import MetaphorSearchResults
__all__ = ["MetaphorSearchResults"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/youtube/search.py | """
Adapted from https://github.com/venuv/langchain_yt_tools
CustomYTSearchTool searches YouTube videos related to a person
and returns a specified number of video URLs.
Input to this tool should be a comma separated list,
- the first part contains a person name
- and the second(optional) a number that is the
maximum number of video results to return
"""
import json
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
class YouTubeSearchTool(BaseTool):  # type: ignore[override]
    """Tool that queries YouTube."""

    name: str = "youtube_search"
    description: str = (
        "search for youtube videos associated with a person. "
        "the input to this tool should be a comma separated list, "
        "the first part contains a person name and the second a "
        "number that is the maximum number of video results "
        "to return aka num_results. the second part is optional"
    )

    def _search(self, person: str, num_results: int) -> str:
        """Query youtube_search and return a stringified list of video URLs.

        Args:
            person: Search term (person name).
            num_results: Maximum number of videos to return.
        """
        from youtube_search import YoutubeSearch

        results = YoutubeSearch(person, num_results).to_json()
        data = json.loads(results)
        url_suffix_list = [
            "https://www.youtube.com" + video["url_suffix"] for video in data["videos"]
        ]
        return str(url_suffix_list)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool.

        Args:
            query: "<person>" or "<person>,<num_results>".
        """
        values = query.split(",")
        person = values[0]
        num_results = 2
        # Guard against a trailing comma or blank count ("name,"), which
        # previously raised ValueError from int(""). int() already tolerates
        # surrounding whitespace, so " 5" keeps working as before.
        if len(values) > 1 and values[1].strip():
            num_results = int(values[1])
        return self._search(person, num_results)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/vectorstore/tool.py | """Tools for interacting with vectorstores."""
import json
from typing import Any, Dict, Optional
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import BaseTool
from langchain_core.vectorstores import VectorStore
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.llms.openai import OpenAI
class BaseVectorStoreTool(BaseModel):
    """Base class for tools that use a VectorStore."""

    # Backing vector store; excluded from serialization.
    vectorstore: VectorStore = Field(exclude=True)
    # LLM used by the QA chains; defaults to a temperature-0 OpenAI model.
    llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
def _create_description_from_template(values: Dict[str, Any]) -> Dict[str, Any]:
values["description"] = values["template"].format(name=values["name"])
return values
class VectorStoreQATool(BaseVectorStoreTool, BaseTool):  # type: ignore[override]
    """Tool for the VectorDBQA chain. To be initialized with name and chain."""

    @staticmethod
    def get_description(name: str, description: str) -> str:
        """Build the agent-facing description for a QA tool over `name`."""
        return (
            "Useful for when you need to answer questions about {name}. "
            "Whenever you need information about {description} "
            "you should ALWAYS use this. "
            "Input should be a fully formed question."
        ).format(name=name, description=description)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        from langchain.chains.retrieval_qa.base import RetrievalQA

        qa_chain = RetrievalQA.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        child_callbacks = run_manager.get_child() if run_manager else None
        response = qa_chain.invoke(
            {qa_chain.input_key: query},
            config={"callbacks": child_callbacks},
        )
        return response[qa_chain.output_key]

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        from langchain.chains.retrieval_qa.base import RetrievalQA

        qa_chain = RetrievalQA.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        child_callbacks = run_manager.get_child() if run_manager else None
        response = await qa_chain.ainvoke(
            {qa_chain.input_key: query},
            config={"callbacks": child_callbacks},
        )
        return response[qa_chain.output_key]
class VectorStoreQAWithSourcesTool(BaseVectorStoreTool, BaseTool):  # type: ignore[override]
    """Tool for the VectorDBQAWithSources chain."""

    @staticmethod
    def get_description(name: str, description: str) -> str:
        """Build the agent-facing description for a QA-with-sources tool."""
        return (
            "Useful for when you need to answer questions about {name} and the sources "
            "used to construct the answer. "
            "Whenever you need information about {description} "
            "you should ALWAYS use this. "
            " Input should be a fully formed question. "
            "Output is a json serialized dictionary with keys `answer` and `sources`. "
            "Only use this tool if the user explicitly asks for sources."
        ).format(name=name, description=description)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        from langchain.chains.qa_with_sources.retrieval import (
            RetrievalQAWithSourcesChain,
        )

        qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        child_callbacks = run_manager.get_child() if run_manager else None
        response = qa_chain.invoke(
            {qa_chain.question_key: query},
            return_only_outputs=True,
            config={"callbacks": child_callbacks},
        )
        return json.dumps(response)

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool asynchronously."""
        from langchain.chains.qa_with_sources.retrieval import (
            RetrievalQAWithSourcesChain,
        )

        qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
            self.llm, retriever=self.vectorstore.as_retriever()
        )
        child_callbacks = run_manager.get_child() if run_manager else None
        response = await qa_chain.ainvoke(
            {qa_chain.question_key: query},
            return_only_outputs=True,
            config={"callbacks": child_callbacks},
        )
        return json.dumps(response)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/vectorstore/__init__.py | """Simple tool wrapper around VectorDBQA chain."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_finance/tool.py | """Tool for the Google Finance"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_finance import GoogleFinanceAPIWrapper
class GoogleFinanceQueryRun(BaseTool):  # type: ignore[override]
    """Tool that queries the Google Finance API."""

    name: str = "google_finance"
    description: str = (
        "A wrapper around Google Finance Search. "
        "Useful for when you need to get information about"
        "google search Finance from Google Finance"
        "Input should be a search query."
    )
    # Wrapper around the SerpAPI Google Finance engine; must be supplied.
    api_wrapper: GoogleFinanceAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool.

        Delegates directly to the API wrapper and returns its text result.
        """
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_finance/__init__.py | """Google Finance API Toolkit."""
from langchain_community.tools.google_finance.tool import GoogleFinanceQueryRun
__all__ = ["GoogleFinanceQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/merriam_webster/tool.py | """Tool for the Merriam-Webster API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
class MerriamWebsterQueryRun(BaseTool):  # type: ignore[override]
    """Tool that searches the Merriam-Webster API."""

    name: str = "merriam_webster"
    description: str = (
        "A wrapper around Merriam-Webster. "
        "Useful for when you need to get the definition of a word."
        "Input should be the word you want the definition of."
    )
    # Wrapper around the Merriam-Webster dictionary API; must be supplied.
    api_wrapper: MerriamWebsterAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Merriam-Webster tool.

        Delegates directly to the API wrapper and returns its text result.
        """
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/merriam_webster/__init__.py | """Merriam-Webster API toolkit."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/semanticscholar/tool.py | """Tool for the SemanticScholar API."""
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.utilities.semanticscholar import SemanticScholarAPIWrapper
class SemantscholarInput(BaseModel):
    """Input for the SemanticScholar tool."""

    # NOTE: the class name is misspelled ("Semantscholar") but kept as-is
    # for backward compatibility with existing imports.
    query: str = Field(description="search query to look up")
class SemanticScholarQueryRun(BaseTool):  # type: ignore[override, override]
    """Tool that searches the semanticscholar API."""

    name: str = "semanticscholar"
    # NOTE(review): the description contains typos ("semantischolar", missing
    # spaces); left untouched since it is a runtime string shown to the model.
    description: str = (
        "A wrapper around semantischolar.org "
        "Useful for when you need to answer to questions"
        "from research papers."
        "Input should be a search query."
    )
    api_wrapper: SemanticScholarAPIWrapper = Field(
        default_factory=SemanticScholarAPIWrapper  # type: ignore[arg-type]
    )
    args_schema: Type[BaseModel] = SemantscholarInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Semantic Scholar tool.

        Delegates directly to the API wrapper and returns its text result.
        """
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/semanticscholar/__init__.py | from langchain_community.tools.semanticscholar.tool import SemanticScholarQueryRun
"""Semantic Scholar API toolkit."""
"""Tool for the Semantic Scholar Search API."""
__all__ = ["SemanticScholarQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/jira/tool.py | """
This tool allows agents to interact with the atlassian-python-api library
and operate on a Jira instance. For more information on the
atlassian-python-api library, see https://atlassian-python-api.readthedocs.io/jira.html
To use this tool, you must first set as environment variables:
JIRA_API_TOKEN
JIRA_USERNAME
JIRA_INSTANCE_URL
JIRA_CLOUD
Below is a sample script that uses the Jira tool:
```python
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
from langchain_community.utilities.jira import JiraAPIWrapper
jira = JiraAPIWrapper()
toolkit = JiraToolkit.from_jira_api_wrapper(jira)
```
"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.utilities.jira import JiraAPIWrapper
class JiraAction(BaseTool):  # type: ignore[override]
    """Tool that queries the Atlassian Jira API."""

    api_wrapper: JiraAPIWrapper = Field(default_factory=JiraAPIWrapper)  # type: ignore[arg-type]
    # Which wrapper operation to run; valid values are defined by
    # JiraAPIWrapper.run — confirm there before adding modes.
    mode: str
    # Name/description are filled in by the toolkit that instantiates this.
    name: str = ""
    description: str = ""

    def _run(
        self,
        instructions: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Atlassian Jira API to run an operation."""
        # `mode` selects the API operation; `instructions` is its payload.
        return self.api_wrapper.run(self.mode, instructions)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/jira/__init__.py | """Jira Tool."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/jira/prompt.py | # flake8: noqa
JIRA_ISSUE_CREATE_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira issue_create API, useful when you need to create a Jira issue.
The input to this tool is a dictionary specifying the fields of the Jira issue, and will be passed into atlassian-python-api's Jira `issue_create` function.
For example, to create a low priority task called "test issue" with description "test description", you would pass in the following dictionary:
{{"summary": "test issue", "description": "test description", "issuetype": {{"name": "Task"}}, "priority": {{"name": "Low"}}}}
"""
JIRA_GET_ALL_PROJECTS_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira project API,
useful when you need to fetch all the projects the user has access to, find out how many projects there are, or as an intermediary step that involv searching by projects.
there is no input to this tool.
"""
JIRA_JQL_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira jql API, useful when you need to search for Jira issues.
The input to this tool is a JQL query string, and will be passed into atlassian-python-api's Jira `jql` function,
For example, to find all the issues in project "Test" assigned to the me, you would pass in the following string:
project = Test AND assignee = currentUser()
or to find issues with summaries that contain the word "test", you would pass in the following string:
summary ~ 'test'
"""
JIRA_CATCH_ALL_PROMPT = """
This tool is a wrapper around atlassian-python-api's Jira API.
There are other dedicated tools for fetching all projects, and creating and searching for issues,
use this tool if you need to perform any other actions allowed by the atlassian-python-api Jira API.
The input to this tool is a dictionary specifying a function from atlassian-python-api's Jira API,
as well as a list of arguments and dictionary of keyword arguments to pass into the function.
For example, to get all the users in a group, while increasing the max number of results to 100, you would
pass in the following dictionary: {{"function": "get_all_users_from_group", "args": ["group"], "kwargs": {{"limit":100}} }}
or to find out how many projects are in the Jira instance, you would pass in the following string:
{{"function": "projects"}}
For more information on the Jira API, refer to https://atlassian-python-api.readthedocs.io/jira.html
"""
JIRA_CONFLUENCE_PAGE_CREATE_PROMPT = """This tool is a wrapper around atlassian-python-api's Confluence
atlassian-python-api API, useful when you need to create a Confluence page. The input to this tool is a dictionary
specifying the fields of the Confluence page, and will be passed into atlassian-python-api's Confluence `create_page`
function. For example, to create a page in the DEMO space titled "This is the title" with body "This is the body. You can use
<strong>HTML tags</strong>!", you would pass in the following dictionary: {{"space": "DEMO", "title":"This is the
title","body":"This is the body. You can use <strong>HTML tags</strong>!"}} """
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/spark_sql/tool.py | # flake8: noqa
"""Tools for interacting with Spark SQL."""
from typing import Any, Dict, Optional
from pydantic import BaseModel, Field, root_validator, model_validator, ConfigDict
from langchain_core.language_models import BaseLanguageModel
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.prompts import PromptTemplate
from langchain_community.utilities.spark_sql import SparkSQL
from langchain_core.tools import BaseTool
from langchain_community.tools.spark_sql.prompt import QUERY_CHECKER
class BaseSparkSQLTool(BaseModel):
    """Base tool for interacting with Spark SQL."""

    # Shared Spark SQL connection wrapper; excluded so it is never serialized.
    db: SparkSQL = Field(exclude=True)

    model_config = ConfigDict(
        arbitrary_types_allowed=True,  # SparkSQL is not a pydantic-native type
    )
class QuerySparkSQLTool(BaseSparkSQLTool, BaseTool):  # type: ignore[override]
    """Tool that executes a SQL query against Spark SQL."""

    name: str = "query_sql_db"
    description: str = """
    Input to this tool is a detailed and correct SQL query, output is a result from the Spark SQL.
    If the query is not correct, an error message will be returned.
    If an error is returned, rewrite the query, check the query, and try again.
    """

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run ``query`` against Spark SQL; failures come back as error text."""
        result = self.db.run_no_throw(query)
        return result
class InfoSparkSQLTool(BaseSparkSQLTool, BaseTool):  # type: ignore[override]
    """Tool for getting metadata about a Spark SQL."""

    name: str = "schema_sql_db"
    description: str = """
    Input to this tool is a comma-separated list of tables, output is the schema and sample rows for those tables.
    Be sure that the tables actually exist by calling list_tables_sql_db first!
    Example Input: "table1, table2, table3"
    """

    def _run(
        self,
        table_names: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get the schema for tables in a comma-separated list.

        Splitting on "," and stripping each name (instead of splitting on the
        exact string ", ") tolerates inputs such as "table1,table2", matching
        the behavior of the SQL-database counterpart of this tool.
        """
        return self.db.get_table_info_no_throw(
            [t.strip() for t in table_names.split(",")]
        )
class ListSparkSQLTool(BaseSparkSQLTool, BaseTool):  # type: ignore[override]
    """Tool that lists the usable table names in the Spark SQL instance."""

    name: str = "list_tables_sql_db"
    description: str = "Input is an empty string, output is a comma separated list of tables in the Spark SQL."

    def _run(
        self,
        tool_input: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the usable table names as a single comma-separated string."""
        tables = self.db.get_usable_table_names()
        return ", ".join(tables)
class QueryCheckerTool(BaseSparkSQLTool, BaseTool):  # type: ignore[override]
    """Use an LLM to check if a query is correct.
    Adapted from https://www.patterns.app/blog/2023/01/18/crunchbot-sql-analyst-gpt/"""

    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    # Built lazily by initialize_llm_chain below; not meant as a constructor arg.
    llm_chain: Any = Field(init=False)
    name: str = "query_checker_sql_db"
    description: str = """
    Use this tool to double check if your query is correct before executing it.
    Always use this tool before executing a query with query_sql_db!
    """

    @model_validator(mode="before")
    @classmethod
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Any:
        # Build a default LLMChain around QUERY_CHECKER when none was supplied.
        if "llm_chain" not in values:
            # Local import avoids a hard dependency on the `langchain` package
            # at module import time.
            from langchain.chains.llm import LLMChain

            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),  # type: ignore[arg-type]
                prompt=PromptTemplate(
                    template=QUERY_CHECKER, input_variables=["query"]
                ),
            )
        # A user-supplied chain must expose exactly ["query"] so predict() below
        # can fill the prompt.
        if values["llm_chain"].prompt.input_variables != ["query"]:
            raise ValueError(
                "LLM chain for QueryCheckerTool need to use ['query'] as input_variables "
                "for the embedded prompt"
            )
        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the LLM to check the query."""
        return self.llm_chain.predict(
            query=query, callbacks=run_manager.get_child() if run_manager else None
        )

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # Async variant of _run using the chain's async predict.
        return await self.llm_chain.apredict(
            query=query, callbacks=run_manager.get_child() if run_manager else None
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/spark_sql/__init__.py | """Tools for interacting with Spark SQL."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/spark_sql/prompt.py | # flake8: noqa
# Prompt template for the Spark SQL QueryCheckerTool; the single placeholder
# {query} is the candidate Spark SQL statement to review.
QUERY_CHECKER = """
{query}
Double check the Spark SQL query above for common mistakes, including:
- Using NOT IN with NULL values
- Using UNION when UNION ALL should have been used
- Using BETWEEN for exclusive ranges
- Data type mismatch in predicates
- Properly quoting identifiers
- Using the correct number of arguments for functions
- Casting to the correct data type
- Using the proper columns for joins
If there are any of the above mistakes, rewrite the query. If there are no mistakes, just reproduce the original query."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/sql_database/tool.py | # flake8: noqa
"""Tools for interacting with a SQL database."""
from typing import Any, Dict, Optional, Sequence, Type, Union
from sqlalchemy.engine import Result
from pydantic import BaseModel, Field, root_validator, model_validator, ConfigDict
from langchain_core.language_models import BaseLanguageModel
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.prompts import PromptTemplate
from langchain_community.utilities.sql_database import SQLDatabase
from langchain_core.tools import BaseTool
from langchain_community.tools.sql_database.prompt import QUERY_CHECKER
class BaseSQLDatabaseTool(BaseModel):
    """Base tool for interacting with a SQL database."""

    # Shared database wrapper; excluded so it is never serialized.
    db: SQLDatabase = Field(exclude=True)

    model_config = ConfigDict(
        arbitrary_types_allowed=True,  # SQLDatabase is not a pydantic-native type
    )
class _QuerySQLDataBaseToolInput(BaseModel):
    # Argument schema for QuerySQLDataBaseTool.
    query: str = Field(..., description="A detailed and correct SQL query.")
class QuerySQLDataBaseTool(BaseSQLDatabaseTool, BaseTool):  # type: ignore[override, override]
    """Tool that executes a SQL query against the configured database."""

    name: str = "sql_db_query"
    description: str = """
    Execute a SQL query against the database and get back the result..
    If the query is not correct, an error message will be returned.
    If an error is returned, rewrite the query, check the query, and try again.
    """
    args_schema: Type[BaseModel] = _QuerySQLDataBaseToolInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[str, Sequence[Dict[str, Any]], Result]:
        """Run ``query``; failures are reported as error text, not raised."""
        result = self.db.run_no_throw(query)
        return result
class _InfoSQLDatabaseToolInput(BaseModel):
    # Argument schema for InfoSQLDatabaseTool.
    table_names: str = Field(
        ...,
        description=(
            "A comma-separated list of the table names for which to return the schema. "
            "Example input: 'table1, table2, table3'"
        ),
    )
class InfoSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):  # type: ignore[override, override]
    """Tool that returns schema and sample rows for the requested tables."""

    name: str = "sql_db_schema"
    description: str = "Get the schema and sample rows for the specified SQL tables."
    args_schema: Type[BaseModel] = _InfoSQLDatabaseToolInput

    def _run(
        self,
        table_names: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Describe each table named in the comma-separated ``table_names``."""
        requested = [name.strip() for name in table_names.split(",")]
        return self.db.get_table_info_no_throw(requested)
class _ListSQLDataBaseToolInput(BaseModel):
    # Argument schema for ListSQLDatabaseTool; the tool ignores its input.
    tool_input: str = Field("", description="An empty string")
class ListSQLDatabaseTool(BaseSQLDatabaseTool, BaseTool):  # type: ignore[override, override]
    """Tool that lists the usable table names in the database."""

    name: str = "sql_db_list_tables"
    description: str = "Input is an empty string, output is a comma-separated list of tables in the database."
    args_schema: Type[BaseModel] = _ListSQLDataBaseToolInput

    def _run(
        self,
        tool_input: str = "",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the usable table names as a single comma-separated string."""
        tables = self.db.get_usable_table_names()
        return ", ".join(tables)
class _QuerySQLCheckerToolInput(BaseModel):
    # Argument schema for QuerySQLCheckerTool. Wording fixed to be grammatical
    # and consistent with _QuerySQLDataBaseToolInput ("A detailed and correct
    # SQL query"); the old text read "A detailed and SQL query to be checked."
    query: str = Field(
        ..., description="A detailed and correct SQL query to be checked."
    )
class QuerySQLCheckerTool(BaseSQLDatabaseTool, BaseTool):  # type: ignore[override, override]
    """Use an LLM to check if a query is correct.
    Adapted from https://www.patterns.app/blog/2023/01/18/crunchbot-sql-analyst-gpt/"""

    template: str = QUERY_CHECKER
    llm: BaseLanguageModel
    # Built lazily by initialize_llm_chain below; not meant as a constructor arg.
    llm_chain: Any = Field(init=False)
    name: str = "sql_db_query_checker"
    description: str = """
    Use this tool to double check if your query is correct before executing it.
    Always use this tool before executing a query with sql_db_query!
    """
    args_schema: Type[BaseModel] = _QuerySQLCheckerToolInput

    @model_validator(mode="before")
    @classmethod
    def initialize_llm_chain(cls, values: Dict[str, Any]) -> Any:
        """Create the checker LLMChain from ``llm`` when none is supplied."""
        if "llm_chain" not in values:
            # Local import avoids a hard dependency on the `langchain` package
            # at module import time.
            from langchain.chains.llm import LLMChain

            values["llm_chain"] = LLMChain(
                llm=values.get("llm"),  # type: ignore[arg-type]
                prompt=PromptTemplate(
                    template=QUERY_CHECKER, input_variables=["dialect", "query"]
                ),
            )
        if values["llm_chain"].prompt.input_variables != ["dialect", "query"]:
            # Bug fix: the message previously said ['query', 'dialect'], which
            # contradicted the exact ordered list the check above requires.
            raise ValueError(
                "LLM chain for QueryCheckerTool must have input variables "
                "['dialect', 'query']"
            )
        return values

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the LLM to check the query."""
        return self.llm_chain.predict(
            query=query,
            dialect=self.db.dialect,
            callbacks=run_manager.get_child() if run_manager else None,
        )

    async def _arun(
        self,
        query: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # Async variant of _run using the chain's async predict.
        return await self.llm_chain.apredict(
            query=query,
            dialect=self.db.dialect,
            callbacks=run_manager.get_child() if run_manager else None,
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/sql_database/__init__.py | """Tools for interacting with a SQL database."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/sql_database/prompt.py | # flake8: noqa
# Prompt template for QuerySQLCheckerTool; {dialect} is the SQL dialect name
# (taken from SQLDatabase.dialect) and {query} is the statement to review.
QUERY_CHECKER = """
{query}
Double check the {dialect} query above for common mistakes, including:
- Using NOT IN with NULL values
- Using UNION when UNION ALL should have been used
- Using BETWEEN for exclusive ranges
- Data type mismatch in predicates
- Properly quoting identifiers
- Using the correct number of arguments for functions
- Casting to the correct data type
- Using the proper columns for joins
If there are any of the above mistakes, rewrite the query. If there are no mistakes, just reproduce the original query.
Output the final SQL query only.
SQL Query: """
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/connery/service.py | import json
from typing import Any, Dict, List, Optional
import requests
from langchain_core.utils.env import get_from_dict_or_env
from pydantic import BaseModel, model_validator
from langchain_community.tools.connery.models import Action
from langchain_community.tools.connery.tool import ConneryAction
class ConneryService(BaseModel):
    """Service for interacting with the Connery Runner API.
    It gets the list of available actions from the Connery Runner,
    wraps them in ConneryAction Tools and returns them to the user.
    It also provides a method for running the actions.
    """

    runner_url: Optional[str] = None
    api_key: Optional[str] = None

    @model_validator(mode="before")
    @classmethod
    def validate_attributes(cls, values: Dict) -> Any:
        """
        Validate the attributes of the ConneryService class.
        Parameters:
            values (dict): The arguments to validate.
        Returns:
            dict: The validated arguments.
        """
        # Fall back to environment variables when not supplied explicitly.
        runner_url = get_from_dict_or_env(values, "runner_url", "CONNERY_RUNNER_URL")
        api_key = get_from_dict_or_env(values, "api_key", "CONNERY_RUNNER_API_KEY")
        if not runner_url:
            raise ValueError("CONNERY_RUNNER_URL environment variable must be set.")
        if not api_key:
            raise ValueError("CONNERY_RUNNER_API_KEY environment variable must be set.")
        values["runner_url"] = runner_url
        values["api_key"] = api_key
        return values

    def list_actions(self) -> List[ConneryAction]:
        """
        Returns the list of actions available in the Connery Runner.
        Returns:
            List[ConneryAction]: The list of actions available in the Connery Runner.
        """
        return [
            ConneryAction.create_instance(action, self)
            for action in self._list_actions()
        ]

    def get_action(self, action_id: str) -> ConneryAction:
        """
        Returns the specified action available in the Connery Runner.
        Parameters:
            action_id (str): The ID of the action to return.
        Returns:
            ConneryAction: The action with the specified ID.
        """
        return ConneryAction.create_instance(self._get_action(action_id), self)

    def run_action(
        self, action_id: str, input: Optional[Dict[str, str]] = None
    ) -> Dict[str, str]:
        """
        Runs the specified Connery Action with the provided input.
        Parameters:
            action_id (str): The ID of the action to run.
            input (Optional[Dict[str, str]]): The input object expected by the
                action. Defaults to an empty dict when omitted.
        Returns:
            Dict[str, str]: The output of the action.
        """
        # `None` default (instead of a mutable `{}`) avoids the shared
        # mutable-default pitfall; callers that omit `input` see no change.
        return self._run_action(action_id, input if input is not None else {})

    def _list_actions(self) -> List[Action]:
        """
        Returns the list of actions available in the Connery Runner.
        Returns:
            List[Action]: The list of actions available in the Connery Runner.
        """
        response = requests.get(
            f"{self.runner_url}/v1/actions", headers=self._get_headers()
        )
        if not response.ok:
            # Trailing spaces keep the concatenated message readable
            # (previously the pieces ran together: "actions.Status code:...").
            raise ValueError(
                (
                    "Failed to list actions. "
                    f"Status code: {response.status_code}. "
                    f"Error message: {response.json()['error']['message']}"
                )
            )
        return [Action(**action) for action in response.json()["data"]]

    def _get_action(self, action_id: str) -> Action:
        """
        Returns the specified action available in the Connery Runner.
        Parameters:
            action_id (str): The ID of the action to return.
        Returns:
            Action: The action with the specified ID.
        Raises:
            ValueError: If no action with the given ID exists.
        """
        actions = self._list_actions()
        action = next((action for action in actions if action.id == action_id), None)
        if not action:
            # Space added between the two fragments (was "...listof available...").
            raise ValueError(
                (
                    f"The action with ID {action_id} was not found in the list "
                    "of available actions in the Connery Runner."
                )
            )
        return action

    def _run_action(
        self, action_id: str, input: Optional[Dict[str, str]] = None
    ) -> Dict[str, str]:
        """
        Runs the specified Connery Action with the provided input.
        Parameters:
            action_id (str): The ID of the action to run.
            input (Optional[Dict[str, str]]): The input object expected by the
                action. Defaults to an empty dict when omitted.
        Returns:
            Dict[str, str]: The output of the action.
        """
        payload = input if input is not None else {}
        response = requests.post(
            f"{self.runner_url}/v1/actions/{action_id}/run",
            headers=self._get_headers(),
            data=json.dumps({"input": payload}),
        )
        if not response.ok:
            raise ValueError(
                (
                    "Failed to run action. "
                    f"Status code: {response.status_code}. "
                    f"Error message: {response.json()['error']['message']}"
                )
            )
        if not response.json()["data"]["output"]:
            return {}
        else:
            return response.json()["data"]["output"]

    def _get_headers(self) -> Dict[str, str]:
        """
        Returns a standard set of HTTP headers
        to be used in API calls to the Connery runner.
        Returns:
            Dict[str, str]: The standard set of HTTP headers.
        """
        return {"Content-Type": "application/json", "x-api-key": self.api_key or ""}
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/connery/models.py | from typing import Any, List, Optional
from pydantic import BaseModel
class Validation(BaseModel):
    """Connery Action parameter validation model."""

    # Whether the parameter must be supplied when running the action.
    required: Optional[bool] = None
class Parameter(BaseModel):
    """Connery Action parameter model."""

    # Machine identifier of the parameter (used as the input-dict key).
    key: str
    # Human-readable label for the parameter.
    title: str
    description: Optional[str] = None
    # Parameter type as reported by the Connery Runner API; left as Any
    # because the runner's type vocabulary is not constrained here.
    type: Any
    # Validation rules (e.g. required-ness); absent when unconstrained.
    validation: Optional[Validation] = None
class Action(BaseModel):
    """Connery Action model."""

    # Field names are camelCase to mirror the Connery Runner API payload,
    # since instances are built directly via Action(**json_dict).
    id: str
    key: str
    title: str
    description: Optional[str] = None
    type: str
    inputParameters: List[Parameter]
    outputParameters: List[Parameter]
    pluginId: str
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/connery/tool.py | import asyncio
from functools import partial
from typing import Any, Dict, List, Optional, Type
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field, create_model, model_validator
from langchain_community.tools.connery.models import Action, Parameter
class ConneryAction(BaseTool):  # type: ignore[override, override]
    """Connery Action tool."""

    name: str
    description: str
    args_schema: Type[BaseModel]
    # The underlying Connery Action definition this tool wraps.
    action: Action
    # ConneryService instance; typed Any to avoid a circular import.
    connery_service: Any

    def _run(
        self,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> Dict[str, str]:
        """
        Runs the Connery Action with the provided input.
        Parameters:
            kwargs (Dict[str, str]): The input dictionary expected by the action.
        Returns:
            Dict[str, str]: The output of the action.
        """
        return self.connery_service.run_action(self.action.id, kwargs)

    async def _arun(
        self,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
        **kwargs: Any,
    ) -> Dict[str, str]:
        """
        Runs the Connery Action asynchronously with the provided input.
        Parameters:
            kwargs (Dict[str, str]): The input dictionary expected by the action.
        Returns:
            Dict[str, str]: The output of the action.
        """
        # The sync HTTP call is delegated to a thread so the event loop is not
        # blocked. get_running_loop() replaces the deprecated get_event_loop();
        # it is always valid here because _arun executes inside a running loop.
        func = partial(self._run, **kwargs)
        return await asyncio.get_running_loop().run_in_executor(None, func)

    def get_schema_json(self) -> str:
        """
        Returns the JSON representation of the Connery Action Tool schema.
        This is useful for debugging.
        Returns:
            str: The JSON representation of the Connery Action Tool schema.
        """
        # NOTE(review): schema_json is deprecated in pydantic v2 in favor of
        # model_json_schema(); kept for backward compatibility of the output.
        return self.args_schema.schema_json(indent=2)

    @model_validator(mode="before")
    @classmethod
    def validate_attributes(cls, values: dict) -> Any:
        """
        Validate the attributes of the ConneryAction class.
        Parameters:
            values (dict): The arguments to validate.
        Returns:
            dict: The validated arguments.
        """
        # Import ConneryService here and check if it is an instance
        # of ConneryService to avoid circular imports
        from .service import ConneryService

        if not isinstance(values.get("connery_service"), ConneryService):
            raise ValueError(
                "The attribute 'connery_service' must be an instance of ConneryService."
            )
        if not values.get("name"):
            raise ValueError("The attribute 'name' must be set.")
        if not values.get("description"):
            raise ValueError("The attribute 'description' must be set.")
        if not values.get("args_schema"):
            raise ValueError("The attribute 'args_schema' must be set.")
        if not values.get("action"):
            raise ValueError("The attribute 'action' must be set.")
        if not values.get("connery_service"):
            raise ValueError("The attribute 'connery_service' must be set.")
        return values

    @classmethod
    def create_instance(cls, action: Action, connery_service: Any) -> "ConneryAction":
        """
        Creates a Connery Action Tool from a Connery Action.
        Parameters:
            action (Action): The Connery Action to wrap in a Connery Action Tool.
            connery_service (ConneryService): The Connery Service
            to run the Connery Action. We use Any here to avoid circular imports.
        Returns:
            ConneryAction: The Connery Action Tool.
        """
        # Import ConneryService here and check if it is an instance
        # of ConneryService to avoid circular imports
        from .service import ConneryService

        if not isinstance(connery_service, ConneryService):
            raise ValueError(
                "The connery_service must be an instance of ConneryService."
            )
        input_schema = cls._create_input_schema(action.inputParameters)
        description = action.title + (
            ": " + action.description if action.description else ""
        )
        instance = cls(
            name=action.id,
            description=description,
            args_schema=input_schema,
            action=action,
            connery_service=connery_service,
        )
        return instance

    @classmethod
    def _create_input_schema(cls, inputParameters: List[Parameter]) -> Type[BaseModel]:
        """
        Creates an input schema for a Connery Action Tool
        based on the input parameters of the Connery Action.
        Parameters:
            inputParameters: List of input parameters of the Connery Action.
        Returns:
            Type[BaseModel]: The input schema for the Connery Action Tool.
        """
        dynamic_input_fields: Dict[str, Any] = {}
        for param in inputParameters:
            # Required parameters use Ellipsis so pydantic enforces presence.
            default = ... if param.validation and param.validation.required else None
            title = param.title
            description = param.title + (
                ": " + param.description if param.description else ""
            )
            # Renamed from `type` to avoid shadowing the builtin.
            param_type = param.type
            dynamic_input_fields[param.key] = (
                param_type,
                Field(default, title=title, description=description),
            )
        InputModel = create_model("InputSchema", **dynamic_input_fields)
        return InputModel
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/connery/__init__.py | """
This module contains the ConneryAction Tool and ConneryService.
"""
from .service import ConneryService
from .tool import ConneryAction
__all__ = ["ConneryAction", "ConneryService"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/list_dir.py | import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class DirectoryListingInput(BaseModel):
    """Input for ListDirectoryTool."""

    # Path is interpreted relative to the tool's root_dir when one is set.
    dir_path: str = Field(default=".", description="Subdirectory to list.")
class ListDirectoryTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that lists files and directories in a specified folder."""

    name: str = "list_directory"
    args_schema: Type[BaseModel] = DirectoryListingInput
    description: str = "List files and directories in a specified folder"

    def _run(
        self,
        dir_path: str = ".",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """List entries of ``dir_path``; all failures come back as strings."""
        try:
            resolved_dir = self.get_relative_path(dir_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
        try:
            entries = os.listdir(resolved_dir)
        except Exception as e:
            return "Error: " + str(e)
        if not entries:
            return f"No files found in directory {dir_path}"
        return "\n".join(entries)
# TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/read.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class ReadFileInput(BaseModel):
    """Input for ReadFileTool."""

    # Path is interpreted relative to the tool's root_dir when one is set.
    file_path: str = Field(..., description="name of file")
class ReadFileTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that reads a file."""

    name: str = "read_file"
    args_schema: Type[BaseModel] = ReadFileInput
    description: str = "Read file from disk"

    def _run(
        self,
        file_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Return the UTF-8 text of ``file_path``; failures come back as strings."""
        try:
            target = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        if not target.exists():
            return f"Error: no such file or directory: {file_path}"
        try:
            # Path.read_text opens, reads, and closes in one step.
            return target.read_text(encoding="utf-8")
        except Exception as e:
            return "Error: " + str(e)
# TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/write.py | from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class WriteFileInput(BaseModel):
    """Input for WriteFileTool."""

    # Path is interpreted relative to the tool's root_dir when one is set.
    file_path: str = Field(..., description="name of file")
    text: str = Field(..., description="text to write to file")
    append: bool = Field(
        default=False, description="Whether to append to an existing file."
    )
class WriteFileTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that writes a file to disk."""

    name: str = "write_file"
    args_schema: Type[BaseModel] = WriteFileInput
    description: str = "Write file to disk"

    def _run(
        self,
        file_path: str,
        text: str,
        append: bool = False,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Write (or append) ``text`` to ``file_path``; failures come back as strings."""
        try:
            target = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        try:
            # Create the immediate parent if missing (one level only).
            target.parent.mkdir(exist_ok=True, parents=False)
            with target.open("a" if append else "w", encoding="utf-8") as f:
                f.write(text)
            return f"File written successfully to {file_path}."
        except Exception as e:
            return "Error: " + str(e)
# TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/copy.py | import shutil
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileCopyInput(BaseModel):
    """Input for CopyFileTool."""

    # Both paths are interpreted relative to the tool's root_dir when set.
    source_path: str = Field(..., description="Path of the file to copy")
    destination_path: str = Field(..., description="Path to save the copied file")
class CopyFileTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that copies a file."""

    name: str = "copy_file"
    args_schema: Type[BaseModel] = FileCopyInput
    description: str = "Create a copy of a file in a specified location"

    def _run(
        self,
        source_path: str,
        destination_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Copy source to destination; failures come back as strings."""
        try:
            src = self.get_relative_path(source_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="source_path", value=source_path
            )
        try:
            dst = self.get_relative_path(destination_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="destination_path", value=destination_path
            )
        try:
            # copy2 preserves metadata; symlinks are copied as links.
            shutil.copy2(src, dst, follow_symlinks=False)
            return f"File copied successfully from {source_path} to {destination_path}."
        except Exception as e:
            return "Error: " + str(e)
# TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/delete.py | import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileDeleteInput(BaseModel):
    """Input for DeleteFileTool."""

    # Path is interpreted relative to the tool's root_dir when one is set.
    file_path: str = Field(..., description="Path of the file to delete")
class DeleteFileTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that deletes a file."""

    name: str = "file_delete"
    args_schema: Type[BaseModel] = FileDeleteInput
    description: str = "Delete a file"

    def _run(
        self,
        file_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Delete ``file_path``; failures come back as strings."""
        try:
            target = self.get_relative_path(file_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="file_path", value=file_path)
        if not target.exists():
            return f"Error: no such file or directory: {file_path}"
        try:
            os.remove(target)
            return f"File deleted successfully: {file_path}."
        except Exception as e:
            return "Error: " + str(e)
# TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/move.py | import shutil
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileMoveInput(BaseModel):
    """Input for MoveFileTool."""

    # Both paths are interpreted relative to the tool's root_dir when set.
    source_path: str = Field(..., description="Path of the file to move")
    destination_path: str = Field(..., description="New path for the moved file")
class MoveFileTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that moves a file."""

    name: str = "move_file"
    args_schema: Type[BaseModel] = FileMoveInput
    description: str = "Move or rename a file from one location to another"

    def _run(
        self,
        source_path: str,
        destination_path: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Move source to destination; failures come back as strings."""
        try:
            source_path_ = self.get_relative_path(source_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(
                arg_name="source_path", value=source_path
            )
        try:
            destination_path_ = self.get_relative_path(destination_path)
        except FileValidationError:
            # Bug fix: this branch previously formatted the message with the
            # unbound local `destination_path_` (assignment above raised), so
            # any invalid destination crashed with NameError instead of
            # returning the access-denied message. Mirrors CopyFileTool.
            return INVALID_PATH_TEMPLATE.format(
                arg_name="destination_path", value=destination_path
            )
        if not source_path_.exists():
            return f"Error: no such file or directory {source_path}"
        try:
            # shutil.move expects str args in 3.8
            shutil.move(str(source_path_), destination_path_)
            return f"File moved successfully from {source_path} to {destination_path}."
        except Exception as e:
            return "Error: " + str(e)
# TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/utils.py | import sys
from pathlib import Path
from typing import Optional
from pydantic import BaseModel
def is_relative_to(path: Path, root: Path) -> bool:
    """Return True when ``path`` lies at or under ``root``."""
    if sys.version_info >= (3, 9):
        # Python 3.9+ ships this check natively on Path.
        return path.is_relative_to(root)
    # Fallback for 3.8: relative_to raises ValueError for paths outside root.
    try:
        path.relative_to(root)
    except ValueError:
        return False
    return True
# Message returned (not raised) by the file tools when a path falls outside
# the configured root directory.
INVALID_PATH_TEMPLATE = (
    "Error: Access denied to {arg_name}: {value}."
    " Permission granted exclusively to the current working directory"
)
class FileValidationError(ValueError):
    """Error for paths outside the root directory.

    Raised by get_validated_relative_path; the file tools catch it and return
    INVALID_PATH_TEMPLATE instead of propagating.
    """
class BaseFileToolMixin(BaseModel):
    """Mixin for file system tools."""

    root_dir: Optional[str] = None
    """The final path will be chosen relative to root_dir if specified."""

    def get_relative_path(self, file_path: str) -> Path:
        """Get the relative path, returning an error if unsupported.

        With no root_dir configured, the path is used verbatim (no sandboxing);
        otherwise it is resolved and validated against root_dir.
        """
        if self.root_dir is None:
            return Path(file_path)
        return get_validated_relative_path(Path(self.root_dir), file_path)
def get_validated_relative_path(root: Path, user_path: str) -> Path:
    """Resolve ``user_path`` under ``root``, rejecting escapes from ``root``.

    Note: this still permits symlinks from outside that point within the root;
    further validation would be needed to disallow those.
    """
    resolved_root = root.resolve()
    candidate = (resolved_root / user_path).resolve()
    if not is_relative_to(candidate, resolved_root):
        raise FileValidationError(
            f"Path {user_path} is outside of the allowed directory {resolved_root}"
        )
    return candidate
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/__init__.py | """File Management Tools."""
from langchain_community.tools.file_management.copy import CopyFileTool
from langchain_community.tools.file_management.delete import DeleteFileTool
from langchain_community.tools.file_management.file_search import FileSearchTool
from langchain_community.tools.file_management.list_dir import ListDirectoryTool
from langchain_community.tools.file_management.move import MoveFileTool
from langchain_community.tools.file_management.read import ReadFileTool
from langchain_community.tools.file_management.write import WriteFileTool
# Public API of the file-management tools package; keep in sync with the
# imports above when adding a new tool.
__all__ = [
    "CopyFileTool",
    "DeleteFileTool",
    "FileSearchTool",
    "MoveFileTool",
    "ReadFileTool",
    "WriteFileTool",
    "ListDirectoryTool",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/file_management/file_search.py | import fnmatch
import os
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, Field
from langchain_community.tools.file_management.utils import (
INVALID_PATH_TEMPLATE,
BaseFileToolMixin,
FileValidationError,
)
class FileSearchInput(BaseModel):
    """Input for FileSearchTool."""

    # Searched recursively; resolved against the tool's root_dir when set.
    dir_path: str = Field(
        default=".",
        description="Subdirectory to search in.",
    )
    # An fnmatch-style glob (e.g. "*.txt") — the tool uses fnmatch.filter,
    # not the `re` module, despite the word "regex" in the description.
    pattern: str = Field(
        ...,
        description="Unix shell regex, where * matches everything.",
    )
class FileSearchTool(BaseFileToolMixin, BaseTool):  # type: ignore[override, override]
    """Tool that searches for files in a subdirectory that match a regex pattern."""

    name: str = "file_search"
    args_schema: Type[BaseModel] = FileSearchInput
    description: str = (
        "Recursively search for files in a subdirectory that match the regex pattern"
    )

    def _run(
        self,
        pattern: str,
        dir_path: str = ".",
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        # Reject paths that escape root_dir before touching the filesystem.
        try:
            search_root = self.get_relative_path(dir_path)
        except FileValidationError:
            return INVALID_PATH_TEMPLATE.format(arg_name="dir_path", value=dir_path)
        try:
            matches = [
                os.path.relpath(os.path.join(walk_root, name), search_root)
                for walk_root, _, filenames in os.walk(search_root)
                for name in fnmatch.filter(filenames, pattern)
            ]
        except Exception as e:
            return "Error: " + str(e)
        if not matches:
            return f"No files found for pattern {pattern} in directory {dir_path}"
        return "\n".join(matches)

    # TODO: Add aiofiles method
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/text_moderation.py | from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiTextModerationTool: a single free-text field.
class TextModerationInput(BaseModel):
    query: str = Field(description="Text to moderate")
class EdenAiTextModerationTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Explicit text detection.

    for api reference check edenai documentation:
    https://docs.edenai.co/reference/image_explicit_content_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_explicit_content_detection_text"
    description: str = (
        "A wrapper around edenai Services explicit content detection for text. "
        """Useful for when you have to scan text for offensive,
        sexually explicit or suggestive content,
        it checks also if there is any content of self-harm,
        violence, racist or hate speech."""
        """the structure of the output is :
        'the type of the explicit content : the likelihood of it being explicit'
        the likelihood is a number
        between 1 and 5, 1 being the lowest and 5 the highest.
        something is explicit if the likelihood is equal or higher than 3.
        for example :
        nsfw_likelihood: 1
        this is not explicit.
        for example :
        nsfw_likelihood: 3
        this is explicit.
        """
        "Input should be a string."
    )
    args_schema: Type[BaseModel] = TextModerationInput

    # Language code of the text sent to the model (e.g. "en"); required.
    language: str

    feature: str = "text"
    subfeature: str = "moderation"

    def _parse_response(self, response: list) -> str:
        """Condense provider results into '"label": likelihood' lines."""
        formatted_result = []
        for result in response:
            # Idiom fix: membership test on the mapping directly, not .keys().
            if "nsfw_likelihood" in result:
                formatted_result.append(
                    "nsfw_likelihood: " + str(result["nsfw_likelihood"])
                )
            # label/likelihood arrive as parallel lists (the base payload sets
            # attributes_as_list=True).
            for label, likelihood in zip(result["label"], result["likelihood"]):
                formatted_result.append(f'"{label}": {str(likelihood)}')
        return "\n".join(formatted_result)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"text": query, "language": self.language}
        return self._call_eden_ai(query_params)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/audio_text_to_speech.py | from __future__ import annotations
import logging
from typing import Any, Dict, List, Literal, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, model_validator, validator
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiTextToSpeechTool: the text to synthesize.
class TextToSpeechInput(BaseModel):
    query: str = Field(description="text to generate audio from")
class EdenAiTextToSpeechTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Text to speech API.
    for api reference check edenai documentation:
    https://docs.edenai.co/reference/audio_text_to_speech_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_text_to_speech"
    description: str = (
        "A wrapper around edenai Services text to speech."
        "Useful for when you need to convert text to speech."
        """the output is a string representing the URL of the audio file,
        or the path to the downloaded wav file """
    )
    args_schema: Type[BaseModel] = TextToSpeechInput

    language: Optional[str] = "en"
    """
    language of the text passed to the model.
    """

    # optional params see api documentation for more info
    return_type: Literal["url", "wav"] = "url"
    rate: Optional[int] = None
    pitch: Optional[int] = None
    volume: Optional[int] = None
    audio_format: Optional[str] = None
    sampling_rate: Optional[int] = None
    # Per-provider voice model overrides, keyed by provider name (validated
    # against `providers` below).
    voice_models: Dict[str, str] = Field(default_factory=dict)

    voice: Literal["MALE", "FEMALE"]
    """voice option : 'MALE' or 'FEMALE' """

    feature: str = "audio"
    subfeature: str = "text_to_speech"

    # NOTE(review): pydantic v1-style `validator` is deprecated under pydantic
    # v2; consider migrating to `field_validator` for consistency with
    # `model_validator` used below.
    @validator("providers")
    def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
        """
        This tool has no feature to combine providers results.
        Therefore we only allow one provider
        """
        if len(v) > 1:
            raise ValueError(
                "Please select only one provider. "
                "The feature to combine providers results is not available "
                "for this tool."
            )
        return v

    @model_validator(mode="before")
    @classmethod
    def check_voice_models_key_is_provider_name(cls, values: dict) -> Any:
        # Every key in voice_models must name one of the selected providers.
        for key in values.get("voice_models", {}).keys():
            if key not in values.get("providers", []):
                raise ValueError(
                    "voice_model should be formatted like this "
                    "{<provider_name>: <its_voice_model>}"
                )
        return values

    def _download_wav(self, url: str, save_path: str) -> None:
        # Fetch the synthesized audio; anything other than HTTP 200 is an error.
        response = requests.get(url)
        if response.status_code == 200:
            with open(save_path, "wb") as f:
                f.write(response.content)
        else:
            raise ValueError("Error while downloading wav file")

    def _parse_response(self, response: list) -> str:
        result = response[0]
        if self.return_type == "url":
            return result["audio_resource_url"]
        else:
            # Saves to a fixed filename in the current working directory.
            self._download_wav(result["audio_resource_url"], "audio.wav")
            return "audio.wav"

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        all_params = {
            "text": query,
            "language": self.language,
            "option": self.voice,
            "return_type": self.return_type,
            "rate": self.rate,
            "pitch": self.pitch,
            "volume": self.volume,
            "audio_format": self.audio_format,
            "sampling_rate": self.sampling_rate,
            "settings": self.voice_models,
        }

        # Drop None values so they are not sent to the API.
        query_params = {k: v for k, v in all_params.items() if v is not None}

        return self._call_eden_ai(query_params)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/edenai_base_tool.py | from __future__ import annotations
import logging
from abc import abstractmethod
from typing import Any, Dict, List, Optional
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_core.utils import secret_from_env
from pydantic import Field, SecretStr
logger = logging.getLogger(__name__)
class EdenaiTool(BaseTool):  # type: ignore[override]
    """
    the base tool for all the EdenAI Tools .
    you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    # The Eden AI endpoint is built as /v2/{feature}/{subfeature}.
    feature: str
    subfeature: str

    # Pulled from the EDENAI_API_KEY environment variable when not set.
    edenai_api_key: Optional[SecretStr] = Field(
        default_factory=secret_from_env("EDENAI_API_KEY", default=None)
    )

    # Async subfeatures return a job id instead of the provider response;
    # subclasses (e.g. speech-to-text) set this to True.
    is_async: bool = False

    providers: List[str]
    """provider to use for the API call."""

    @staticmethod
    def get_user_agent() -> str:
        """Build a User-Agent header advertising the langchain version."""
        from langchain_community import __version__

        return f"langchain/{__version__}"

    def _api_key_value(self) -> str:
        """Return the raw API key string, or "" when no key is configured."""
        return self.edenai_api_key.get_secret_value() if self.edenai_api_key else ""

    def _call_eden_ai(self, query_params: Dict[str, Any]) -> str:
        """
        Make an API call to the EdenAI service with the specified query parameters.

        Args:
            query_params (dict): The parameters to include in the API call.

        Returns:
            requests.Response: The response from the EdenAI API call.
        """
        headers = {
            "Authorization": f"Bearer {self._api_key_value()}",
            "User-Agent": self.get_user_agent(),
        }

        url = f"https://api.edenai.run/v2/{self.feature}/{self.subfeature}"

        payload = {
            "providers": str(self.providers),
            "response_as_dict": False,
            "attributes_as_list": True,
            "show_original_response": False,
        }

        payload.update(query_params)

        response = requests.post(url, json=payload, headers=headers)

        self._raise_on_error(response)

        try:
            return self._parse_response(response.json())
        except Exception as e:
            raise RuntimeError(f"An error occurred while running tool: {e}")

    def _raise_on_error(self, response: requests.Response) -> None:
        """Raise on HTTP failures and on provider-level 'fail' statuses."""
        if response.status_code >= 500:
            raise Exception(f"EdenAI Server: Error {response.status_code}")
        elif response.status_code >= 400:
            raise ValueError(f"EdenAI received an invalid payload: {response.text}")
        elif response.status_code != 200:
            raise Exception(
                f"EdenAI returned an unexpected response with status "
                f"{response.status_code}: {response.text}"
            )

        # case where edenai call succeeded but provider returned an error
        # (eg: rate limit, server error, etc.)
        if self.is_async is False:
            # async call are different and only return a job_id,
            # not the provider response directly
            provider_response = response.json()[0]
            if provider_response.get("status") == "fail":
                err_msg = provider_response["error"]["message"]
                raise ValueError(err_msg)

    @abstractmethod
    def _run(
        self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
    ) -> str:
        pass

    @abstractmethod
    def _parse_response(self, response: Any) -> str:
        """Take a dict response and condense it's data in a human readable string"""
        pass

    def _get_edenai(self, url: str) -> requests.Response:
        """GET helper used e.g. for polling async job status.

        Fix: the Authorization header previously interpolated the SecretStr
        object itself, which formats as '**********' rather than the key;
        it now unwraps the secret exactly like _call_eden_ai does.
        """
        headers = {
            "accept": "application/json",
            "authorization": f"Bearer {self._api_key_value()}",
            "User-Agent": self.get_user_agent(),
        }

        response = requests.get(url, headers=headers)

        self._raise_on_error(response)

        return response

    def _parse_json_multilevel(
        self, extracted_data: dict, formatted_list: list, level: int = 0
    ) -> None:
        """Recursively flatten a nested dict into indented 'key : value' lines."""
        for section, subsections in extracted_data.items():
            indentation = "  " * level
            if isinstance(subsections, str):
                subsections = subsections.replace("\n", ",")
                formatted_list.append(f"{indentation}{section} : {subsections}")

            elif isinstance(subsections, list):
                formatted_list.append(f"{indentation}{section} : ")
                self._list_handling(subsections, formatted_list, level + 1)

            elif isinstance(subsections, dict):
                formatted_list.append(f"{indentation}{section} : ")
                self._parse_json_multilevel(subsections, formatted_list, level + 1)

    def _list_handling(
        self, subsection_list: list, formatted_list: list, level: int
    ) -> None:
        """Companion to _parse_json_multilevel for list-valued sections."""
        for list_item in subsection_list:
            if isinstance(list_item, dict):
                self._parse_json_multilevel(list_item, formatted_list, level)

            elif isinstance(list_item, list):
                self._list_handling(list_item, formatted_list, level + 1)

            else:
                formatted_list.append(f"{'  ' * level}{list_item}")
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/ocr_identityparser.py | from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiParsingIDTool: URL of the ID document to parse.
class IDParsingInput(BaseModel):
    query: HttpUrl = Field(description="url of the document to parse")
class EdenAiParsingIDTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Identity parsing API.

    for api reference check edenai documentation:
    https://docs.edenai.co/reference/ocr_identity_parser_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_identity_parsing"
    description: str = (
        "A wrapper around edenai Services Identity parsing. "
        "Useful for when you have to extract information from an ID Document "
        "Input should be the string url of the document to parse."
    )
    args_schema: Type[BaseModel] = IDParsingInput

    feature: str = "ocr"
    subfeature: str = "identity_parser"

    language: Optional[str] = None
    """
    language of the text passed to the model.
    """

    def _parse_response(self, response: list) -> str:
        # A single entry is parsed directly; otherwise only the merged
        # "eden-ai" entries contribute to the output.
        lines: list = []
        if len(response) == 1:
            candidates = [response[0]]
        else:
            candidates = [e for e in response if e.get("provider") == "eden-ai"]
        for entry in candidates:
            self._parse_json_multilevel(entry["extracted_data"][0], lines)
        return "\n".join(lines)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self._call_eden_ai(
            {
                "file_url": query,
                "language": self.language,
                "attributes_as_list": False,
            }
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/image_explicitcontent.py | from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiExplicitImageTool: URL of the image to analyze.
class ExplicitImageInput(BaseModel):
    query: HttpUrl = Field(description="url of the image to analyze")
class EdenAiExplicitImageTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Explicit image detection.

    for api reference check edenai documentation:
    https://docs.edenai.co/reference/image_explicit_content_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_image_explicit_content_detection"
    description: str = (
        "A wrapper around edenai Services Explicit image detection. "
        """Useful for when you have to extract Explicit Content from images.
        it detects adult only content in images,
        that is generally inappropriate for people under
        the age of 18 and includes nudity, sexual activity,
        pornography, violence, gore content, etc."""
        "Input should be the string url of the image ."
    )

    args_schema: Type[BaseModel] = ExplicitImageInput

    combine_available: bool = True
    feature: str = "image"
    subfeature: str = "explicit_content"

    def _parse_json(self, json_data: dict) -> str:
        """Render one provider payload as an nsfw summary plus per-item lines."""
        result_str = f"nsfw_likelihood: {json_data['nsfw_likelihood']}\n"
        for idx, found_obj in enumerate(json_data["items"]):
            label = found_obj["label"].lower()
            likelihood = found_obj["likelihood"]
            result_str += f"{idx}: {label} likelihood {likelihood},\n"
        # Trim the trailing ",\n" of the last item.
        return result_str[:-2]

    def _parse_response(self, json_data: list) -> str:
        """Pick the relevant payload from the response list and format it.

        Fix: `result` was previously unbound (UnboundLocalError) when a
        multi-provider response contained no "eden-ai" entry; it now falls
        back to an empty string in that case.
        """
        if len(json_data) == 1:
            return self._parse_json(json_data[0])
        result = ""
        for entry in json_data:
            if entry.get("provider") == "eden-ai":
                result = self._parse_json(entry)
        return result

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"file_url": query, "attributes_as_list": False}
        return self._call_eden_ai(query_params)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/audio_speech_to_text.py | from __future__ import annotations
import json
import logging
import time
from typing import List, Optional, Type
import requests
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl, validator
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiSpeechToTextTool: URL of the audio file to transcribe.
class SpeechToTextInput(BaseModel):
    query: HttpUrl = Field(description="url of the audio to analyze")
class EdenAiSpeechToTextTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Speech To Text API.
    for api reference check edenai documentation:
    https://app.edenai.run/bricks/speech/asynchronous-speech-to-text.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_speech_to_text"
    description: str = (
        "A wrapper around edenai Services speech to text "
        "Useful for when you have to convert audio to text."
        "Input should be a url to an audio file."
    )
    args_schema: Type[BaseModel] = SpeechToTextInput

    # Async subfeature: _call_eden_ai returns a job id, not the transcript.
    is_async: bool = True

    language: Optional[str] = "en"
    # NOTE(review): Optional fields without explicit defaults are treated as
    # *required* by pydantic v2 — confirm whether `= None` defaults were
    # intended for `speakers` and `custom_vocabulary`.
    speakers: Optional[int]
    profanity_filter: bool = False
    custom_vocabulary: Optional[List[str]]

    feature: str = "audio"
    subfeature: str = "speech_to_text_async"
    # Polled with the job id appended to retrieve the async result.
    base_url: str = "https://api.edenai.run/v2/audio/speech_to_text_async/"

    @validator("providers")
    def check_only_one_provider_selected(cls, v: List[str]) -> List[str]:
        """
        This tool has no feature to combine providers results.
        Therefore we only allow one provider
        """
        if len(v) > 1:
            raise ValueError(
                "Please select only one provider. "
                "The feature to combine providers results is not available "
                "for this tool."
            )
        return v

    def _wait_processing(self, url: str) -> requests.Response:
        # Poll job status up to 10 times, one second apart (~10s timeout).
        for _ in range(10):
            time.sleep(1)
            audio_analysis_result = self._get_edenai(url)
            temp = audio_analysis_result.json()
            if temp["status"] == "finished":
                if temp["results"][self.providers[0]]["error"] is not None:
                    raise Exception(
                        f"""EdenAI returned an unexpected response
                        {temp['results'][self.providers[0]]['error']}"""
                    )
                else:
                    return audio_analysis_result

        raise Exception("Edenai speech to text job id processing Timed out")

    def _parse_response(self, response: dict) -> str:
        # The initial POST only returns the async job id.
        return response["public_id"]

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        all_params = {
            "file_url": query,
            "language": self.language,
            "speakers": self.speakers,
            "profanity_filter": self.profanity_filter,
            "custom_vocabulary": self.custom_vocabulary,
        }

        # Drop None values so they are not sent to the API.
        query_params = {k: v for k, v in all_params.items() if v is not None}

        # Submit the job, poll until finished, then extract the transcript.
        job_id = self._call_eden_ai(query_params)
        url = self.base_url + job_id
        audio_analysis_result = self._wait_processing(url)
        result = audio_analysis_result.text
        formatted_text = json.loads(result)
        return formatted_text["results"][self.providers[0]]["text"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/ocr_invoiceparser.py | from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiParsingInvoiceTool: URL of the invoice to parse.
class InvoiceParsingInput(BaseModel):
    query: HttpUrl = Field(description="url of the document to parse")
class EdenAiParsingInvoiceTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Invoice parsing API.

    for api reference check edenai documentation:
    https://docs.edenai.co/reference/ocr_invoice_parser_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_invoice_parsing"
    description: str = (
        "A wrapper around edenai Services invoice parsing. "
        """Useful for when you have to extract information from
        an image it enables to take invoices
        in a variety of formats and returns the data in contains
        (items, prices, addresses, vendor name, etc.)
        in a structured format to automate the invoice processing """
        "Input should be the string url of the document to parse."
    )
    args_schema: Type[BaseModel] = InvoiceParsingInput

    language: Optional[str] = None
    """
    language of the image passed to the model.
    """

    feature: str = "ocr"
    subfeature: str = "invoice_parser"

    def _parse_response(self, response: list) -> str:
        # A single entry is parsed directly; otherwise only the merged
        # "eden-ai" entries contribute to the output.
        lines: list = []
        if len(response) == 1:
            candidates = [response[0]]
        else:
            candidates = [e for e in response if e.get("provider") == "eden-ai"]
        for entry in candidates:
            self._parse_json_multilevel(entry["extracted_data"][0], lines)
        return "\n".join(lines)

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        return self._call_eden_ai(
            {
                "file_url": query,
                "language": self.language,
                "attributes_as_list": False,
            }
        )
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/image_objectdetection.py | from __future__ import annotations
import logging
from typing import Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field, HttpUrl
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
logger = logging.getLogger(__name__)
# Schema for EdenAiObjectDetectionTool: URL of the image to analyze.
class ObjectDetectionInput(BaseModel):
    query: HttpUrl = Field(description="url of the image to analyze")
class EdenAiObjectDetectionTool(EdenaiTool):  # type: ignore[override, override, override]
    """Tool that queries the Eden AI Object detection API.

    for api reference check edenai documentation:
    https://docs.edenai.co/reference/image_object_detection_create.

    To use, you should have
    the environment variable ``EDENAI_API_KEY`` set with your API token.
    You can find your token here: https://app.edenai.run/admin/account/settings
    """

    name: str = "edenai_object_detection"
    description: str = (
        "A wrapper around edenai Services Object Detection . "
        """Useful for when you have to do an to identify and locate
        (with bounding boxes) objects in an image """
        "Input should be the string url of the image to identify."
    )

    args_schema: Type[BaseModel] = ObjectDetectionInput

    # When True, bounding-box coordinates are appended to each label line.
    show_positions: bool = False

    feature: str = "image"
    subfeature: str = "object_detection"

    def _parse_json(self, json_data: dict) -> str:
        """Render detected objects, one 'label - Confidence c' per line.

        Fixes: labels were accumulated cumulatively inside the loop
        (duplicating earlier lines in the output), and `all([...])` treated
        a valid coordinate of 0 as missing because 0 is falsy.
        """
        label_info = []
        for found_obj in json_data["items"]:
            label_str = f"{found_obj['label']} - Confidence {found_obj['confidence']}"
            x_min = found_obj.get("x_min")
            x_max = found_obj.get("x_max")
            y_min = found_obj.get("y_min")
            y_max = found_obj.get("y_max")
            if self.show_positions and all(
                v is not None for v in (x_min, x_max, y_min, y_max)
            ):  # some providers don't return positions
                label_str += f""",at the position x_min: {x_min}, x_max: {x_max},
                y_min: {y_min}, y_max: {y_max}"""
            label_info.append(label_str)

        return "\n".join(label_info)

    def _parse_response(self, response: list) -> str:
        """Pick the relevant payload from the response list and format it.

        Fix: `result` was previously unbound (UnboundLocalError) when a
        multi-provider response contained no "eden-ai" entry; it now falls
        back to an empty string in that case.
        """
        if len(response) == 1:
            return self._parse_json(response[0])
        result = ""
        for entry in response:
            if entry.get("provider") == "eden-ai":
                result = self._parse_json(entry)
        return result

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the tool."""
        query_params = {"file_url": query, "attributes_as_list": False}
        return self._call_eden_ai(query_params)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/edenai/__init__.py | """Edenai Tools."""
from langchain_community.tools.edenai.audio_speech_to_text import (
EdenAiSpeechToTextTool,
)
from langchain_community.tools.edenai.audio_text_to_speech import (
EdenAiTextToSpeechTool,
)
from langchain_community.tools.edenai.edenai_base_tool import EdenaiTool
from langchain_community.tools.edenai.image_explicitcontent import (
EdenAiExplicitImageTool,
)
from langchain_community.tools.edenai.image_objectdetection import (
EdenAiObjectDetectionTool,
)
from langchain_community.tools.edenai.ocr_identityparser import (
EdenAiParsingIDTool,
)
from langchain_community.tools.edenai.ocr_invoiceparser import (
EdenAiParsingInvoiceTool,
)
from langchain_community.tools.edenai.text_moderation import (
EdenAiTextModerationTool,
)
# Public API of the Eden AI tools package; keep in sync with the imports above.
__all__ = [
    "EdenAiExplicitImageTool",
    "EdenAiObjectDetectionTool",
    "EdenAiParsingIDTool",
    "EdenAiParsingInvoiceTool",
    "EdenAiTextToSpeechTool",
    "EdenAiSpeechToTextTool",
    "EdenAiTextModerationTool",
    "EdenaiTool",
]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/memorize/tool.py | from abc import abstractmethod
from typing import Any, Optional, Protocol, Sequence, runtime_checkable
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.llms.gradient_ai import TrainResult
# Structural type: any LLM exposing (a)train_unsupervised satisfies it, and
# @runtime_checkable additionally permits isinstance() checks.
@runtime_checkable
class TrainableLLM(Protocol):
    """Protocol for trainable language models."""

    @abstractmethod
    def train_unsupervised(
        self,
        inputs: Sequence[str],
        **kwargs: Any,
    ) -> TrainResult: ...

    @abstractmethod
    async def atrain_unsupervised(
        self,
        inputs: Sequence[str],
        **kwargs: Any,
    ) -> TrainResult: ...
class Memorize(BaseTool):  # type: ignore[override]
    """Tool that trains a language model."""

    name: str = "memorize"
    description: str = (
        "Useful whenever you observed novel information "
        "from previous conversation history, "
        "i.e., another tool's action outputs or human comments. "
        "The action input should include observed information in detail, "
        "then the tool will fine-tune yourself to remember it."
    )
    llm: TrainableLLM = Field()

    def _run(
        self,
        information_to_learn: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        # Fine-tune on the single observed snippet and report the loss.
        outcome = self.llm.train_unsupervised((information_to_learn,))
        return f"Train complete. Loss: {outcome['loss']}"

    async def _arun(
        self,
        information_to_learn: str,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
    ) -> str:
        # Async counterpart of _run.
        outcome = await self.llm.atrain_unsupervised((information_to_learn,))
        return f"Train complete. Loss: {outcome['loss']}"
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/memorize/__init__.py | """Unsupervised learning based memorization."""
from langchain_community.tools.memorize.tool import Memorize
# Public API of the memorize tool package.
__all__ = ["Memorize"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/cassandra_database/tool.py | """Tools for interacting with an Apache Cassandra database."""
from __future__ import annotations
import traceback
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Type, Union
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field
from langchain_community.utilities.cassandra_database import CassandraDatabase
if TYPE_CHECKING:
from cassandra.cluster import ResultSet
class BaseCassandraDatabaseTool(BaseModel):
    """Base tool for interacting with an Apache Cassandra database."""

    # Shared database wrapper; excluded from pydantic serialization.
    db: CassandraDatabase = Field(exclude=True)

    # CassandraDatabase is not a pydantic model, so arbitrary types must be
    # allowed for validation to accept the field above.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )
class _QueryCassandraDatabaseToolInput(BaseModel):
    # Raw CQL passed verbatim to CassandraDatabase.run.
    query: str = Field(..., description="A detailed and correct CQL query.")
class QueryCassandraDatabaseTool(BaseCassandraDatabaseTool, BaseTool):  # type: ignore[override, override]
    """Tool for querying an Apache Cassandra database with provided CQL."""

    name: str = "cassandra_db_query"
    description: str = """
    Execute a CQL query against the database and get back the result.
    If the query is not correct, an error message will be returned.
    If an error is returned, rewrite the query, check the query, and try again.
    """
    args_schema: Type[BaseModel] = _QueryCassandraDatabaseToolInput

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> Union[str, Sequence[Dict[str, Any]], ResultSet]:
        """Execute the query, return the results or an error message."""
        try:
            return self.db.run(query)
        except Exception as e:
            # Surface the failure as a formatted string so the agent can retry.
            return f"Error: {e}\n{traceback.format_exc()}"
class _GetSchemaCassandraDatabaseToolInput(BaseModel):
    # Keyspace whose table definitions will be rendered as markdown.
    keyspace: str = Field(
        ...,
        description=("The name of the keyspace for which to return the schema."),
    )
class GetSchemaCassandraDatabaseTool(BaseCassandraDatabaseTool, BaseTool):  # type: ignore[override, override]
    """Tool for getting the schema of a keyspace in an Apache Cassandra database."""

    name: str = "cassandra_db_schema"
    description: str = """
    Input to this tool is a keyspace name, output is a table description
    of Apache Cassandra tables.
    If the query is not correct, an error message will be returned.
    If an error is returned, report back to the user that the keyspace
    doesn't exist and stop.
    """
    args_schema: Type[BaseModel] = _GetSchemaCassandraDatabaseToolInput

    def _run(
        self,
        keyspace: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get the schema for a keyspace."""
        try:
            tables = self.db.get_keyspace_tables(keyspace)
            # One markdown section per table, separated by blank lines.
            sections = (table.as_markdown() + "\n\n" for table in tables)
            return "".join(sections)
        except Exception as e:
            # Surface the failure as a formatted string instead of raising.
            return f"Error: {e}\n{traceback.format_exc()}"
class _GetTableDataCassandraDatabaseToolInput(BaseModel):
    keyspace: str = Field(
        ...,
        description=("The name of the keyspace containing the table."),
    )
    table: str = Field(
        ...,
        description=("The name of the table for which to return data."),
    )
    # WHERE-clause predicate; should use the table's primary key.
    predicate: str = Field(
        ...,
        description=("The predicate for the query that uses the primary key."),
    )
    limit: int = Field(
        ...,
        description=("The maximum number of rows to return."),
    )
class GetTableDataCassandraDatabaseTool(BaseCassandraDatabaseTool, BaseTool):  # type: ignore[override, override]
    """
    Tool for getting data from a table in an Apache Cassandra database.
    Use the WHERE clause to specify the predicate for the query that uses the
    primary key. A blank predicate will return all rows. Avoid this if possible.
    Use the limit to specify the number of rows to return. A blank limit will
    return all rows.
    """

    name: str = "cassandra_db_select_table_data"
    description: str = """
    Tool for getting data from a table in an Apache Cassandra database.
    Use the WHERE clause to specify the predicate for the query that uses the
    primary key. A blank predicate will return all rows. Avoid this if possible.
    Use the limit to specify the number of rows to return. A blank limit will
    return all rows.
    """
    args_schema: Type[BaseModel] = _GetTableDataCassandraDatabaseToolInput

    def _run(
        self,
        keyspace: str,
        table: str,
        predicate: str,
        limit: int,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Get data from a table in a keyspace."""
        try:
            rows = self.db.get_table_data(keyspace, table, predicate, limit)
        except Exception as e:
            # Surface the failure as a formatted string instead of raising.
            return f"Error: {e}\n{traceback.format_exc()}"
        return rows
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/cassandra_database/__init__.py | """Cassandra Tool"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/cassandra_database/prompt.py | """Tools for interacting with an Apache Cassandra database."""
# Prompt that instructs an LLM to act as a Cassandra "query path" analyst:
# given a user question and the keyspace schema, it should emit the chain of
# queries (JSON "query_paths") needed to reach the answer without
# ALLOW FILTERING.
# Fix: the literal previously opened with four quotes (`""""`), which embedded
# a stray leading `"` character in the prompt text.
QUERY_PATH_PROMPT = """
You are an Apache Cassandra expert query analysis bot with the following features
and rules:
 - You will take a question from the end user about finding certain
   data in the database.
 - You will examine the schema of the database and create a query path.
 - You will provide the user with the correct query to find the data they are looking
   for showing the steps provided by the query path.
 - You will use best practices for querying Apache Cassandra using partition keys
   and clustering columns.
 - Avoid using ALLOW FILTERING in the query.
 - The goal is to find a query path, so it may take querying other tables to get
   to the final answer.

The following is an example of a query path in JSON format:

{
  "query_paths": [
    {
      "description": "Direct query to users table using email",
      "steps": [
        {
          "table": "user_credentials",
          "query":
             "SELECT userid FROM user_credentials WHERE email = 'example@example.com';"
        },
        {
          "table": "users",
          "query": "SELECT * FROM users WHERE userid = ?;"
        }
      ]
    }
  ]
}"""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/wikidata/tool.py | """Tool for the Wikidata API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.wikidata import WikidataAPIWrapper
class WikidataQueryRun(BaseTool):  # type: ignore[override]
    """Tool that searches the Wikidata API."""

    name: str = "Wikidata"
    description: str = (
        "A wrapper around Wikidata. "
        "Useful for when you need to answer general questions about "
        "people, places, companies, facts, historical events, or other subjects. "
        "Input should be the exact name of the item you want information about "
        "or a Wikidata QID."
    )
    # Wrapper object that performs the actual Wikidata lookups.
    api_wrapper: WikidataAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Look up *query* via the Wikidata API wrapper and return the text."""
        lookup = self.api_wrapper.run
        return lookup(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/wikidata/__init__.py | """Wikidata API toolkit."""
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_trends/tool.py | """Tool for the Google Trends"""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.google_trends import GoogleTrendsAPIWrapper
class GoogleTrendsQueryRun(BaseTool):  # type: ignore[override]
    """Tool that queries the Google trends API."""

    name: str = "google_trends"
    # Fix: the original string fragments were concatenated without separating
    # whitespace, producing "...information aboutgoogle search trends from
    # Google TrendsInput should be...". Spaces/punctuation added so the LLM
    # reads a coherent description.
    description: str = (
        "A wrapper around Google Trends Search. "
        "Useful for when you need to get information about "
        "google search trends from Google Trends. "
        "Input should be a search query."
    )
    # Wrapper object that performs the actual Google Trends queries.
    api_wrapper: GoogleTrendsAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Run *query* through the Google Trends API wrapper."""
        return self.api_wrapper.run(query)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/google_trends/__init__.py | """Google Trends API Toolkit."""
from langchain_community.tools.google_trends.tool import GoogleTrendsQueryRun
__all__ = ["GoogleTrendsQueryRun"]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/amadeus/base.py | """Base class for Amadeus tools."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain_core.tools import BaseTool
from pydantic import Field
from langchain_community.tools.amadeus.utils import authenticate
if TYPE_CHECKING:
from amadeus import Client
class AmadeusBaseTool(BaseTool):  # type: ignore[override]
    """Base Tool for Amadeus."""

    # Shared Amadeus SDK client; created via ``authenticate()`` whenever a
    # subclass is instantiated without an explicit client.
    client: Client = Field(default_factory=authenticate)
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/amadeus/flight_search.py | import logging
from datetime import datetime as dt
from typing import Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from pydantic import BaseModel, Field
from langchain_community.tools.amadeus.base import AmadeusBaseTool
logger = logging.getLogger(__name__)
class FlightSearchSchema(BaseModel):
    """Schema for the AmadeusFlightSearch tool."""

    # Three-letter IATA code of the departure airport.
    originLocationCode: str = Field(
        description=(
            " The three letter International Air Transport "
            " Association (IATA) Location Identifier for the "
            " search's origin airport. "
        )
    )
    # Three-letter IATA code of the arrival airport.
    destinationLocationCode: str = Field(
        description=(
            " The three letter International Air Transport "
            " Association (IATA) Location Identifier for the "
            " search's destination airport. "
        )
    )
    # ISO-like "YYYY-MM-DDTHH:MM:SS" lower bound on departure time; must be
    # on the same calendar day as departureDateTimeLatest (enforced in _run).
    departureDateTimeEarliest: str = Field(
        description=(
            " The earliest departure datetime from the origin airport "
            " for the flight search in the following format: "
            ' "YYYY-MM-DDTHH:MM:SS", where "T" separates the date and time '
            ' components. For example: "2023-06-09T10:30:00" represents '
            " June 9th, 2023, at 10:30 AM. "
        )
    )
    # "YYYY-MM-DDTHH:MM:SS" upper bound on departure time.
    departureDateTimeLatest: str = Field(
        description=(
            " The latest departure datetime from the origin airport "
            " for the flight search in the following format: "
            ' "YYYY-MM-DDTHH:MM:SS", where "T" separates the date and time '
            ' components. For example: "2023-06-09T10:30:00" represents '
            " June 9th, 2023, at 10:30 AM. "
        )
    )
    # 1-based results page (pagination is applied client-side in _run).
    page_number: int = Field(
        default=1,
        description="The specific page number of flight results to retrieve",
    )
class AmadeusFlightSearch(AmadeusBaseTool):  # type: ignore[override, override]
    """Tool for searching for a single flight between two airports."""

    name: str = "single_flight_search"
    description: str = (
        " Use this tool to search for a single flight between the origin and "
        " destination airports at a departure between an earliest and "
        " latest datetime. "
    )
    args_schema: Type[FlightSearchSchema] = FlightSearchSchema

    def _run(
        self,
        originLocationCode: str,
        destinationLocationCode: str,
        departureDateTimeEarliest: str,
        departureDateTimeLatest: str,
        page_number: int = 1,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> list:
        """Search one-way flights via the Amadeus Flight Offers Search API.

        Both datetime bounds must fall on the same calendar day; otherwise an
        error is logged and ``[None]`` is returned. Results departing after
        ``departureDateTimeLatest`` are dropped, then the requested page of
        10 itineraries is returned.

        Raises:
            ImportError: If the ``amadeus`` package is not installed.
        """
        try:
            from amadeus import ResponseError
        except ImportError as e:
            raise ImportError(
                "Unable to import amadeus, please install with `pip install amadeus`."
            ) from e

        RESULTS_PER_PAGE = 10

        # Authenticate and retrieve a client
        client = self.client

        # The Amadeus API takes a single departure *date*, so both bounds
        # must be on the same day.
        earliestDeparture = dt.strptime(departureDateTimeEarliest, "%Y-%m-%dT%H:%M:%S")
        latestDeparture = dt.strptime(departureDateTimeLatest, "%Y-%m-%dT%H:%M:%S")
        if earliestDeparture.date() != latestDeparture.date():
            logger.error(
                " Error: Earliest and latest departure dates need to be the "
                " same date. If you're trying to search for round-trip "
                " flights, call this function for the outbound flight first, "
                " and then call again for the return flight. "
            )
            return [None]

        # Collect all results from the Amadeus Flight Offers Search API
        response = None
        try:
            response = client.shopping.flight_offers_search.get(
                originLocationCode=originLocationCode,
                destinationLocationCode=destinationLocationCode,
                departureDate=latestDeparture.strftime("%Y-%m-%d"),
                adults=1,
            )
        except ResponseError as error:
            # Consistency fix: report API failures through the module logger
            # (used elsewhere in this method) instead of print().
            logger.error(error)

        # Flatten each offer into a simplified itinerary dict with resolved
        # currency and carrier names from the response dictionaries.
        output = []
        if response is not None:
            for offer in response.data:
                itinerary: Dict = {}
                itinerary["price"] = {}
                itinerary["price"]["total"] = offer["price"]["total"]
                currency = offer["price"]["currency"]
                currency = response.result["dictionaries"]["currencies"][currency]
                itinerary["price"]["currency"] = currency
                segments = []
                for segment in offer["itineraries"][0]["segments"]:
                    flight = {}
                    flight["departure"] = segment["departure"]
                    flight["arrival"] = segment["arrival"]
                    flight["flightNumber"] = segment["number"]
                    carrier = segment["carrierCode"]
                    carrier = response.result["dictionaries"]["carriers"][carrier]
                    flight["carrier"] = carrier
                    segments.append(flight)
                itinerary["segments"] = segments
                output.append(itinerary)

        # Filter out flights after the latest departure time.
        # Bug fix: the original popped from `output` while iterating it with
        # enumerate(), which skips the element following every removal;
        # rebuilding the list filters correctly.
        # NOTE(review): flights before departureDateTimeEarliest were never
        # filtered by the original code; that behavior is preserved — confirm
        # whether it is intended.
        output = [
            itinerary
            for itinerary in output
            if dt.strptime(
                itinerary["segments"][0]["departure"]["at"], "%Y-%m-%dT%H:%M:%S"
            )
            <= latestDeparture
        ]

        # Return the paginated results
        startIndex = (page_number - 1) * RESULTS_PER_PAGE
        return output[startIndex : startIndex + RESULTS_PER_PAGE]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/amadeus/closest_airport.py | from typing import Any, Dict, Optional, Type
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.language_models import BaseLanguageModel
from pydantic import BaseModel, Field, model_validator
from langchain_community.chat_models import ChatOpenAI
from langchain_community.tools.amadeus.base import AmadeusBaseTool
class ClosestAirportSchema(BaseModel):
    """Schema for the AmadeusClosestAirport tool."""

    # Free-text place name, optionally qualified by region/country, that the
    # tool's LLM resolves to the nearest airport's IATA code.
    location: str = Field(
        description=(
            " The location for which you would like to find the nearest airport "
            " along with optional details such as country, state, region, or "
            " province, allowing for easy processing and identification of "
            " the closest airport. Examples of the format are the following:\n"
            " Cali, Colombia\n "
            " Lincoln, Nebraska, United States\n"
            " New York, United States\n"
            " Sydney, New South Wales, Australia\n"
            " Rome, Lazio, Italy\n"
            " Toronto, Ontario, Canada\n"
        )
    )
class AmadeusClosestAirport(AmadeusBaseTool):  # type: ignore[override, override, override]
    """Tool for finding the closest airport to a particular location."""

    name: str = "closest_airport"
    description: str = (
        "Use this tool to find the closest airport to a particular location."
    )
    args_schema: Type[ClosestAirportSchema] = ClosestAirportSchema

    llm: Optional[BaseLanguageModel] = Field(default=None)
    """Tool's llm used for calculating the closest airport. Defaults to `ChatOpenAI`."""

    @model_validator(mode="before")
    @classmethod
    def set_llm(cls, values: Dict[str, Any]) -> Any:
        """Populate a default ChatOpenAI llm when none was supplied."""
        if values.get("llm"):
            return values
        # For backward-compatibility
        values["llm"] = ChatOpenAI(temperature=0)
        return values

    def _run(
        self,
        location: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Ask the configured llm for the IATA code nearest to *location*."""
        prompt = (
            f" What is the nearest airport to {location}? Please respond with the "
            " airport's International Air Transport Association (IATA) Location "
            ' Identifier in the following JSON format. JSON: "iataCode": "IATA '
            ' Location Identifier" '
        )
        return self.llm.invoke(prompt)  # type: ignore[union-attr]
|
0 | lc_public_repos/langchain/libs/community/langchain_community/tools | lc_public_repos/langchain/libs/community/langchain_community/tools/amadeus/utils.py | """O365 tool utils."""
from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from amadeus import Client
logger = logging.getLogger(__name__)
def authenticate() -> Client:
    """Build an Amadeus ``Client`` from environment configuration.

    Reads ``AMADEUS_CLIENT_ID`` and ``AMADEUS_CLIENT_SECRET`` (required) and
    ``AMADEUS_HOSTNAME`` (optional, defaults to ``"test"``).

    Returns:
        An ``amadeus.Client``, or ``None`` when the required environment
        variables are missing (an error is logged in that case).

    Raises:
        ImportError: If the ``amadeus`` package is not installed.
    """
    try:
        from amadeus import Client
    except ImportError as e:
        raise ImportError(
            "Cannot import amadeus. Please install the package with "
            "`pip install amadeus`."
        ) from e

    client_id = os.environ.get("AMADEUS_CLIENT_ID")
    client_secret = os.environ.get("AMADEUS_CLIENT_SECRET")
    if client_id is None or client_secret is None:
        logger.error(
            "Error: The AMADEUS_CLIENT_ID and AMADEUS_CLIENT_SECRET environmental "
            "variables have not been set. Visit the following link on how to "
            "acquire these authorization tokens: "
            "https://developers.amadeus.com/register"
        )
        # NOTE(review): returning None contradicts the ``-> Client``
        # annotation but preserves the existing best-effort contract for
        # callers using this as a pydantic ``default_factory``.
        return None

    # Default to Amadeus' test environment unless explicitly overridden.
    hostname = os.environ.get("AMADEUS_HOSTNAME", "test")

    return Client(client_id=client_id, client_secret=client_secret, hostname=hostname)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.