sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py | from __future__ import annotations
from typing import Any, cast
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class LlamaIndexTool(BaseTool):
    """Adapter that exposes LlamaIndex tools and query engines as crewAI tools."""

    # The wrapped LlamaIndex tool; typed Any to avoid a hard llama_index import.
    llama_index_tool: Any

    def _run(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """Invoke the wrapped LlamaIndex tool and return its output."""
        from llama_index.core.tools import (  # type: ignore[import-not-found]
            BaseTool as LlamaBaseTool,
        )

        wrapped = cast(LlamaBaseTool, self.llama_index_tool)
        output = wrapped(*args, **kwargs)
        # When the result should be used directly as the agent's final answer,
        # unwrap the ToolOutput and hand back only its content.
        return output.content if self.result_as_answer else output

    @classmethod
    def from_tool(cls, tool: Any, **kwargs: Any) -> LlamaIndexTool:
        """Build a crewAI tool around an existing LlamaIndex tool.

        Raises:
            ValueError: If *tool* is not a LlamaIndex BaseTool, or has no fn_schema.
        """
        from llama_index.core.tools import (  # type: ignore[import-not-found]
            BaseTool as LlamaBaseTool,
        )

        if not isinstance(tool, LlamaBaseTool):
            raise ValueError(f"Expected a LlamaBaseTool, got {type(tool)}")

        metadata = cast(LlamaBaseTool, tool).metadata
        schema = metadata.fn_schema
        if schema is None:
            raise ValueError(
                "The LlamaIndex tool does not have an fn_schema specified."
            )

        return cls(
            name=metadata.name,
            description=metadata.description,
            args_schema=cast(type[BaseModel], schema),
            llama_index_tool=tool,
            **kwargs,
        )

    @classmethod
    def from_query_engine(
        cls,
        query_engine: Any,
        name: str | None = None,
        description: str | None = None,
        return_direct: bool = False,
        **kwargs: Any,
    ) -> LlamaIndexTool:
        """Build a crewAI tool from a LlamaIndex query engine."""
        from llama_index.core.query_engine import (  # type: ignore[import-not-found]
            BaseQueryEngine,
        )
        from llama_index.core.tools import (  # type: ignore[import-not-found]
            QueryEngineTool,
        )

        if not isinstance(query_engine, BaseQueryEngine):
            raise ValueError(f"Expected a BaseQueryEngine, got {type(query_engine)}")

        # NOTE: by default the schema expects an `input` variable. However this
        # confuses crewAI so we are renaming to `query`.
        class QueryToolSchema(BaseModel):
            """Schema for query tool."""

            query: str = Field(..., description="Search query for the query tool.")

        # `resolve_input_errors=True` matters here: the underlying schema
        # expects `input` while we are sending `query`.
        engine_tool = QueryEngineTool.from_defaults(
            query_engine,
            name=name,
            description=description,
            return_direct=return_direct,
            resolve_input_errors=True,
        )
        # HACK: swap our custom schema in place of the default one.
        engine_tool.metadata.fn_schema = QueryToolSchema
        return cls.from_tool(engine_tool, **kwargs)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/llamaindex_tool/llamaindex_tool.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedMDXSearchToolSchema(BaseModel):
    """Input for MDXSearchTool when the MDX source was fixed at construction time."""

    # The semantic query to run against already-ingested MDX content.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the MDX's content",
    )
class MDXSearchToolSchema(FixedMDXSearchToolSchema):
    """Input for MDXSearchTool when the MDX source is supplied per call."""

    # Path or URL of the MDX document to ingest before searching.
    mdx: str = Field(..., description="File path or URL of a MDX file to be searched")
class MDXSearchTool(RagTool):
    """RAG tool for semantic search over MDX documents."""

    name: str = "Search a MDX's content"
    description: str = (
        "A tool that can be used to semantic search a query from a MDX's content."
    )
    args_schema: type[BaseModel] = MDXSearchToolSchema

    def __init__(self, mdx: str | None = None, **kwargs):
        """Optionally pre-load a single MDX document and pin the tool to it."""
        super().__init__(**kwargs)
        if mdx is None:
            return
        self.add(mdx)
        # With a fixed document the agent only needs to supply the query.
        self.description = f"A tool that can be used to semantic search a query the {mdx} MDX's content."
        self.args_schema = FixedMDXSearchToolSchema
        self._generate_description()

    def add(self, mdx: str) -> None:
        """Ingest an MDX document (path or URL) into the RAG index."""
        super().add(mdx, data_type=DataType.MDX)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        mdx: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Search indexed MDX content, optionally ingesting *mdx* first."""
        if mdx is not None:
            self.add(mdx)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/mdx_search_tool/mdx_search_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/mongodb_vector_search_tool/utils.py | from __future__ import annotations
from collections.abc import Callable
from time import monotonic, sleep
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from pymongo.collection import Collection
def _vector_search_index_definition(
dimensions: int,
path: str,
similarity: str,
filters: list[str] | None = None,
**kwargs: Any,
) -> dict[str, Any]:
# https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-type/
fields = [
{
"numDimensions": dimensions,
"path": path,
"similarity": similarity,
"type": "vector",
},
]
if filters:
for field in filters:
fields.append({"type": "filter", "path": field}) # noqa: PERF401
definition = {"fields": fields}
definition.update(kwargs)
return definition
def create_vector_search_index(
    collection: Collection,
    index_name: str,
    dimensions: int,
    path: str,
    similarity: str,
    filters: list[str] | None = None,
    *,
    wait_until_complete: float | None = None,
    **kwargs: Any,
) -> None:
    """Experimental utility to create an Atlas vector search index.

    Args:
        collection (Collection): MongoDB Collection
        index_name (str): Name of Index
        dimensions (int): Number of dimensions in embedding
        path (str): field with vector embedding
        similarity (str): The similarity score used for the index
        filters (List[str]): Fields/paths to index to allow filtering in $vectorSearch
        wait_until_complete (Optional[float]): If provided, number of seconds to wait
            until search index is ready.
        kwargs: Keyword arguments supplying any additional options to SearchIndexModel.
    """
    from pymongo.operations import SearchIndexModel

    database = collection.database
    # Search indexes can only be created on a collection that already exists.
    if collection.name not in database.list_collection_names():
        database.create_collection(collection.name)

    definition = _vector_search_index_definition(
        dimensions=dimensions,
        path=path,
        similarity=similarity,
        filters=filters,
        **kwargs,
    )
    model = SearchIndexModel(
        definition=definition,
        name=index_name,
        type="vectorSearch",
    )
    collection.create_search_index(model)

    if wait_until_complete:
        _wait_for_predicate(
            predicate=lambda: _is_index_ready(collection, index_name),
            err=f"{index_name=} did not complete in {wait_until_complete}!",
            timeout=wait_until_complete,
        )
def _is_index_ready(collection: Collection, index_name: str) -> bool:
"""Check for the index name in the list of available search indexes to see if the
specified index is of status READY.
Args:
collection (Collection): MongoDB Collection to for the search indexes
index_name (str): Vector Search Index name
Returns:
bool : True if the index is present and READY false otherwise
"""
for index in collection.list_search_indexes(index_name):
if index["status"] == "READY":
return True
return False
def _wait_for_predicate(
predicate: Callable, err: str, timeout: float = 120, interval: float = 0.5
) -> None:
"""Generic to block until the predicate returns true.
Args:
predicate (Callable[, bool]): A function that returns a boolean value
err (str): Error message to raise if nothing occurs
timeout (float, optional): Wait time for predicate. Defaults to TIMEOUT.
interval (float, optional): Interval to check predicate. Defaults to DELAY.
Raises:
TimeoutError: _description_
"""
start = monotonic()
while not predicate():
if monotonic() - start > timeout:
raise TimeoutError(err)
sleep(interval)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/mongodb_vector_search_tool/utils.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py | from collections.abc import Iterable
from importlib.metadata import version
from logging import getLogger
import os
from typing import Any
from crewai.tools import BaseTool, EnvVar
from openai import AzureOpenAI, Client
from pydantic import BaseModel, Field
from crewai_tools.tools.mongodb_vector_search_tool.utils import (
create_vector_search_index,
)
try:
import pymongo # noqa: F401
MONGODB_AVAILABLE = True
except ImportError:
MONGODB_AVAILABLE = False
logger = getLogger(__name__)
class MongoDBVectorSearchConfig(BaseModel):
    """Configuration for MongoDB vector search queries.

    Controls the $vectorSearch stage built in MongoDBVectorSearchTool._run.
    """

    # Maximum number of documents returned by the search stage.
    limit: int | None = Field(default=4, description="number of documents to return.")
    # Applied as the `filter` key inside $vectorSearch when provided.
    pre_filter: dict[str, Any] | None = Field(
        default=None,
        description="List of MQL match expressions comparing an indexed field",
    )
    # Appended to the aggregation pipeline after the search and score stages.
    post_filter_pipeline: list[dict] | None = Field(
        default=None,
        description="Pipeline of MongoDB aggregation stages to filter/process results after $vectorSearch.",
    )
    # numCandidates is computed as limit * oversampling_factor.
    oversampling_factor: int = Field(
        default=10,
        description="Multiple of limit used when generating number of candidates at each step in the HNSW Vector Search",
    )
    # When False, a $project stage strips the embedding field from results.
    include_embeddings: bool = Field(
        default=False,
        description="Whether to include the embedding vector of each result in metadata.",
    )
class MongoDBToolSchema(BaseModel):
    """Input for MongoDBTool."""

    # Free-text query; it is embedded and used for the vector search.
    query: str = Field(
        ...,
        description="The query to search retrieve relevant information from the MongoDB database. Pass only the query, not the question.",
    )
class MongoDBVectorSearchTool(BaseTool):
    """Tool to perform a vector search on a MongoDB Atlas database.

    Embeds the query text with OpenAI (or Azure OpenAI) and runs an Atlas
    `$vectorSearch` aggregation against the configured collection.
    """

    name: str = "MongoDBVectorSearchTool"
    # FIX: "perfrom" -> "perform" (this string is shown to the agent).
    description: str = "A tool to perform a vector search on a MongoDB database for relevant information on internal documents."
    args_schema: type[BaseModel] = MongoDBToolSchema
    query_config: MongoDBVectorSearchConfig | None = Field(
        default=None, description="MongoDB Vector Search query configuration"
    )
    embedding_model: str = Field(
        default="text-embedding-3-large",
        description="Text OpenAI embedding model to use",
    )
    vector_index_name: str = Field(
        default="vector_index", description="Name of the Atlas Search vector index"
    )
    text_key: str = Field(
        default="text",
        description="MongoDB field that will contain the text for each document",
    )
    embedding_key: str = Field(
        default="embedding",
        description="Field that will contain the embedding for each document",
    )
    database_name: str = Field(..., description="The name of the MongoDB database")
    collection_name: str = Field(..., description="The name of the MongoDB collection")
    connection_string: str = Field(
        ...,
        description="The connection string of the MongoDB cluster",
    )
    dimensions: int = Field(
        default=1536,
        description="Number of dimensions in the embedding vector",
    )
    # FIX: the env vars previously declared here (BROWSERBASE_API_KEY /
    # BROWSERBASE_PROJECT_ID) were a copy-paste from another tool; this tool
    # authenticates against OpenAI (see __init__), not Browserbase.
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OPENAI_API_KEY",
                description="API key for OpenAI embeddings (not required when AZURE_OPENAI_ENDPOINT is set)",
                required=False,
            ),
        ]
    )
    # FIX: the installable dependency is `pymongo` ("mongdb" was a typo and
    # matches no package; __init__ installs pymongo via `uv add pymongo`).
    package_dependencies: list[str] = Field(default_factory=lambda: ["pymongo"])

    def __init__(self, **kwargs):
        """Validate dependencies, then set up the OpenAI client and MongoDB connection.

        Raises:
            ImportError: If pymongo is missing and the user declines installation.
            ValueError: If neither AZURE_OPENAI_ENDPOINT nor OPENAI_API_KEY is set.
        """
        super().__init__(**kwargs)
        if not MONGODB_AVAILABLE:
            import click

            if click.confirm(
                "You are missing the 'mongodb' crewai tool. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "pymongo"], check=True)  # noqa: S607
            else:
                raise ImportError("You are missing the 'mongodb' crewai tool.")

        # Prefer Azure when an endpoint is configured; fall back to plain OpenAI.
        if "AZURE_OPENAI_ENDPOINT" in os.environ:
            self._openai_client = AzureOpenAI()
        elif "OPENAI_API_KEY" in os.environ:
            self._openai_client = Client()
        else:
            raise ValueError(
                "OPENAI_API_KEY environment variable is required for MongoDBVectorSearchTool and it is mandatory to use the tool."
            )

        from pymongo import MongoClient
        from pymongo.driver_info import DriverInfo

        self._client = MongoClient(
            self.connection_string,
            driver=DriverInfo(name="CrewAI", version=version("crewai-tools")),
        )
        self._coll = self._client[self.database_name][self.collection_name]

    def create_vector_search_index(
        self,
        *,
        dimensions: int,
        relevance_score_fn: str = "cosine",
        auto_index_timeout: int = 15,
    ) -> None:
        """Convenience function to create a vector search index.

        Args:
            dimensions: Number of dimensions in embedding. If the value is set and
                the index does not exist, an index will be created.
            relevance_score_fn: The similarity score used for the index.
                Currently supported: 'euclidean', 'cosine', and 'dotProduct'.
            auto_index_timeout: Timeout in seconds to wait for an auto-created index
                to be ready.
        """
        create_vector_search_index(
            collection=self._coll,
            index_name=self.vector_index_name,
            dimensions=dimensions,
            path=self.embedding_key,
            similarity=relevance_score_fn,
            wait_until_complete=auto_index_timeout,
        )

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: list[dict[str, Any]] | None = None,
        ids: list[str] | None = None,
        batch_size: int = 100,
        **kwargs: Any,
    ) -> list[str]:
        """Add texts, create embeddings, and add to the Collection and index.

        Important notes on ids:
            - If _id or id is a key in the metadatas dicts, one must
              pop them and provide as separate list.
            - They must be unique.
            - If they are not provided, unique string ObjectIds are generated.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            ids: Optional list of unique ids used as document _id values.
            batch_size: Number of documents to insert at a time.
                Tuning this may help with performance and sidestep MongoDB limits.

        Returns:
            List of ids added to the vectorstore.
        """
        from bson import ObjectId

        # Materialize once so generator inputs are not exhausted before zip().
        texts = list(texts)
        _metadatas = metadatas or [{} for _ in texts]
        # FIX: honor caller-provided ids; previously they were always discarded
        # and regenerated, breaking the documented upsert-by-id behavior.
        if ids is None:
            ids = [str(ObjectId()) for _ in texts]

        result_ids = []
        texts_batch = []
        metadatas_batch = []
        size = 0
        i = 0
        for j, (text, metadata) in enumerate(zip(texts, _metadatas, strict=False)):
            size += len(text) + len(metadata)
            texts_batch.append(text)
            metadatas_batch.append(metadata)
            # Flush on batch count, or early when nearing MongoDB's 48MB
            # message size limit.
            if (j + 1) % batch_size == 0 or size >= 47_000_000:
                result_ids.extend(
                    self._bulk_embed_and_insert_texts(
                        texts_batch, metadatas_batch, ids[i : j + 1]
                    )
                )
                texts_batch = []
                metadatas_batch = []
                size = 0
                i = j + 1
        if texts_batch:
            result_ids.extend(
                self._bulk_embed_and_insert_texts(
                    texts_batch, metadatas_batch, ids[i:]
                )
            )
        return result_ids

    def _embed_texts(self, texts: list[str]) -> list[list[float]]:
        """Embed *texts* with the configured OpenAI model, one vector per text."""
        return [
            item.embedding
            for item in self._openai_client.embeddings.create(
                input=texts,
                model=self.embedding_model,
                dimensions=self.dimensions,
            ).data
        ]

    def _bulk_embed_and_insert_texts(
        self,
        texts: list[str],
        metadatas: list[dict],
        ids: list[str],
    ) -> list[str]:
        """Bulk insert single batch of texts, embeddings, and ids."""
        from bson import ObjectId
        from pymongo.operations import ReplaceOne

        if not texts:
            return []
        # Compute embedding vectors
        embeddings = self._embed_texts(texts)
        docs = [
            {
                "_id": ObjectId(i),
                self.text_key: t,
                self.embedding_key: embedding,
                **m,
            }
            for i, t, m, embedding in zip(
                ids, texts, metadatas, embeddings, strict=False
            )
        ]
        operations = [ReplaceOne({"_id": doc["_id"]}, doc, upsert=True) for doc in docs]
        # insert the documents in MongoDB Atlas
        result = self._coll.bulk_write(operations)
        if result.upserted_ids is None:
            raise ValueError("No documents were inserted.")
        return [str(_id) for _id in result.upserted_ids.values()]

    def _run(self, query: str) -> str:
        """Embed *query*, run $vectorSearch, and return the documents as JSON.

        Returns an empty string on any failure (best-effort contract preserved).
        """
        from bson import json_util

        try:
            query_config = self.query_config or MongoDBVectorSearchConfig()
            limit = query_config.limit
            oversampling_factor = query_config.oversampling_factor
            pre_filter = query_config.pre_filter
            include_embeddings = query_config.include_embeddings
            post_filter_pipeline = query_config.post_filter_pipeline

            # Create the embedding for the query
            query_vector = self._embed_texts([query])[0]

            # Atlas Vector Search stage, potentially with filter
            stage = {
                "index": self.vector_index_name,
                "path": self.embedding_key,
                "queryVector": query_vector,
                "numCandidates": limit * oversampling_factor,  # type: ignore[operator]
                "limit": limit,
            }
            if pre_filter:
                stage["filter"] = pre_filter

            pipeline = [
                {"$vectorSearch": stage},
                {"$set": {"score": {"$meta": "vectorSearchScore"}}},
            ]
            # Remove embeddings unless requested
            if not include_embeddings:
                pipeline.append({"$project": {self.embedding_key: 0}})
            # Post-processing
            if post_filter_pipeline is not None:
                pipeline.extend(post_filter_pipeline)

            # Execute and serialize with bson's json_util (handles ObjectId etc.)
            cursor = self._coll.aggregate(pipeline)  # type: ignore[arg-type]
            return json_util.dumps(list(cursor))
        except Exception as e:
            logger.error(f"Error: {e}")
            return ""

    def __del__(self):
        """Cleanup clients on deletion."""
        try:
            if hasattr(self, "_client") and self._client:
                self._client.close()
        except Exception as e:
            logger.error(f"Error: {e}")
        try:
            if hasattr(self, "_openai_client") and self._openai_client:
                self._openai_client.close()
        except Exception as e:
            logger.error(f"Error: {e}")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/mongodb_vector_search_tool/vector_search.py",
"license": "MIT License",
"lines": 285,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/multion_tool/example.py | import os
from crewai import Agent, Crew, Task
from multion_tool import MultiOnTool # type: ignore[import-not-found]
# Example: drive a MultiOn browsing tool from a single-agent crew.
# NOTE: replace the "Your Key" placeholders with real credentials before running.
os.environ["OPENAI_API_KEY"] = "Your Key"

# Browsing tool backed by the MultiOn API.
multion_browse_tool = MultiOnTool(api_key="Your Key")

# Create a new agent
Browser = Agent(
    role="Browser Agent",
    goal="control web browsers using natural language ",
    backstory="An expert browsing agent.",
    tools=[multion_browse_tool],
    verbose=True,
)

# Define tasks
browse = Task(
    description="Summarize the top 3 trending AI News headlines",
    expected_output="A summary of the top 3 trending AI News headlines",
    agent=Browser,
)

# Run the crew; kickoff executes the task through the agent.
crew = Crew(agents=[Browser], tasks=[browse])

crew.kickoff()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/multion_tool/example.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/multion_tool/multion_tool.py | """Multion tool spec."""
import os
import subprocess
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import Field
class MultiOnTool(BaseTool):
    """Tool to wrap MultiOn Browse Capabilities."""

    name: str = "Multion Browse Tool"
    description: str = """Multion gives the ability for LLMs to control web browsers using natural language instructions.
If the status is 'CONTINUE', reissue the same instruction to continue execution
"""
    multion: Any | None = None
    session_id: str | None = None
    local: bool = False
    max_steps: int = 3
    package_dependencies: list[str] = Field(default_factory=lambda: ["multion"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="MULTION_API_KEY", description="API key for Multion", required=True
            ),
        ]
    )

    def __init__(
        self,
        api_key: str | None = None,
        **kwargs,
    ):
        """Create the tool, offering to install the `multion` package if absent."""
        super().__init__(**kwargs)
        try:
            from multion.client import MultiOn  # type: ignore
        except ImportError:
            import click

            if not click.confirm(
                "You are missing the 'multion' package. Would you like to install it?"
            ):
                raise ImportError(
                    "`multion` package not found, please run `uv add multion`"
                ) from None
            subprocess.run(["uv", "add", "multion"], check=True)  # noqa: S607
            from multion.client import MultiOn

        self.session_id = None
        self.multion = MultiOn(api_key=api_key or os.getenv("MULTION_API_KEY"))

    def _run(
        self,
        cmd: str,
        *args: Any,
        **kwargs: Any,
    ) -> str:
        """Run the Multion client with the given command.

        Args:
            cmd (str): The detailed and specific natural language instruction for web browsing
            *args (Any): Additional arguments to pass to the Multion client
            **kwargs (Any): Additional keyword arguments to pass to the Multion client
        """
        if self.multion is None:
            raise ValueError("Multion client is not initialized.")
        response = self.multion.browse(
            cmd=cmd,
            session_id=self.session_id,
            local=self.local,
            max_steps=self.max_steps,
            *args,  # noqa: B026
            **kwargs,
        )
        # Persist the session so a follow-up command continues the same browse.
        self.session_id = response.session_id
        return response.message + "\n\n STATUS: " + response.status
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/multion_tool/multion_tool.py",
"license": "MIT License",
"lines": 69,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py | from typing import Any
from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class MySQLSearchToolSchema(BaseModel):
    """Input for MySQLSearchTool."""

    # The semantic query to run against the indexed table content.
    search_query: str = Field(
        ...,
        description="Mandatory semantic search query you want to use to search the database's content",
    )
class MySQLSearchTool(RagTool):
    """RAG tool for semantic search over a single MySQL table's rows."""

    name: str = "Search a database's table content"
    description: str = "A tool that can be used to semantic search a query from a database table's content."
    args_schema: type[BaseModel] = MySQLSearchToolSchema
    db_uri: str = Field(..., description="Mandatory database URI")

    def __init__(self, table_name: str, **kwargs):
        """Index the rows of *table_name* so they can be searched semantically."""
        super().__init__(**kwargs)
        self.add(table_name, data_type=DataType.MYSQL, metadata={"db_uri": self.db_uri})
        # Pin the description to the concrete table the tool was built for.
        self.description = f"A tool that can be used to semantic search a query the {table_name} database table's content."
        self._generate_description()

    def add(
        self,
        table_name: str,
        **kwargs: Any,
    ) -> None:
        """Ingest every row of *table_name* into the RAG index.

        NOTE(review): table_name is interpolated directly into SQL. It comes
        from the tool author at construction time, not from the LLM, but
        confirm that before exposing it more widely.
        """
        super().add(f"SELECT * FROM {table_name};", **kwargs)  # noqa: S608

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        similarity_threshold: float | None = None,
        limit: int | None = None,
        **kwargs: Any,
    ) -> Any:
        """Semantic-search the indexed table content."""
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/mysql_search_tool/mysql_search_tool.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/nl2sql/nl2sql_tool.py | from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
try:
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_AVAILABLE = True
except ImportError:
SQLALCHEMY_AVAILABLE = False
class NL2SQLToolInput(BaseModel):
    """Input schema for NL2SQLTool."""

    # The already-generated SQL statement to run against the database.
    sql_query: str = Field(
        title="SQL Query",
        description="The SQL query to execute.",
    )
class NL2SQLTool(BaseTool):
    """Execute SQL against a database, feeding schema info back to the LLM on errors."""

    name: str = "NL2SQLTool"
    description: str = "Converts natural language to SQL queries and executes them."
    db_uri: str = Field(
        title="Database URI",
        description="The URI of the database to connect to.",
    )
    tables: list = Field(default_factory=list)
    columns: dict = Field(default_factory=dict)
    args_schema: type[BaseModel] = NL2SQLToolInput

    def model_post_init(self, __context: Any) -> None:
        """Cache the schema (tables and their columns) right after validation."""
        if not SQLALCHEMY_AVAILABLE:
            raise ImportError(
                "sqlalchemy is not installed. Please install it with `pip install crewai-tools[sqlalchemy]`"
            )
        tables = self._fetch_available_tables()
        columns_by_table = {
            f"{table['table_name']}_columns": self._fetch_all_available_columns(
                table["table_name"]
            )
            for table in tables
        }
        self.tables = tables
        self.columns = columns_by_table

    def _fetch_available_tables(self):
        """List the tables in the public schema."""
        return self.execute_sql(
            "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public';"
        )

    def _fetch_all_available_columns(self, table_name: str):
        """List name and data type of every column of *table_name*."""
        return self.execute_sql(
            f"SELECT column_name, data_type FROM information_schema.columns WHERE table_name = '{table_name}';"  # noqa: S608
        )

    def _run(self, sql_query: str):
        """Execute *sql_query*; on failure, return guidance so the LLM can retry."""
        try:
            return self.execute_sql(sql_query)
        except Exception as exc:
            # Feed the cached schema and the error back to the model so it can
            # self-correct and produce a valid query on the next attempt.
            return (
                f"Based on these tables {self.tables} and columns {self.columns}, "
                "you can create SQL queries to retrieve data from the database."
                f"Get the original request {sql_query} and the error {exc} and create the correct SQL query."
            )

    def execute_sql(self, sql_query: str) -> list | str:
        """Run *sql_query*; return rows as dicts, or a success message for DML/DDL.

        Raises:
            ImportError: If sqlalchemy is not installed.
            Exception: Re-raises any database error after rolling back.
        """
        if not SQLALCHEMY_AVAILABLE:
            raise ImportError(
                "sqlalchemy is not installed. Please install it with `pip install crewai-tools[sqlalchemy]`"
            )
        engine = create_engine(self.db_uri)
        session = sessionmaker(bind=engine)()
        try:
            result = session.execute(text(sql_query))
            session.commit()
            if not result.returns_rows:  # type: ignore[attr-defined]
                return f"Query {sql_query} executed successfully"
            columns = result.keys()
            return [dict(zip(columns, row, strict=False)) for row in result.fetchall()]
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/nl2sql/nl2sql_tool.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/ocr_tool/ocr_tool.py | """Optical Character Recognition (OCR) Tool.
This tool provides functionality for extracting text from images using supported LLMs. Make sure your model supports the `vision` feature.
"""
import base64
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
from crewai.utilities.types import LLMMessage
from pydantic import BaseModel, Field
class OCRToolSchema(BaseModel):
    """Input schema for Optical Character Recognition Tool.

    Attributes:
        image_path_url (str): Path to a local image file or URL of an image.
            For local files, provide the absolute or relative path.
            For remote images, provide the complete URL starting with 'http' or 'https'.
    """

    image_path_url: str = Field(description="The image path or URL.")
class OCRTool(BaseTool):
    """Perform Optical Character Recognition on images via a vision-capable LLM.

    Handles both local image files (read and base64-encoded into a data URI)
    and images reachable through an http(s) URL (passed through unchanged).

    Attributes:
        name (str): Name of the tool.
        description (str): Description of the tool's functionality.
        llm (LLM): Language model instance used for the vision call.
        args_schema (Type[BaseModel]): Pydantic schema for input validation.
    """

    name: str = "Optical Character Recognition Tool"
    description: str = "This tool uses an LLM's API to extract text from an image file."
    llm: LLM = Field(default_factory=lambda: LLM(model="gpt-4o", temperature=0.7))
    args_schema: type[BaseModel] = OCRToolSchema

    def _run(self, **kwargs) -> str:
        """Extract text from the image named by `image_path_url`.

        Returns:
            str: The extracted text, or an error message when no image
            path/URL was provided.
        """
        image_path_url = kwargs.get("image_path_url")
        if not image_path_url:
            return "Image Path or URL is required."

        # Remote URLs go straight to the API; local files become data URIs.
        if image_path_url.startswith("http"):
            image_data = image_path_url
        else:
            image_data = f"data:image/jpeg;base64,{self._encode_image(image_path_url)}"

        messages: list[LLMMessage] = [
            {
                "role": "system",
                "content": "You are an expert OCR specialist. Extract complete text from the provided image. Provide the result as a raw text.",
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": image_data},
                    }
                ],
            },
        ]
        return self.llm.call(messages=messages)

    @staticmethod
    def _encode_image(image_path: str):
        """Return the file at *image_path* as a base64-encoded UTF-8 string."""
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/ocr_tool/ocr_tool.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py | from importlib.metadata import version
import json
import os
from platform import architecture, python_version
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field
try:
from oxylabs import RealtimeClient # type: ignore[import-untyped]
from oxylabs.sources.response import ( # type: ignore[import-untyped]
Response as OxylabsResponse,
)
OXYLABS_AVAILABLE = True
except ImportError:
RealtimeClient = Any
OxylabsResponse = Any
OXYLABS_AVAILABLE = False
__all__ = ["OxylabsAmazonProductScraperConfig", "OxylabsAmazonProductScraperTool"]
class OxylabsAmazonProductScraperArgs(BaseModel):
    """Argument schema: a single Amazon product identifier."""

    query: str = Field(description="Amazon product ASIN")
class OxylabsAmazonProductScraperConfig(BaseModel):
    """Amazon Product Scraper configuration options:
    https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/product.

    All fields default to None and are excluded from the API request when unset
    (see `_run`, which dumps the config with exclude_none=True).
    """

    domain: str | None = Field(
        None, description="The domain to limit the search results to."
    )
    geo_location: str | None = Field(None, description="The Deliver to location.")
    user_agent_type: str | None = Field(None, description="Device type and browser.")
    render: str | None = Field(None, description="Enables JavaScript rendering.")
    callback_url: str | None = Field(None, description="URL to your callback endpoint.")
    context: list | None = Field(
        None,
        description="Additional advanced settings and controls for specialized requirements.",
    )
    parse: bool | None = Field(None, description="True will return structured data.")
    parsing_instructions: dict | None = Field(
        None, description="Instructions for parsing the results."
    )
class OxylabsAmazonProductScraperTool(BaseTool):
    """Scrape Amazon product pages with OxylabsAmazonProductScraperTool.

    Get Oxylabs account:
    https://dashboard.oxylabs.io/en

    Args:
        username (str): Oxylabs username.
        password (str): Oxylabs password.
        config: Configuration options. See ``OxylabsAmazonProductScraperConfig``
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        validate_assignment=True,
    )

    name: str = "Oxylabs Amazon Product Scraper tool"
    description: str = "Scrape Amazon product pages with Oxylabs Amazon Product Scraper"
    args_schema: type[BaseModel] = OxylabsAmazonProductScraperArgs
    # RealtimeClient instance (falls back to Any when the package is absent).
    oxylabs_api: RealtimeClient
    config: OxylabsAmazonProductScraperConfig
    package_dependencies: list[str] = Field(default_factory=lambda: ["oxylabs"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OXYLABS_USERNAME",
                description="Username for Oxylabs",
                required=True,
            ),
            EnvVar(
                name="OXYLABS_PASSWORD",
                description="Password for Oxylabs",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        username: str | None = None,
        password: str | None = None,
        config: OxylabsAmazonProductScraperConfig | dict | None = None,
        **kwargs,
    ) -> None:
        """Resolve credentials, build the Oxylabs client, then initialize the tool.

        Raises:
            ValueError: If credentials are neither passed nor in the environment.
            ImportError: If the `oxylabs` package is absent and cannot be installed.
        """
        bits, _ = architecture()
        # SDK identifier string sent along with every Oxylabs request.
        # NOTE(review): this reports version('crewai') while the sibling MongoDB
        # tool reports version("crewai-tools") — confirm which is intended.
        sdk_type = (
            f"oxylabs-crewai-sdk-python/"
            f"{version('crewai')} "
            f"({python_version()}; {bits})"
        )
        if username is None or password is None:
            username, password = self._get_credentials_from_env()
        if OXYLABS_AVAILABLE:
            # import RealtimeClient to make it accessible for the current scope
            from oxylabs import RealtimeClient

            kwargs["oxylabs_api"] = RealtimeClient(
                username=username,
                password=password,
                sdk_type=sdk_type,
            )
        else:
            import click

            # Offer an interactive install; otherwise fail with instructions.
            if click.confirm(
                "You are missing the 'oxylabs' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "oxylabs"], check=True)  # noqa: S607
                    from oxylabs import RealtimeClient

                    kwargs["oxylabs_api"] = RealtimeClient(
                        username=username,
                        password=password,
                        sdk_type=sdk_type,
                    )
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install oxylabs package") from e
            else:
                raise ImportError(
                    "`oxylabs` package not found, please run `uv add oxylabs`"
                )
        if config is None:
            config = OxylabsAmazonProductScraperConfig()
        super().__init__(config=config, **kwargs)

    def _get_credentials_from_env(self) -> tuple[str, str]:
        """Read OXYLABS_USERNAME / OXYLABS_PASSWORD from the environment."""
        username = os.environ.get("OXYLABS_USERNAME")
        password = os.environ.get("OXYLABS_PASSWORD")
        if not username or not password:
            raise ValueError(
                "You must pass oxylabs username and password when instantiating the tool "
                "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables"
            )
        return username, password

    def _run(self, query: str) -> str:
        """Scrape the product page for ASIN *query* and return its content.

        Parsed (dict) results are JSON-serialized; raw results pass through.
        """
        response = self.oxylabs_api.amazon.scrape_product(
            query,
            **self.config.model_dump(exclude_none=True),
        )
        content = response.results[0].content
        if isinstance(content, dict):
            return json.dumps(content)
        return content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/oxylabs_amazon_product_scraper_tool/oxylabs_amazon_product_scraper_tool.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py | from importlib.metadata import version
import json
import os
from platform import architecture, python_version
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field
try:
from oxylabs import RealtimeClient # type: ignore[import-untyped]
from oxylabs.sources.response import ( # type: ignore[import-untyped]
Response as OxylabsResponse,
)
OXYLABS_AVAILABLE = True
except ImportError:
RealtimeClient = Any
OxylabsResponse = Any
OXYLABS_AVAILABLE = False
__all__ = ["OxylabsAmazonSearchScraperConfig", "OxylabsAmazonSearchScraperTool"]
class OxylabsAmazonSearchScraperArgs(BaseModel):
    """Tool-call schema: the single search term supplied by the agent."""

    query: str = Field(description="Amazon search term")
class OxylabsAmazonSearchScraperConfig(BaseModel):
    """Amazon Search Scraper configuration options:
    https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/amazon/search.

    Every field defaults to None; unset fields are omitted from the API
    payload (the tool dumps this model with ``exclude_none=True``).
    """

    domain: str | None = Field(
        None, description="The domain to limit the search results to."
    )
    start_page: int | None = Field(None, description="The starting page number.")
    pages: int | None = Field(None, description="The number of pages to scrape.")
    geo_location: str | None = Field(None, description="The Deliver to location.")
    user_agent_type: str | None = Field(None, description="Device type and browser.")
    render: str | None = Field(None, description="Enables JavaScript rendering.")
    callback_url: str | None = Field(None, description="URL to your callback endpoint.")
    context: list | None = Field(
        None,
        description="Additional advanced settings and controls for specialized requirements.",
    )
    parse: bool | None = Field(None, description="True will return structured data.")
    parsing_instructions: dict | None = Field(
        None, description="Instructions for parsing the results."
    )
class OxylabsAmazonSearchScraperTool(BaseTool):
    """Scrape Amazon search results with OxylabsAmazonSearchScraperTool.

    Get Oxylabs account:
    https://dashboard.oxylabs.io/en

    Args:
        username (str): Oxylabs username.
        password (str): Oxylabs password.
        config: Configuration options. See ``OxylabsAmazonSearchScraperConfig``
    """

    # RealtimeClient is not a pydantic type, so arbitrary types must be allowed.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        validate_assignment=True,
    )
    name: str = "Oxylabs Amazon Search Scraper tool"
    description: str = "Scrape Amazon search results with Oxylabs Amazon Search Scraper"
    args_schema: type[BaseModel] = OxylabsAmazonSearchScraperArgs
    # Authenticated Oxylabs SDK client; injected by __init__ via kwargs.
    oxylabs_api: RealtimeClient
    # Scraper options forwarded to the API on every _run call.
    config: OxylabsAmazonSearchScraperConfig
    package_dependencies: list[str] = Field(default_factory=lambda: ["oxylabs"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OXYLABS_USERNAME",
                description="Username for Oxylabs",
                required=True,
            ),
            EnvVar(
                name="OXYLABS_PASSWORD",
                description="Password for Oxylabs",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        username: str | None = None,
        password: str | None = None,
        config: OxylabsAmazonSearchScraperConfig | dict | None = None,
        **kwargs,
    ) -> None:
        """Build the Oxylabs client and initialize the tool.

        Falls back to OXYLABS_USERNAME/OXYLABS_PASSWORD environment
        variables when credentials are not passed explicitly, and offers
        to install the `oxylabs` package interactively when it is missing.
        """
        # Identify this integration to Oxylabs (crewai version, python, arch).
        bits, _ = architecture()
        sdk_type = (
            f"oxylabs-crewai-sdk-python/"
            f"{version('crewai')} "
            f"({python_version()}; {bits})"
        )
        # NOTE(review): if only one credential is supplied, BOTH are re-read
        # from the environment — confirm this is intended.
        if username is None or password is None:
            username, password = self._get_credentials_from_env()
        if OXYLABS_AVAILABLE:
            # import RealtimeClient to make it accessible for the current scope
            from oxylabs import RealtimeClient

            kwargs["oxylabs_api"] = RealtimeClient(
                username=username,
                password=password,
                sdk_type=sdk_type,
            )
        else:
            import click

            # Interactive fallback: offer to install the missing dependency.
            if click.confirm(
                "You are missing the 'oxylabs' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "oxylabs"], check=True)  # noqa: S607
                    from oxylabs import RealtimeClient

                    kwargs["oxylabs_api"] = RealtimeClient(
                        username=username,
                        password=password,
                        sdk_type=sdk_type,
                    )
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install oxylabs package") from e
            else:
                raise ImportError(
                    "`oxylabs` package not found, please run `uv add oxylabs`"
                )
        if config is None:
            config = OxylabsAmazonSearchScraperConfig()
        # Pydantic validation happens here; oxylabs_api travels via kwargs.
        super().__init__(config=config, **kwargs)

    def _get_credentials_from_env(self) -> tuple[str, str]:
        """Read Oxylabs credentials from the environment.

        Raises:
            ValueError: if either variable is unset or empty.
        """
        username = os.environ.get("OXYLABS_USERNAME")
        password = os.environ.get("OXYLABS_PASSWORD")
        if not username or not password:
            raise ValueError(
                "You must pass oxylabs username and password when instantiating the tool "
                "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables"
            )
        return username, password

    def _run(self, query: str) -> str:
        """Scrape Amazon search results for `query` and return the content.

        Returns JSON text when the API returned parsed (dict) content,
        otherwise the raw payload unchanged.
        """
        response = self.oxylabs_api.amazon.scrape_search(
            query,
            **self.config.model_dump(exclude_none=True),
        )
        # NOTE(review): assumes at least one result; results[0] raises
        # IndexError on an empty response — confirm API guarantees this.
        content = response.results[0].content
        if isinstance(content, dict):
            return json.dumps(content)
        return content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/oxylabs_amazon_search_scraper_tool/oxylabs_amazon_search_scraper_tool.py",
"license": "MIT License",
"lines": 138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py | from importlib.metadata import version
import json
import os
from platform import architecture, python_version
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field
try:
from oxylabs import RealtimeClient # type: ignore[import-untyped]
from oxylabs.sources.response import ( # type: ignore[import-untyped]
Response as OxylabsResponse,
)
OXYLABS_AVAILABLE = True
except ImportError:
RealtimeClient = Any
OxylabsResponse = Any
OXYLABS_AVAILABLE = False
__all__ = ["OxylabsGoogleSearchScraperConfig", "OxylabsGoogleSearchScraperTool"]
class OxylabsGoogleSearchScraperArgs(BaseModel):
    """Tool-call schema: the single search query supplied by the agent."""

    query: str = Field(description="Search query")
class OxylabsGoogleSearchScraperConfig(BaseModel):
    """Google Search Scraper configuration options:
    https://developers.oxylabs.io/scraper-apis/web-scraper-api/targets/google/search/search.

    Every field defaults to None; unset fields are omitted from the API
    payload (the tool dumps this model with ``exclude_none=True``).
    """

    domain: str | None = Field(
        None, description="The domain to limit the search results to."
    )
    start_page: int | None = Field(None, description="The starting page number.")
    pages: int | None = Field(None, description="The number of pages to scrape.")
    limit: int | None = Field(
        None, description="Number of results to retrieve in each page."
    )
    geo_location: str | None = Field(None, description="The Deliver to location.")
    user_agent_type: str | None = Field(None, description="Device type and browser.")
    render: str | None = Field(None, description="Enables JavaScript rendering.")
    callback_url: str | None = Field(None, description="URL to your callback endpoint.")
    context: list | None = Field(
        None,
        description="Additional advanced settings and controls for specialized requirements.",
    )
    parse: bool | None = Field(None, description="True will return structured data.")
    parsing_instructions: dict | None = Field(
        None, description="Instructions for parsing the results."
    )
class OxylabsGoogleSearchScraperTool(BaseTool):
    """Scrape Google Search results with OxylabsGoogleSearchScraperTool.

    Get Oxylabs account:
    https://dashboard.oxylabs.io/en

    Args:
        username (str): Oxylabs username.
        password (str): Oxylabs password.
        config: Configuration options. See ``OxylabsGoogleSearchScraperConfig``
    """

    # RealtimeClient is not a pydantic type, so arbitrary types must be allowed.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        validate_assignment=True,
    )
    name: str = "Oxylabs Google Search Scraper tool"
    description: str = "Scrape Google Search results with Oxylabs Google Search Scraper"
    args_schema: type[BaseModel] = OxylabsGoogleSearchScraperArgs
    # Authenticated Oxylabs SDK client; injected by __init__ via kwargs.
    oxylabs_api: RealtimeClient
    # Scraper options forwarded to the API on every _run call.
    config: OxylabsGoogleSearchScraperConfig
    package_dependencies: list[str] = Field(default_factory=lambda: ["oxylabs"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OXYLABS_USERNAME",
                description="Username for Oxylabs",
                required=True,
            ),
            EnvVar(
                name="OXYLABS_PASSWORD",
                description="Password for Oxylabs",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        username: str | None = None,
        password: str | None = None,
        config: OxylabsGoogleSearchScraperConfig | dict | None = None,
        **kwargs,
    ) -> None:
        """Build the Oxylabs client and initialize the tool.

        Falls back to OXYLABS_USERNAME/OXYLABS_PASSWORD environment
        variables when credentials are not passed explicitly, and offers
        to install the `oxylabs` package interactively when it is missing.
        """
        # Identify this integration to Oxylabs (crewai version, python, arch).
        bits, _ = architecture()
        sdk_type = (
            f"oxylabs-crewai-sdk-python/"
            f"{version('crewai')} "
            f"({python_version()}; {bits})"
        )
        # NOTE(review): if only one credential is supplied, BOTH are re-read
        # from the environment — confirm this is intended.
        if username is None or password is None:
            username, password = self._get_credentials_from_env()
        if OXYLABS_AVAILABLE:
            # import RealtimeClient to make it accessible for the current scope
            from oxylabs import RealtimeClient

            kwargs["oxylabs_api"] = RealtimeClient(
                username=username,
                password=password,
                sdk_type=sdk_type,
            )
        else:
            import click

            # Interactive fallback: offer to install the missing dependency.
            if click.confirm(
                "You are missing the 'oxylabs' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "oxylabs"], check=True)  # noqa: S607
                    from oxylabs import RealtimeClient

                    kwargs["oxylabs_api"] = RealtimeClient(
                        username=username,
                        password=password,
                        sdk_type=sdk_type,
                    )
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install oxylabs package") from e
            else:
                raise ImportError(
                    "`oxylabs` package not found, please run `uv add oxylabs`"
                )
        if config is None:
            config = OxylabsGoogleSearchScraperConfig()
        # Pydantic validation happens here; oxylabs_api travels via kwargs.
        super().__init__(config=config, **kwargs)

    def _get_credentials_from_env(self) -> tuple[str, str]:
        """Read Oxylabs credentials from the environment.

        Raises:
            ValueError: if either variable is unset or empty.
        """
        username = os.environ.get("OXYLABS_USERNAME")
        password = os.environ.get("OXYLABS_PASSWORD")
        if not username or not password:
            raise ValueError(
                "You must pass oxylabs username and password when instantiating the tool "
                "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables"
            )
        return username, password

    def _run(self, query: str, **kwargs) -> str:
        """Scrape Google Search results for `query` and return the content.

        Extra keyword arguments are accepted but ignored. Returns JSON text
        when the API returned parsed (dict) content, otherwise the raw
        payload unchanged.
        """
        response = self.oxylabs_api.google.scrape_search(
            query,
            **self.config.model_dump(exclude_none=True),
        )
        # NOTE(review): assumes at least one result; results[0] raises
        # IndexError on an empty response — confirm API guarantees this.
        content = response.results[0].content
        if isinstance(content, dict):
            return json.dumps(content)
        return content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/oxylabs_google_search_scraper_tool/oxylabs_google_search_scraper_tool.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py | from importlib.metadata import version
import json
import os
from platform import architecture, python_version
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field
try:
from oxylabs import RealtimeClient # type: ignore[import-untyped]
from oxylabs.sources.response import ( # type: ignore[import-untyped]
Response as OxylabsResponse,
)
OXYLABS_AVAILABLE = True
except ImportError:
RealtimeClient = Any
OxylabsResponse = Any
OXYLABS_AVAILABLE = False
__all__ = ["OxylabsUniversalScraperConfig", "OxylabsUniversalScraperTool"]
class OxylabsUniversalScraperArgs(BaseModel):
    """Tool-call schema: the single URL supplied by the agent."""

    url: str = Field(description="Website URL")
class OxylabsUniversalScraperConfig(BaseModel):
    """Universal Scraper configuration options:
    https://developers.oxylabs.io/scraper-apis/web-scraper-api/other-websites.

    Every field defaults to None; unset fields are omitted from the API
    payload (the tool dumps this model with ``exclude_none=True``).
    """

    geo_location: str | None = Field(None, description="The Deliver to location.")
    user_agent_type: str | None = Field(None, description="Device type and browser.")
    render: str | None = Field(None, description="Enables JavaScript rendering.")
    callback_url: str | None = Field(None, description="URL to your callback endpoint.")
    context: list | None = Field(
        None,
        description="Additional advanced settings and controls for specialized requirements.",
    )
    parse: bool | None = Field(None, description="True will return structured data.")
    parsing_instructions: dict | None = Field(
        None, description="Instructions for parsing the results."
    )
class OxylabsUniversalScraperTool(BaseTool):
    """Scrape any website with OxylabsUniversalScraperTool.

    Get Oxylabs account:
    https://dashboard.oxylabs.io/en

    Args:
        username (str): Oxylabs username.
        password (str): Oxylabs password.
        config: Configuration options. See ``OxylabsUniversalScraperConfig``
    """

    # RealtimeClient is not a pydantic type, so arbitrary types must be allowed.
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        validate_assignment=True,
    )
    name: str = "Oxylabs Universal Scraper tool"
    description: str = "Scrape any url with Oxylabs Universal Scraper"
    args_schema: type[BaseModel] = OxylabsUniversalScraperArgs
    # Authenticated Oxylabs SDK client; injected by __init__ via kwargs.
    oxylabs_api: RealtimeClient
    # Scraper options forwarded to the API on every _run call.
    config: OxylabsUniversalScraperConfig
    package_dependencies: list[str] = Field(default_factory=lambda: ["oxylabs"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OXYLABS_USERNAME",
                description="Username for Oxylabs",
                required=True,
            ),
            EnvVar(
                name="OXYLABS_PASSWORD",
                description="Password for Oxylabs",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        username: str | None = None,
        password: str | None = None,
        config: OxylabsUniversalScraperConfig | dict | None = None,
        **kwargs,
    ) -> None:
        """Build the Oxylabs client and initialize the tool.

        Falls back to OXYLABS_USERNAME/OXYLABS_PASSWORD environment
        variables when credentials are not passed explicitly, and offers
        to install the `oxylabs` package interactively when it is missing.
        """
        # Identify this integration to Oxylabs (crewai version, python, arch).
        bits, _ = architecture()
        sdk_type = (
            f"oxylabs-crewai-sdk-python/"
            f"{version('crewai')} "
            f"({python_version()}; {bits})"
        )
        # NOTE(review): if only one credential is supplied, BOTH are re-read
        # from the environment — confirm this is intended.
        if username is None or password is None:
            username, password = self._get_credentials_from_env()
        if OXYLABS_AVAILABLE:
            # import RealtimeClient to make it accessible for the current scope
            from oxylabs import RealtimeClient

            kwargs["oxylabs_api"] = RealtimeClient(
                username=username,
                password=password,
                sdk_type=sdk_type,
            )
        else:
            import click

            # Interactive fallback: offer to install the missing dependency.
            if click.confirm(
                "You are missing the 'oxylabs' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "oxylabs"], check=True)  # noqa: S607
                    from oxylabs import RealtimeClient

                    kwargs["oxylabs_api"] = RealtimeClient(
                        username=username,
                        password=password,
                        sdk_type=sdk_type,
                    )
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install oxylabs package") from e
            else:
                raise ImportError(
                    "`oxylabs` package not found, please run `uv add oxylabs`"
                )
        if config is None:
            config = OxylabsUniversalScraperConfig()
        # Pydantic validation happens here; oxylabs_api travels via kwargs.
        super().__init__(config=config, **kwargs)

    def _get_credentials_from_env(self) -> tuple[str, str]:
        """Read Oxylabs credentials from the environment.

        Raises:
            ValueError: if either variable is unset or empty.
        """
        username = os.environ.get("OXYLABS_USERNAME")
        password = os.environ.get("OXYLABS_PASSWORD")
        if not username or not password:
            raise ValueError(
                "You must pass oxylabs username and password when instantiating the tool "
                "or specify OXYLABS_USERNAME and OXYLABS_PASSWORD environment variables"
            )
        return username, password

    def _run(self, url: str) -> str:
        """Scrape the given URL and return the page content.

        Returns JSON text when the API returned parsed (dict) content,
        otherwise the raw payload unchanged.
        """
        response = self.oxylabs_api.universal.scrape_url(
            url,
            **self.config.model_dump(exclude_none=True),
        )
        # NOTE(review): assumes at least one result; results[0] raises
        # IndexError on an empty response — confirm API guarantees this.
        content = response.results[0].content
        if isinstance(content, dict):
            return json.dumps(content)
        return content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/oxylabs_universal_scraper_tool/oxylabs_universal_scraper_tool.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py | import os
from typing import Annotated, Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class ParallelSearchInput(BaseModel):
    """Input schema for ParallelSearchTool using the Search API (v1beta).

    At least one of objective or search_queries is required (enforced at
    run time by the tool, not by this schema).
    """

    objective: str | None = Field(
        None,
        description="Natural-language goal for the web research (<=5000 chars)",
        max_length=5000,
    )
    search_queries: list[Annotated[str, Field(max_length=200)]] | None = Field(
        default=None,
        description="Optional list of keyword queries (<=5 items, each <=200 chars)",
        min_length=1,
        max_length=5,
    )
    processor: str = Field(
        default="base",
        description="Search processor: 'base' (fast/low cost) or 'pro' (higher quality/freshness)",
        pattern=r"^(base|pro)$",
    )
    max_results: int = Field(
        default=10,
        ge=1,
        le=40,
        description="Maximum number of search results to return (processor limits apply)",
    )
    max_chars_per_result: int = Field(
        default=6000,
        ge=100,
        description="Maximum characters per result excerpt (values >30000 not guaranteed)",
    )
    source_policy: dict[str, Any] | None = Field(
        default=None, description="Optional source policy configuration"
    )
class ParallelSearchTool(BaseTool):
    """Web search via Parallel's Search API (v1beta).

    Errors (missing key, bad input, HTTP failures, timeouts) are reported
    as plain strings rather than raised, so the agent can read them.
    """

    name: str = "Parallel Web Search Tool"
    description: str = (
        "Search the web using Parallel's Search API (v1beta). Returns ranked results with "
        "compressed excerpts optimized for LLMs."
    )
    args_schema: type[BaseModel] = ParallelSearchInput
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="PARALLEL_API_KEY",
                description="API key for Parallel",
                required=True,
            ),
        ]
    )
    package_dependencies: list[str] = Field(default_factory=lambda: ["requests"])
    search_url: str = "https://api.parallel.ai/v1beta/search"

    def _run(
        self,
        objective: str | None = None,
        search_queries: list[str] | None = None,
        processor: str = "base",
        max_results: int = 10,
        max_chars_per_result: int = 6000,
        source_policy: dict[str, Any] | None = None,
        **_: Any,
    ) -> str:
        """Issue one search request and return the response as JSON text."""
        api_key = os.environ.get("PARALLEL_API_KEY")
        if not api_key:
            return "Error: PARALLEL_API_KEY environment variable is required"
        if not (objective or search_queries):
            return "Error: Provide at least one of 'objective' or 'search_queries'"

        request_headers = {
            "x-api-key": api_key,
            "Content-Type": "application/json",
        }
        try:
            body: dict[str, Any] = {
                "processor": processor,
                "max_results": max_results,
                "max_chars_per_result": max_chars_per_result,
            }
            # Optional fields are only sent when the caller supplied them.
            optional_fields = {
                "objective": objective,
                "search_queries": search_queries,
                "source_policy": source_policy,
            }
            for field_name, field_value in optional_fields.items():
                if field_value is not None:
                    body[field_name] = field_value

            # The 'pro' processor is slower, so it gets a longer timeout.
            timeout_seconds = 90 if processor == "pro" else 30
            resp = requests.post(
                self.search_url,
                json=body,
                headers=request_headers,
                timeout=timeout_seconds,
            )
            if resp.status_code >= 300:
                return (
                    f"Parallel Search API error: {resp.status_code} {resp.text[:200]}"
                )
            return self._format_output(resp.json())
        except requests.Timeout:
            return "Parallel Search API timeout. Please try again later."
        except Exception as exc:
            return f"Unexpected error calling Parallel Search API: {exc}"

    def _format_output(self, result: dict[str, Any]) -> str:
        """Serialize the full payload (search_id + results) as compact JSON."""
        try:
            import json

            return json.dumps(result or {}, ensure_ascii=False)
        except Exception:
            # Fall back to repr-style text if the payload is not serializable.
            return str(result or {})
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/parallel_tools/parallel_search_tool.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/example.py | import random
from crewai import Agent, Crew, Task
from patronus import ( # type: ignore[import-not-found,import-untyped]
Client,
EvaluationResult,
)
from patronus_local_evaluator_tool import ( # type: ignore[import-not-found,import-untyped]
PatronusLocalEvaluatorTool,
)
# Test the PatronusLocalEvaluatorTool where agent uses the local evaluator
client = Client()


# Example of an evaluator that returns a random pass/fail result
@client.register_local_evaluator("random_evaluator")
def random_evaluator(**kwargs):
    """Toy local evaluator: random score in [0, 1); passes when >= 0.5."""
    score = random.random()  # noqa: S311
    return EvaluationResult(
        score_raw=score,
        pass_=score >= 0.5,
        explanation="example explanation",  # Optional justification for LLM judges
    )


# 1. Uses PatronusEvalTool: agent can pick the best evaluator and criteria
# patronus_eval_tool = PatronusEvalTool()

# 2. Uses PatronusPredefinedCriteriaEvalTool: agent uses the defined evaluator and criteria
# patronus_eval_tool = PatronusPredefinedCriteriaEvalTool(
#     evaluators=[{"evaluator": "judge", "criteria": "contains-code"}]
# )

# 3. Uses PatronusLocalEvaluatorTool: agent uses user defined evaluator
patronus_eval_tool = PatronusLocalEvaluatorTool(
    patronus_client=client,
    evaluator="random_evaluator",
    evaluated_model_gold_answer="example label",
)

# Create a new agent that evaluates its own output with the tool above
coding_agent = Agent(
    role="Coding Agent",
    goal="Generate high quality code and verify that the output is code by using Patronus AI's evaluation tool.",
    backstory="You are an experienced coder who can generate high quality python code. You can follow complex instructions accurately and effectively.",
    tools=[patronus_eval_tool],
    verbose=True,
)

# Define tasks
generate_code = Task(
    description="Create a simple program to generate the first N numbers in the Fibonacci sequence. Select the most appropriate evaluator and criteria for evaluating your output.",
    expected_output="Program that generates the first N numbers in the Fibonacci sequence.",
    agent=coding_agent,
)

# Run the single-agent crew; evaluation results are logged by Patronus.
crew = Crew(agents=[coding_agent], tasks=[generate_code])
crew.kickoff()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/example.py",
"license": "MIT License",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py | import json
import os
from typing import Any
import warnings
from crewai.tools import BaseTool, EnvVar
from pydantic import Field
import requests
class PatronusEvalTool(BaseTool):
    """Evaluate agent inputs/outputs with the Patronus AI evaluation API.

    On construction the tool downloads the currently available evaluators
    and evaluator criteria from the Patronus API and embeds them into its
    own description, letting the agent choose the most suitable pair.
    """

    name: str = "Patronus Evaluation Tool"
    evaluate_url: str = "https://api.patronus.ai/v1/evaluate"
    evaluators: list[dict[str, str]] = Field(default_factory=list)
    criteria: list[dict[str, str]] = Field(default_factory=list)
    description: str = ""
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="PATRONUS_API_KEY",
                description="API key for Patronus evaluation services",
                required=True,
            ),
        ]
    )

    def __init__(self, **kwargs: Any):
        """Fetch the evaluator/criteria catalog and build the description."""
        super().__init__(**kwargs)
        temp_evaluators, temp_criteria = self._init_run()
        self.evaluators = temp_evaluators
        self.criteria = temp_criteria
        # Fix: regenerate the description AFTER the criteria are fetched.
        # Previously `_generate_description` was never called here, so the
        # agent never saw the evaluator/criteria catalog in the description.
        self._generate_description()
        warnings.warn(
            "You are allowing the agent to select the best evaluator and criteria when you use the `PatronusEvalTool`. If this is not intended then please use `PatronusPredefinedCriteriaEvalTool` instead.",
            stacklevel=2,
        )

    def _init_run(self) -> tuple[list[dict], list[dict]]:
        """Fetch non-deprecated evaluators and their criteria from the API.

        Returns:
            A ``(evaluators, criteria)`` pair of JSON-friendly dicts.

        Raises:
            KeyError: if PATRONUS_API_KEY is not set in the environment.
        """
        # Both catalog endpoints share the same headers.
        headers = {
            "accept": "application/json",
            "X-API-KEY": os.environ["PATRONUS_API_KEY"],
        }
        evaluators_set = requests.get(
            "https://api.patronus.ai/v1/evaluators",
            headers=headers,
            timeout=30,
        ).json()["evaluators"]
        ids, evaluators = set(), []
        for ev in evaluators_set:
            # Skip deprecated entries and de-duplicate by id.
            if not ev["deprecated"] and ev["id"] not in ids:
                evaluators.append(
                    {
                        "id": ev["id"],
                        "name": ev["name"],
                        "description": ev["description"],
                        "aliases": ev["aliases"],
                    }
                )
                ids.add(ev["id"])
        criteria_set = requests.get(
            "https://api.patronus.ai/v1/evaluator-criteria",
            headers=headers,
            timeout=30,
        ).json()["evaluator_criteria"]
        criteria = []
        for cr in criteria_set:
            # Prefer explicit pass criteria (plus rubric when present); fall
            # back to the human-readable description; drop entries with neither.
            if cr["config"].get("pass_criteria", None):
                entry = {
                    "evaluator": cr["evaluator_family"],
                    "name": cr["name"],
                    "pass_criteria": cr["config"]["pass_criteria"],
                }
                if cr["config"].get("rubric", None):
                    entry["rubric"] = cr["config"]["rubric"]
                criteria.append(entry)
            elif cr["description"]:
                criteria.append(
                    {
                        "evaluator": cr["evaluator_family"],
                        "name": cr["name"],
                        "description": cr["description"],
                    }
                )
        return evaluators, criteria

    def _generate_description(self) -> None:
        """Rebuild `self.description` embedding the fetched criteria list."""
        criteria = "\n".join([json.dumps(i) for i in self.criteria])
        self.description = f"""This tool calls the Patronus Evaluation API that takes the following arguments:
1. evaluated_model_input: str: The agent's task description in simple text
2. evaluated_model_output: str: The agent's output of the task
3. evaluated_model_retrieved_context: str: The agent's context
4. evaluators: This is a list of dictionaries containing one of the following evaluators and the corresponding criteria. An example input for this field: [{{"evaluator": "Judge", "criteria": "patronus:is-code"}}]
Evaluators:
{criteria}
You must ONLY choose the most appropriate evaluator and criteria based on the "pass_criteria" or "description" fields for your evaluation task and nothing from outside of the options present."""

    def _run(
        self,
        evaluated_model_input: str | None,
        evaluated_model_output: str | None,
        evaluated_model_retrieved_context: str | None,
        evaluators: list[dict[str, str]],
    ) -> Any:
        """POST one evaluation request and return the decoded JSON response.

        Raises:
            Exception: when the API responds with a non-200 status.
        """
        # Normalize evaluator entries: the API expects a lowercase evaluator
        # family and a `criteria` key (callers may pass `name` instead).
        evals = [
            {
                "evaluator": ev["evaluator"].lower(),
                "criteria": ev["name"] if "name" in ev else ev["criteria"],
            }
            for ev in evaluators
        ]
        data = {
            "evaluated_model_input": evaluated_model_input,
            "evaluated_model_output": evaluated_model_output,
            "evaluated_model_retrieved_context": evaluated_model_retrieved_context,
            "evaluators": evals,
        }
        headers = {
            "X-API-KEY": os.getenv("PATRONUS_API_KEY"),
            "accept": "application/json",
            "content-type": "application/json",
        }
        response = requests.post(
            self.evaluate_url,
            headers=headers,
            data=json.dumps(data),
            timeout=30,
        )
        if response.status_code != 200:
            raise Exception(
                f"Failed to evaluate model input and output. Response status code: {response.status_code}. Reason: {response.text}"
            )
        return response.json()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/patronus_eval_tool.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field
if TYPE_CHECKING:
from patronus import Client, EvaluationResult # type: ignore[import-untyped]
try:
import patronus # noqa: F401
PYPATRONUS_AVAILABLE = True
except ImportError:
PYPATRONUS_AVAILABLE = False
class FixedLocalEvaluatorToolSchema(BaseModel):
    """Call schema for PatronusLocalEvaluatorTool: all fields required."""

    evaluated_model_input: str = Field(
        ..., description="The agent's task description in simple text"
    )
    evaluated_model_output: str = Field(
        ..., description="The agent's output of the task"
    )
    evaluated_model_retrieved_context: str = Field(
        ..., description="The agent's context"
    )
    evaluated_model_gold_answer: str = Field(
        ..., description="The agent's gold answer only if available"
    )
    evaluator: str = Field(..., description="The registered local evaluator")
class PatronusLocalEvaluatorTool(BaseTool):
    name: str = "Patronus Local Evaluator Tool"
    description: str = "This tool is used to evaluate the model input and output using custom function evaluators."
    args_schema: type[BaseModel] = FixedLocalEvaluatorToolSchema
    # Patronus client; populated by _initialize_patronus (None until then).
    client: Client = None
    # Name of the locally registered evaluator function to invoke.
    evaluator: str
    # Gold answer forwarded with every evaluation request.
    evaluated_model_gold_answer: str
    model_config = ConfigDict(arbitrary_types_allowed=True)
    package_dependencies: list[str] = Field(default_factory=lambda: ["patronus"])

    def __init__(
        self,
        patronus_client: Client = None,
        evaluator: str = "",
        evaluated_model_gold_answer: str = "",
        **kwargs: Any,
    ):
        """Store evaluator settings, then attach the Patronus client."""
        super().__init__(**kwargs)
        self.evaluator = evaluator
        self.evaluated_model_gold_answer = evaluated_model_gold_answer
        self._initialize_patronus(patronus_client)

    def _initialize_patronus(self, patronus_client: Client) -> None:
        """Attach the client, offering to install `patronus` when missing."""
        try:
            if PYPATRONUS_AVAILABLE:
                self.client = patronus_client
                self._generate_description()
            else:
                # Reuse the except branch below for the not-installed path.
                raise ImportError
        except ImportError:
            import click

            # Interactive fallback: offer to install the missing dependency.
            if click.confirm(
                "You are missing the 'patronus' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "patronus"], check=True)  # noqa: S607
                    self.client = patronus_client
                    self._generate_description()
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install 'patronus' package") from e
            else:
                raise ImportError(
                    "`patronus` package not found, please run `uv add patronus`"
                ) from None

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Evaluate one interaction with the configured local evaluator.

        Input/output/context come from kwargs; the evaluator name and gold
        answer come from the tool's own configuration. Returns a summary
        string with the pass/fail result and the evaluator's explanation.
        """
        evaluated_model_input = kwargs.get("evaluated_model_input")
        evaluated_model_output = kwargs.get("evaluated_model_output")
        evaluated_model_retrieved_context = kwargs.get(
            "evaluated_model_retrieved_context"
        )
        evaluated_model_gold_answer = self.evaluated_model_gold_answer
        evaluator = self.evaluator
        result: EvaluationResult = self.client.evaluate(
            evaluator=evaluator,
            evaluated_model_input=evaluated_model_input,
            evaluated_model_output=evaluated_model_output,
            evaluated_model_retrieved_context=evaluated_model_retrieved_context,
            evaluated_model_gold_answer=evaluated_model_gold_answer,
            tags={},  # Optional metadata, supports arbitrary key-value pairs
        )
        return f"Evaluation result: {result.pass_}, Explanation: {result.explanation}"
try:
    # Rebuild the pydantic model once so forward references resolve;
    # swallow any failure since the class may still be usable as-is.
    if not getattr(PatronusLocalEvaluatorTool, "_model_rebuilt", False):
        PatronusLocalEvaluatorTool.model_rebuild()
        PatronusLocalEvaluatorTool._model_rebuilt = True  # type: ignore[attr-defined]
except Exception:  # noqa: S110
    pass
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/patronus_local_evaluator_tool.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py | import json
import os
from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
import requests
class FixedBaseToolSchema(BaseModel):
    """Call schema for PatronusPredefinedCriteriaEvalTool.

    NOTE(review): the text fields are typed ``dict`` although the tool's
    `_run` also accepts plain strings (it falls back to the dict's
    "description" key) — confirm whether ``str | dict`` was intended.
    """

    evaluated_model_input: dict = Field(
        ..., description="The agent's task description in simple text"
    )
    evaluated_model_output: dict = Field(
        ..., description="The agent's output of the task"
    )
    evaluated_model_retrieved_context: dict = Field(
        ..., description="The agent's context"
    )
    evaluated_model_gold_answer: dict = Field(
        ..., description="The agent's gold answer only if available"
    )
    evaluators: list[dict[str, str]] = Field(
        ...,
        description="List of dictionaries containing the evaluator and criteria to evaluate the model input and output. An example input for this field: [{'evaluator': '[evaluator-from-user]', 'criteria': '[criteria-from-user]'}]",
    )
class PatronusPredefinedCriteriaEvalTool(BaseTool):
    """PatronusEvalTool is a tool to automatically evaluate and score agent interactions.

    Results are logged to the Patronus platform at app.patronus.ai
    """

    name: str = "Call Patronus API tool for evaluation of model inputs and outputs"
    description: str = """This tool calls the Patronus Evaluation API that takes the following arguments:"""
    # REST endpoint used by `_run`.
    evaluate_url: str = "https://api.patronus.ai/v1/evaluate"
    args_schema: type[BaseModel] = FixedBaseToolSchema
    # Evaluator/criteria pairs, e.g. [{"evaluator": "...", "criteria": "..."}].
    evaluators: list[dict[str, str]] = Field(default_factory=list)

    def __init__(self, evaluators: list[dict[str, str]], **kwargs: Any):
        """Store the evaluator list and refresh the tool description."""
        super().__init__(**kwargs)
        if evaluators:
            self.evaluators = evaluators
            self.description = f"This tool calls the Patronus Evaluation API that takes an additional argument in addition to the following new argument:\n evaluators={evaluators}"
            self._generate_description()

    @staticmethod
    def _as_text(value: Any) -> Any:
        """Return `value` unchanged when it is a string (or None); otherwise
        unwrap its "description" key — the agent may pass either form.
        """
        if isinstance(value, str) or value is None:
            return value
        return value.get("description")

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Call the Patronus evaluate endpoint and return the parsed JSON.

        Raises:
            Exception: If the API responds with a non-200 status code.
        """
        data = {
            "evaluated_model_input": self._as_text(
                kwargs.get("evaluated_model_input")
            ),
            "evaluated_model_output": self._as_text(
                kwargs.get("evaluated_model_output")
            ),
            "evaluated_model_retrieved_context": self._as_text(
                kwargs.get("evaluated_model_retrieved_context")
            ),
            "evaluated_model_gold_answer": self._as_text(
                kwargs.get("evaluated_model_gold_answer")
            ),
            # `evaluators` is declared as a list; stay defensive in case a dict
            # was assigned directly, matching the original behavior.
            "evaluators": (
                self.evaluators
                if isinstance(self.evaluators, list)
                else self.evaluators.get("description")
            ),
        }
        headers = {
            "X-API-KEY": os.getenv("PATRONUS_API_KEY"),
            "accept": "application/json",
            "content-type": "application/json",
        }
        response = requests.post(
            self.evaluate_url,
            headers=headers,
            data=json.dumps(data),
            timeout=30,
        )
        if response.status_code != 200:
            raise Exception(
                f"Failed to evaluate model input and output. Status code: {response.status_code}. Reason: {response.text}"
            )
        return response.json()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/patronus_eval_tool/patronus_predefined_criteria_eval_tool.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py | from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedPDFSearchToolSchema(BaseModel):
    """Input for PDFSearchTool."""

    # Only the query is needed when the tool is bound to a specific PDF.
    query: str = Field(
        ..., description="Mandatory query you want to use to search the PDF's content"
    )
class PDFSearchToolSchema(FixedPDFSearchToolSchema):
    """Input for PDFSearchTool."""

    # Required when no PDF was fixed at construction time.
    pdf: str = Field(..., description="File path or URL of a PDF file to be searched")
class PDFSearchTool(RagTool):
    """Semantic search over PDF content, optionally pinned to a single file."""

    name: str = "Search a PDF's content"
    description: str = (
        "A tool that can be used to semantic search a query from a PDF's content."
    )
    args_schema: type[BaseModel] = PDFSearchToolSchema
    pdf: str | None = None

    @model_validator(mode="after")
    def _configure_for_pdf(self) -> Self:
        """Configure tool for specific PDF if provided."""
        if self.pdf is None:
            return self
        # A fixed PDF: index it now and drop `pdf` from the args schema.
        self.add(self.pdf)
        self.description = f"A tool that can be used to semantic search a query the {self.pdf} PDF's content."
        self.args_schema = FixedPDFSearchToolSchema
        self._generate_description()
        return self

    def add(self, pdf: str) -> None:
        """Index the given PDF (path or URL) into the knowledge base."""
        super().add(pdf, data_type=DataType.PDF_FILE)

    def _run(  # type: ignore[override]
        self,
        query: str,
        pdf: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Search indexed PDF content; an ad-hoc `pdf` is indexed first."""
        if pdf is not None:
            self.add(pdf)
        return super()._run(
            query=query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/pdf_search_tool/pdf_search_tool.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py | from __future__ import annotations
from collections.abc import Callable
import importlib
import json
import os
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field, model_validator
from pydantic.types import ImportString
# Input schema for QdrantVectorSearchTool.
class QdrantToolSchema(BaseModel):
    query: str = Field(
        ..., description="Query to search in Qdrant DB - always required."
    )
    # filter_by / filter_value must be supplied together to narrow the search.
    filter_by: str | None = Field(
        default=None,
        description="Parameter to filter the search by. When filtering, needs to be used in conjunction with filter_value.",
    )
    filter_value: Any | None = Field(
        default=None,
        description="Value to filter the search by. When filtering, needs to be used in conjunction with filter_by.",
    )
class QdrantConfig(BaseModel):
    """All Qdrant connection and search settings."""

    qdrant_url: str
    # Optional: unsecured/local Qdrant instances need no key.
    qdrant_api_key: str | None = None
    collection_name: str
    # Maximum number of points returned per search.
    limit: int = 3
    # Minimum similarity score for a point to be included in results.
    score_threshold: float = 0.35
    filter: Any | None = Field(
        default=None, description="Qdrant Filter instance for advanced filtering."
    )
class QdrantVectorSearchTool(BaseTool):
    """Vector search tool for Qdrant.

    Embeds the query (via ``custom_embedding_fn`` when provided, otherwise
    OpenAI's ``text-embedding-3-large``) and runs a similarity search against
    the configured Qdrant collection.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # --- Metadata ---
    name: str = "QdrantVectorSearchTool"
    description: str = "Search Qdrant vector DB for relevant documents."
    args_schema: type[BaseModel] = QdrantToolSchema
    package_dependencies: list[str] = Field(default_factory=lambda: ["qdrant-client"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OPENAI_API_KEY", description="API key for OpenAI", required=True
            )
        ]
    )
    qdrant_config: QdrantConfig
    qdrant_package: ImportString[Any] = Field(
        default="qdrant_client",
        description="Base package path for Qdrant. Will dynamically import client and models.",
    )
    custom_embedding_fn: ImportString[Callable[[str], list[float]]] | None = Field(
        default=None,
        description="Optional embedding function or import path.",
    )
    client: Any | None = None

    @model_validator(mode="after")
    def _setup_qdrant(self) -> QdrantVectorSearchTool:
        """Resolve the qdrant package and construct the client if missing."""
        # Import the qdrant_package if it's a string
        if isinstance(self.qdrant_package, str):
            self.qdrant_package = importlib.import_module(self.qdrant_package)
        if not self.client:
            self.client = self.qdrant_package.QdrantClient(
                url=self.qdrant_config.qdrant_url,
                api_key=self.qdrant_config.qdrant_api_key or None,
            )
        return self

    def _embed_query(self, query: str) -> list[float]:
        """Return the embedding vector for `query`.

        Uses ``custom_embedding_fn`` when configured; otherwise falls back to
        OpenAI's ``text-embedding-3-large`` (reads OPENAI_API_KEY).
        """
        if self.custom_embedding_fn:
            return self.custom_embedding_fn(query)
        import openai  # local import: only needed for the default embedder

        openai_client = openai.Client(api_key=os.getenv("OPENAI_API_KEY"))
        response = openai_client.embeddings.create(
            input=[query], model="text-embedding-3-large"
        )
        return response.data[0].embedding

    def _run(
        self,
        query: str,
        filter_by: str | None = None,
        filter_value: Any | None = None,
    ) -> str:
        """Perform vector similarity search.

        Args:
            query: Text to embed and search for.
            filter_by: Optional payload field to filter on.
            filter_value: Value the filtered field must match exactly.

        Returns:
            JSON string of result dicts with distance, metadata and context.
        """
        # Copy the configured base filter so per-call conditions don't mutate it.
        search_filter = (
            self.qdrant_config.filter.model_copy()
            if self.qdrant_config.filter is not None
            else self.qdrant_package.http.models.Filter(must=[])
        )
        if filter_by and filter_value is not None:
            if not hasattr(search_filter, "must") or not isinstance(
                search_filter.must, list
            ):
                search_filter.must = []
            search_filter.must.append(
                self.qdrant_package.http.models.FieldCondition(
                    key=filter_by,
                    match=self.qdrant_package.http.models.MatchValue(
                        value=filter_value
                    ),
                )
            )
        query_vector = self._embed_query(query)
        results = self.client.query_points(
            collection_name=self.qdrant_config.collection_name,
            query=query_vector,
            query_filter=search_filter,
            limit=self.qdrant_config.limit,
            score_threshold=self.qdrant_config.score_threshold,
        )
        return json.dumps(
            [
                {
                    "distance": p.score,
                    "metadata": p.payload.get("metadata", {}) if p.payload else {},
                    # Bug fix: default to "" (not {}) when payload is missing so
                    # "context" is always a string, matching the populated case.
                    "context": p.payload.get("text", "") if p.payload else "",
                }
                for p in results.points
            ],
            indent=2,
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/qdrant_vector_search_tool/qdrant_search_tool.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py | from abc import ABC, abstractmethod
from typing import Any, Literal, cast
from crewai.rag.core.base_embeddings_callable import EmbeddingFunction
from crewai.rag.embeddings.factory import build_embedder
from crewai.rag.embeddings.types import ProviderSpec
from crewai.tools import BaseTool
from pydantic import (
BaseModel,
ConfigDict,
Field,
TypeAdapter,
ValidationError,
field_validator,
model_validator,
)
from typing_extensions import Self, Unpack
from crewai_tools.tools.rag.types import (
AddDocumentParams,
ContentItem,
RagToolConfig,
VectorDbConfig,
)
def _validate_embedding_config(
    value: dict[str, Any] | ProviderSpec,
) -> dict[str, Any] | ProviderSpec:
    """Validate an embedding config, narrowing union errors to one provider.

    Pydantic validates the ProviderSpec union by trying every member and
    reporting all of their failures; this wrapper re-raises only the errors
    that belong to the provider actually named in the config, yielding a much
    shorter, focused message.

    Args:
        value: Raw embedding configuration dict, or an already-validated spec.

    Returns:
        The validated ProviderSpec, or ``value`` unchanged when it is not a
        dict or names no provider.

    Raises:
        ValueError: If the configuration is invalid for the named provider.
    """
    if not isinstance(value, dict):
        return value

    provider = value.get("provider")
    if not provider:
        return value

    try:
        adapter: TypeAdapter[ProviderSpec] = TypeAdapter(ProviderSpec)
        return adapter.validate_python(value)
    except ValidationError as exc:
        spec_key = f"{provider.lower()}providerspec"
        relevant = [
            err
            for err in exc.errors()
            if spec_key in str(err.get("loc", "")).lower()
        ]
        # No errors attributable to this provider: re-raise the original.
        if not relevant:
            raise

        formatted: list[str] = []
        for err in relevant:
            loc_parts = err["loc"]
            # Strip the union-member prefix so the location reads naturally.
            if str(loc_parts[0]).lower() == spec_key:
                loc_parts = loc_parts[1:]
            dotted = ".".join(str(part) for part in loc_parts)
            formatted.append(f"  - {dotted}: {err['msg']}")
        raise ValueError(
            f"Invalid configuration for embedding provider '{provider}':\n"
            + "\n".join(formatted)
        ) from exc
class Adapter(BaseModel, ABC):
    """Abstract base class for RAG adapters."""

    # Allow non-pydantic attribute types (e.g. vector-store clients) on
    # concrete subclasses.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    @abstractmethod
    def query(
        self,
        question: str,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Query the knowledge base with a question and return the answer."""

    @abstractmethod
    def add(
        self,
        *args: ContentItem,
        **kwargs: Unpack[AddDocumentParams],
    ) -> None:
        """Add content to the knowledge base."""
class RagTool(BaseTool):
    """Knowledge-base tool: indexes content via an Adapter and answers queries."""

    class _AdapterPlaceholder(Adapter):
        # Sentinel default adapter; replaced with a real one in _ensure_adapter.
        def query(
            self,
            question: str,
            similarity_threshold: float | None = None,
            limit: int | None = None,
        ) -> str:
            raise NotImplementedError

        def add(
            self,
            *args: ContentItem,
            **kwargs: Unpack[AddDocumentParams],
        ) -> None:
            raise NotImplementedError

    name: str = "Knowledge base"
    description: str = "A knowledge base that can be used to answer questions."
    # When True the adapter may summarize retrieved chunks.
    summarize: bool = False
    # Default retrieval knobs; per-call overrides arrive via _run.
    similarity_threshold: float = 0.6
    limit: int = 5
    collection_name: str = "rag_tool_collection"
    adapter: Adapter = Field(default_factory=_AdapterPlaceholder)
    config: RagToolConfig = Field(
        default_factory=RagToolConfig,
        description="Configuration format accepted by RagTool.",
    )

    @field_validator("config", mode="before")
    @classmethod
    def _validate_config(cls, value: Any) -> Any:
        """Validate config with improved error messages for embedding providers."""
        if not isinstance(value, dict):
            return value
        embedding_model = value.get("embedding_model")
        if embedding_model:
            # try/raise keeps _validate_embedding_config's provider-focused
            # ValueError intact instead of letting pydantic rewrap it.
            try:
                value["embedding_model"] = _validate_embedding_config(embedding_model)
            except ValueError:
                raise
        return value

    @model_validator(mode="after")
    def _ensure_adapter(self) -> Self:
        """Swap the placeholder for a real CrewAI RAG adapter after init."""
        if isinstance(self.adapter, RagTool._AdapterPlaceholder):
            # Imported lazily to avoid a hard dependency at module import time.
            from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter

            provider_cfg = self._parse_config(self.config)
            self.adapter = CrewAIRagAdapter(
                collection_name=self.collection_name,
                summarize=self.summarize,
                similarity_threshold=self.similarity_threshold,
                limit=self.limit,
                config=provider_cfg,
            )
        return self

    def _parse_config(self, config: RagToolConfig) -> Any:
        """Normalize the RagToolConfig into a provider-specific config object.

        Defaults to 'chromadb' with no extra provider config if none is supplied.
        """
        if not config:
            return self._create_provider_config("chromadb", {}, None)
        vectordb_cfg = cast(VectorDbConfig, config.get("vectordb", {}))
        provider: Literal["chromadb", "qdrant"] = vectordb_cfg.get(
            "provider", "chromadb"
        )
        provider_config: dict[str, Any] = vectordb_cfg.get("config", {})
        supported = ("chromadb", "qdrant")
        if provider not in supported:
            raise ValueError(
                f"Unsupported vector database provider: '{provider}'. "
                f"CrewAI RAG currently supports: {', '.join(supported)}."
            )
        embedding_spec: ProviderSpec | None = config.get("embedding_model")
        if embedding_spec:
            # Re-validate here as well: config may have bypassed the
            # field_validator (e.g. assigned post-init).
            embedding_spec = cast(
                ProviderSpec, _validate_embedding_config(embedding_spec)
            )
        embedding_function = build_embedder(embedding_spec) if embedding_spec else None
        return self._create_provider_config(
            provider, provider_config, embedding_function
        )

    @staticmethod
    def _create_provider_config(
        provider: Literal["chromadb", "qdrant"],
        provider_config: dict[str, Any],
        embedding_function: EmbeddingFunction[Any] | None,
    ) -> Any:
        """Instantiate provider config with optional embedding_function injected."""
        # Backend configs are imported lazily so only the chosen provider's
        # dependencies are required at runtime.
        if provider == "chromadb":
            from crewai.rag.chromadb.config import ChromaDBConfig

            kwargs = dict(provider_config)
            if embedding_function is not None:
                kwargs["embedding_function"] = embedding_function
            return ChromaDBConfig(**kwargs)
        if provider == "qdrant":
            from crewai.rag.qdrant.config import QdrantConfig

            kwargs = dict(provider_config)
            if embedding_function is not None:
                kwargs["embedding_function"] = embedding_function
            return QdrantConfig(**kwargs)
        # Unreachable when called via _parse_config (which checks `supported`).
        raise ValueError(f"Unhandled provider: {provider}")

    def add(
        self,
        *args: ContentItem,
        **kwargs: Unpack[AddDocumentParams],
    ) -> None:
        """Add content to the knowledge base.

        Args:
            *args: Content items to add (strings, paths, or document dicts)
            data_type: DataType enum or string (e.g. "file", "pdf_file", "text")
            path: Path to file or directory, alias to positional arg
            file_path: Alias for path
            metadata: Additional metadata to attach to documents
            url: URL to fetch content from
            website: Website URL to scrape
            github_url: GitHub repository URL
            youtube_url: YouTube video URL
            directory_path: Path to directory

        Examples:
            rag_tool.add("path/to/document.pdf", data_type=DataType.PDF_FILE)
            # Keyword argument (documented API)
            rag_tool.add(path="path/to/document.pdf", data_type="file")
            rag_tool.add(file_path="path/to/document.pdf", data_type="pdf_file")
            # Auto-detect type from extension
            rag_tool.add("path/to/document.pdf")  # auto-detects PDF
        """
        self.adapter.add(*args, **kwargs)

    def _run(
        self,
        query: str,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Query the knowledge base and return formatted relevant content."""
        # Per-call overrides win over the instance defaults.
        threshold = (
            similarity_threshold
            if similarity_threshold is not None
            else self.similarity_threshold
        )
        result_limit = limit if limit is not None else self.limit
        return f"Relevant Content:\n{self.adapter.query(query, similarity_threshold=threshold, limit=result_limit)}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/rag/rag_tool.py",
"license": "MIT License",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py | import os
from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
import requests
try:
from bs4 import BeautifulSoup
BEAUTIFULSOUP_AVAILABLE = True
except ImportError:
BEAUTIFULSOUP_AVAILABLE = False
# Intentionally empty: URL and CSS selector are fixed at construction time.
class FixedScrapeElementFromWebsiteToolSchema(BaseModel):
    """Input for ScrapeElementFromWebsiteTool."""
class ScrapeElementFromWebsiteToolSchema(FixedScrapeElementFromWebsiteToolSchema):
    """Input for ScrapeElementFromWebsiteTool."""

    website_url: str = Field(..., description="Mandatory website url to read the file")
    css_element: str = Field(
        ...,
        description="Mandatory css reference for element to scrape from the website",
    )
class ScrapeElementFromWebsiteTool(BaseTool):
    """Scrape the text of elements matching a CSS selector from a web page."""

    name: str = "Read a website content"
    description: str = "A tool that can be used to read a website content."
    args_schema: type[BaseModel] = ScrapeElementFromWebsiteToolSchema
    website_url: str | None = None
    cookies: dict | None = None
    css_element: str | None = None
    # NOTE(review): advertises br in Accept-Encoding — presumably requests can
    # decode it in this environment; confirm brotli support is installed.
    headers: dict | None = Field(
        default_factory=lambda: {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Language": "en-US,en;q=0.9",
            "Referer": "https://www.google.com/",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "Accept-Encoding": "gzip, deflate, br",
        }
    )

    def __init__(
        self,
        website_url: str | None = None,
        cookies: dict | None = None,
        css_element: str | None = None,
        **kwargs,
    ):
        """Optionally pin the tool to one URL/selector and attach a cookie."""
        super().__init__(**kwargs)
        if website_url is not None:
            # Fixed target: store it and drop the arguments from the schema.
            self.website_url = website_url
            self.css_element = css_element
            self.description = (
                f"A tool that can be used to read {website_url}'s content."
            )
            self.args_schema = FixedScrapeElementFromWebsiteToolSchema
            self._generate_description()
        if cookies is not None:
            # Cookie value is resolved from the environment by variable name.
            self.cookies = {cookies["name"]: os.getenv(cookies["value"])}

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Fetch the page and return the joined text of matching elements."""
        if not BEAUTIFULSOUP_AVAILABLE:
            raise ImportError(
                "beautifulsoup4 is not installed. Please install it with `pip install crewai-tools[beautifulsoup4]`"
            )
        url = kwargs.get("website_url", self.website_url)
        selector = kwargs.get("css_element", self.css_element)
        if url is None or selector is None:
            raise ValueError("Both website_url and css_element must be provided.")
        response = requests.get(
            url,
            headers=self.headers,
            cookies=self.cookies or {},
            timeout=30,
        )
        soup = BeautifulSoup(response.content, "html.parser")
        return "\n".join(node.get_text() for node in soup.select(selector))
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/scrape_element_from_website/scrape_element_from_website.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py | import os
import re
from typing import Any
from pydantic import Field
import requests
try:
from bs4 import BeautifulSoup
BEAUTIFULSOUP_AVAILABLE = True
except ImportError:
BEAUTIFULSOUP_AVAILABLE = False
from crewai.tools import BaseTool
from pydantic import BaseModel
# Intentionally empty: the target URL is fixed at construction time.
class FixedScrapeWebsiteToolSchema(BaseModel):
    """Input for ScrapeWebsiteTool."""
class ScrapeWebsiteToolSchema(FixedScrapeWebsiteToolSchema):
    """Input for ScrapeWebsiteTool."""

    website_url: str = Field(..., description="Mandatory website url to read the file")
class ScrapeWebsiteTool(BaseTool):
    """Fetch a web page and return its whitespace-normalized text content."""

    name: str = "Read website content"
    description: str = "A tool that can be used to read a website content."
    args_schema: type[BaseModel] = ScrapeWebsiteToolSchema
    website_url: str | None = None
    cookies: dict | None = None
    headers: dict | None = Field(
        default_factory=lambda: {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Language": "en-US,en;q=0.9",
            "Referer": "https://www.google.com/",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
        }
    )

    def __init__(
        self,
        website_url: str | None = None,
        cookies: dict | None = None,
        **kwargs,
    ):
        """Optionally pin the tool to one URL and attach a session cookie."""
        super().__init__(**kwargs)
        if not BEAUTIFULSOUP_AVAILABLE:
            raise ImportError(
                "beautifulsoup4 is not installed. Please install it with `pip install crewai-tools[beautifulsoup4]`"
            )
        if website_url is not None:
            # Fixed target: store it and drop the argument from the schema.
            self.website_url = website_url
            self.description = (
                f"A tool that can be used to read {website_url}'s content."
            )
            self.args_schema = FixedScrapeWebsiteToolSchema
            self._generate_description()
        if cookies is not None:
            # Cookie value is resolved from the environment by variable name.
            self.cookies = {cookies["name"]: os.getenv(cookies["value"])}

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Fetch the page, extract its text, and collapse extra whitespace."""
        url = kwargs.get("website_url", self.website_url)
        if url is None:
            raise ValueError("Website URL must be provided.")
        response = requests.get(
            url,
            timeout=15,
            headers=self.headers,
            cookies=self.cookies or {},
        )
        # Let requests guess the encoding from the body, not just the headers.
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, "html.parser")
        body = "The following text is scraped website content:\n\n" + soup.get_text(" ")
        collapsed = re.sub("[ \t]+", " ", body)
        return re.sub("\\s+\n\\s+", "\n", collapsed)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/scrape_website_tool/scrape_website_tool.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py | from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field, field_validator
# Type checking import
if TYPE_CHECKING:
from scrapegraph_py import Client # type: ignore[import-untyped]
# Root of the tool's exception hierarchy.
class ScrapegraphError(Exception):
    """Base exception for Scrapegraph-related errors."""
# Raised (and re-raised untouched by _run) when the API reports rate limiting.
class RateLimitError(ScrapegraphError):
    """Raised when API rate limits are exceeded."""
# Intentionally empty: all arguments are pre-configured on the tool.
class FixedScrapegraphScrapeToolSchema(BaseModel):
    """Input for ScrapegraphScrapeTool when website_url is fixed."""
class ScrapegraphScrapeToolSchema(FixedScrapegraphScrapeToolSchema):
    """Input for ScrapegraphScrapeTool."""

    website_url: str = Field(..., description="Mandatory website url to scrape")
    user_prompt: str = Field(
        default="Extract the main content of the webpage",
        description="Prompt to guide the extraction of content",
    )

    @field_validator("website_url")
    @classmethod
    def validate_url(cls, v):
        """Validate URL format."""
        # Require both a scheme and a network location (domain).
        try:
            result = urlparse(v)
            if not all([result.scheme, result.netloc]):
                raise ValueError
            return v
        except Exception as e:
            raise ValueError(
                "Invalid URL format. URL must include scheme (http/https) and domain"
            ) from e
class ScrapegraphScrapeTool(BaseTool):
    """A tool that uses Scrapegraph AI to intelligently scrape website content.

    Raises:
        ValueError: If API key is missing or URL format is invalid
        RateLimitError: If API rate limits are exceeded
        RuntimeError: If scraping operation fails
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    name: str = "Scrapegraph website scraper"
    description: str = (
        "A tool that uses Scrapegraph AI to intelligently scrape website content."
    )
    args_schema: type[BaseModel] = ScrapegraphScrapeToolSchema
    website_url: str | None = None
    user_prompt: str | None = None
    api_key: str | None = None
    enable_logging: bool = False
    _client: Client | None = None
    package_dependencies: list[str] = Field(default_factory=lambda: ["scrapegraph-py"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SCRAPEGRAPH_API_KEY",
                description="API key for Scrapegraph AI services",
                required=False,
            ),
        ]
    )

    def __init__(
        self,
        website_url: str | None = None,
        user_prompt: str | None = None,
        api_key: str | None = None,
        enable_logging: bool = False,
        **kwargs,
    ):
        """Set up the Scrapegraph client, optionally pinning a URL and prompt.

        Args:
            website_url: Fixed URL to scrape; when given, the args schema no
                longer asks for a URL.
            user_prompt: Default extraction prompt for _run.
            api_key: Scrapegraph API key; falls back to SCRAPEGRAPH_API_KEY.
            enable_logging: Turn on the scrapegraph SDK logger.
        """
        super().__init__(**kwargs)
        try:
            from scrapegraph_py import Client  # type: ignore[import-not-found]
            from scrapegraph_py.logger import (  # type: ignore[import-not-found]
                sgai_logger,
            )
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'scrapegraph-py' package. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "scrapegraph-py"], check=True)  # noqa: S607
                from scrapegraph_py import Client  # type: ignore[import-untyped]
                from scrapegraph_py.logger import (  # type: ignore[import-untyped]
                    sgai_logger,
                )
            else:
                raise ImportError(
                    "`scrapegraph-py` package not found, please run `uv add scrapegraph-py`"
                ) from None
        self.api_key = api_key or os.getenv("SCRAPEGRAPH_API_KEY")
        # Bug fix: validate the key BEFORE constructing the client so a missing
        # key raises ValueError instead of first building an unusable client.
        if not self.api_key:
            raise ValueError("Scrapegraph API key is required")
        self._client = Client(api_key=self.api_key)
        if website_url is not None:
            self._validate_url(website_url)
            self.website_url = website_url
            self.description = f"A tool that uses Scrapegraph AI to intelligently scrape {website_url}'s content."
            self.args_schema = FixedScrapegraphScrapeToolSchema
        if user_prompt is not None:
            self.user_prompt = user_prompt
        # Configure logging only if enabled
        if self.enable_logging:
            sgai_logger.set_logging(level="INFO")

    @staticmethod
    def _validate_url(url: str) -> None:
        """Validate URL format (scheme and domain are both required)."""
        try:
            result = urlparse(url)
            if not all([result.scheme, result.netloc]):
                raise ValueError
        except Exception as e:
            raise ValueError(
                "Invalid URL format. URL must include scheme (http/https) and domain"
            ) from e

    def _handle_api_response(self, response: dict) -> str:
        """Handle and validate API response."""
        if not response:
            raise RuntimeError("Empty response from Scrapegraph API")
        if "error" in response:
            error_msg = response.get("error", {}).get("message", "Unknown error")
            if "rate limit" in error_msg.lower():
                raise RateLimitError(f"Rate limit exceeded: {error_msg}")
            raise RuntimeError(f"API error: {error_msg}")
        if "result" not in response:
            raise RuntimeError("Invalid response format from Scrapegraph API")
        return response["result"]

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Scrape the target URL via SmartScraper and return the raw result.

        NOTE(review): the `finally` block closes the client after every call,
        so a second `_run` would reuse a closed client — confirm single-use is
        intended before relying on repeated invocations.
        """
        website_url = kwargs.get("website_url", self.website_url)
        user_prompt = (
            kwargs.get("user_prompt", self.user_prompt)
            or "Extract the main content of the webpage"
        )
        if not website_url:
            raise ValueError("website_url is required")
        # Validate URL format
        self._validate_url(website_url)
        try:
            # Make the SmartScraper request
            if self._client is None:
                raise RuntimeError("Client not initialized")
            return self._client.smartscraper(
                website_url=website_url,
                user_prompt=user_prompt,
            )
        except RateLimitError:
            raise  # Re-raise rate limit errors
        except Exception as e:
            raise RuntimeError(f"Scraping failed: {e!s}") from e
        finally:
            # Always close the client
            if self._client is not None:
                self._client.close()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/scrapegraph_scrape_tool/scrapegraph_scrape_tool.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py | import logging
import os
from typing import Any, Literal
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
logger = logging.getLogger(__file__)
# Input schema for ScrapflyScrapeWebsiteTool.
class ScrapflyScrapeWebsiteToolSchema(BaseModel):
    url: str = Field(description="Webpage URL")
    # Passed through to ScrapeConfig(format=...).
    scrape_format: Literal["raw", "markdown", "text"] | None = Field(
        default="markdown", description="Webpage extraction format"
    )
    scrape_config: dict[str, Any] | None = Field(
        default=None, description="Scrapfly request scrape config"
    )
    # When truthy, _run logs failures and returns None instead of raising.
    ignore_scrape_failures: bool | None = Field(
        default=None, description="whether to ignore failures"
    )
class ScrapflyScrapeWebsiteTool(BaseTool):
    """Scrape a webpage with the Scrapfly API, returning markdown/text/raw."""

    name: str = "Scrapfly web scraping API tool"
    description: str = (
        "Scrape a webpage url using Scrapfly and return its content as markdown or text"
    )
    args_schema: type[BaseModel] = ScrapflyScrapeWebsiteToolSchema
    api_key: str | None = None
    scrapfly: Any | None = None
    package_dependencies: list[str] = Field(default_factory=lambda: ["scrapfly-sdk"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SCRAPFLY_API_KEY",
                description="API key for Scrapfly",
                required=True,
            ),
        ]
    )

    def __init__(self, api_key: str | None = None):
        """Create the tool.

        Args:
            api_key: Scrapfly API key. Made optional (backward-compatible):
                falls back to the SCRAPFLY_API_KEY environment variable, which
                the existing ``api_key or os.getenv(...)`` logic already
                supported.
        """
        super().__init__(
            name="Scrapfly web scraping API tool",
            description="Scrape a webpage url using Scrapfly and return its content as markdown or text",
        )
        try:
            from scrapfly import ScrapflyClient  # type: ignore[import-untyped]
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'scrapfly-sdk' package. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "scrapfly-sdk"], check=True)  # noqa: S607
                # Bug fix: the original never re-imported after installing,
                # so ScrapflyClient was unbound below (NameError).
                from scrapfly import ScrapflyClient  # type: ignore[import-untyped]
            else:
                raise ImportError(
                    "`scrapfly-sdk` package not found, please run `uv add scrapfly-sdk`"
                ) from None
        self.scrapfly = ScrapflyClient(key=api_key or os.getenv("SCRAPFLY_API_KEY"))

    def _run(
        self,
        url: str,
        scrape_format: str = "markdown",
        scrape_config: dict[str, Any] | None = None,
        ignore_scrape_failures: bool | None = None,
    ):
        """Scrape `url` and return its content.

        Returns None on failure when `ignore_scrape_failures` is truthy;
        otherwise the underlying exception propagates.
        """
        from scrapfly import ScrapeApiResponse, ScrapeConfig

        scrape_config = scrape_config if scrape_config is not None else {}
        try:
            response: ScrapeApiResponse = self.scrapfly.scrape(  # type: ignore[union-attr]
                ScrapeConfig(url, format=scrape_format, **scrape_config)
            )
            return response.scrape_result["content"]
        except Exception as e:
            if ignore_scrape_failures:
                logger.error(f"Error fetching data from {url}, exception: {e}")
                return None
            # Bare raise preserves the original traceback.
            raise
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/scrapfly_scrape_website_tool/scrapfly_scrape_website_tool.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py | import re
import time
from typing import Any
from urllib.parse import urlparse
from crewai.tools import BaseTool
from pydantic import BaseModel, Field, field_validator
# Intentionally empty: URL and CSS selector are fixed at construction time.
class FixedSeleniumScrapingToolSchema(BaseModel):
    """Input for SeleniumScrapingTool."""
class SeleniumScrapingToolSchema(FixedSeleniumScrapingToolSchema):
    """Input for SeleniumScrapingTool."""

    website_url: str = Field(
        ...,
        description="Mandatory website url to read the file. Must start with http:// or https://",
    )
    css_element: str = Field(
        ...,
        description="Mandatory css reference for element to scrape from the website",
    )

    @field_validator("website_url")
    @classmethod
    def validate_website_url(cls, v):
        """Reject empty, oversized, non-http(s), malformed or whitespace URLs."""
        if not v:
            raise ValueError("Website URL cannot be empty")
        if len(v) > 2048:  # Common maximum URL length
            raise ValueError("URL is too long (max 2048 characters)")
        if not re.match(r"^https?://", v):
            raise ValueError("URL must start with http:// or https://")
        try:
            result = urlparse(v)
            if not all([result.scheme, result.netloc]):
                raise ValueError("Invalid URL format")
        except Exception as e:
            raise ValueError(f"Invalid URL: {e!s}") from e
        if re.search(r"\s", v):
            raise ValueError("URL cannot contain whitespace")
        return v
class SeleniumScrapingTool(BaseTool):
    # Tool metadata presented to the agent; description/args_schema are
    # narrowed in __init__ when a fixed website_url is supplied.
    name: str = "Read a website content"
    description: str = "A tool that can be used to read a website content."
    args_schema: type[BaseModel] = SeleniumScrapingToolSchema
    website_url: str | None = None
    driver: Any | None = None  # selenium webdriver instance (created in __init__)
    cookie: dict | None = None  # optional cookie added before reloading the page
    wait_time: int | None = 3  # seconds slept after each page load
    css_element: str | None = None
    return_html: bool | None = False  # return outerHTML instead of visible text
    _by: Any | None = None  # selenium By locator enum, bound in __init__
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["selenium", "webdriver-manager"]
    )

    def __init__(
        self,
        website_url: str | None = None,
        cookie: dict | None = None,
        css_element: str | None = None,
        **kwargs,
    ):
        """Set up the selenium driver and optional fixed scrape target.

        If selenium is missing, the user is interactively offered an
        install via `uv pip install`; declining raises ImportError.
        A pre-built driver or Options object may be injected through
        kwargs ("driver" / "options"); otherwise a headless Chrome
        driver is created.
        """
        super().__init__(**kwargs)
        try:
            from selenium import webdriver  # type: ignore[import-not-found]
            from selenium.webdriver.chrome.options import (  # type: ignore[import-not-found]
                Options,
            )
            from selenium.webdriver.common.by import (  # type: ignore[import-not-found]
                By,
            )
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'selenium' and 'webdriver-manager' packages. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(
                    ["uv", "pip", "install", "selenium", "webdriver-manager"],  # noqa: S607
                    check=True,
                )
                from selenium import webdriver  # type: ignore[import-not-found]
                from selenium.webdriver.chrome.options import (  # type: ignore[import-not-found]
                    Options,
                )
                from selenium.webdriver.common.by import (  # type: ignore[import-not-found]
                    By,
                )
            else:
                raise ImportError(
                    "`selenium` and `webdriver-manager` package not found, please run `uv add selenium webdriver-manager`"
                ) from None
        if "driver" not in kwargs:
            if "options" not in kwargs:
                options: Options = Options()
                options.add_argument("--headless")
            else:
                options = kwargs["options"]
            self.driver = webdriver.Chrome(options=options)
        else:
            self.driver = kwargs["driver"]
        self._by = By
        if cookie is not None:
            self.cookie = cookie
        if css_element is not None:
            self.css_element = css_element
        if website_url is not None:
            # Fixed-target mode: pin the URL and switch to the empty schema
            # so the agent does not pass arguments.
            self.website_url = website_url
            self.description = (
                f"A tool that can be used to read {website_url}'s content."
            )
            self.args_schema = FixedSeleniumScrapingToolSchema
        self._generate_description()

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Scrape the requested page and return its content as one string.

        kwargs may override website_url / css_element / return_html;
        otherwise the values set at construction time are used. Errors are
        returned as a string rather than raised.

        NOTE(review): the finally-block closes the driver, so a second
        _run on the same instance will fail — confirm whether the tool is
        intended to be single-use.
        """
        website_url = kwargs.get("website_url", self.website_url)
        css_element = kwargs.get("css_element", self.css_element)
        return_html = kwargs.get("return_html", self.return_html)
        try:
            self._make_request(website_url, self.cookie, self.wait_time)
            content = self._get_content(css_element, return_html)
            return "\n".join(content)
        except Exception as e:
            return f"Error scraping website: {e!s}"
        finally:
            if self.driver is not None:
                self.driver.close()

    def _get_content(self, css_element, return_html):
        """Return page content: whole <body> if no CSS selector was given,
        otherwise the content of every matching element."""
        content = []
        if self._is_css_element_empty(css_element):
            content.append(self._get_body_content(return_html))
        else:
            content.extend(self._get_elements_content(css_element, return_html))
        return content

    def _is_css_element_empty(self, css_element):
        """True when the selector is None or blank."""
        return css_element is None or css_element.strip() == ""

    def _get_body_content(self, return_html):
        """Return the <body> element as outerHTML or visible text."""
        body_element = self.driver.find_element(self._by.TAG_NAME, "body")
        return (
            body_element.get_attribute("outerHTML")
            if return_html
            else body_element.text
        )

    def _get_elements_content(self, css_element, return_html):
        """Return each element matching the CSS selector as outerHTML or text."""
        elements_content = []
        for element in self.driver.find_elements(self._by.CSS_SELECTOR, css_element):
            elements_content.append(  # noqa: PERF401
                element.get_attribute("outerHTML") if return_html else element.text
            )
        return elements_content

    def _make_request(self, url, cookie, wait_time):
        """Load the URL; if a cookie is provided, set it and reload so the
        page is rendered with the cookie applied. Sleeps wait_time seconds
        after each navigation to let the page settle.

        Raises:
            ValueError: If the URL is empty or not http(s).
        """
        if not url:
            raise ValueError("URL cannot be empty")
        # Validate URL format
        if not re.match(r"^https?://", url):
            raise ValueError("URL must start with http:// or https://")
        self.driver.get(url)
        time.sleep(wait_time)
        if cookie:
            self.driver.add_cookie(cookie)
            time.sleep(wait_time)
            self.driver.get(url)
            time.sleep(wait_time)

    def close(self):
        # Explicitly close the underlying webdriver window.
        self.driver.close()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/selenium_scraping_tool/selenium_scraping_tool.py",
"license": "MIT License",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py | import os
import re
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import Field
class SerpApiBaseTool(BaseTool):
    """Base class for SerpApi functionality with shared capabilities."""

    # Runtime dependency, installed on demand in __init__ if missing.
    package_dependencies: list[str] = Field(default_factory=lambda: ["serpapi"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPAPI_API_KEY",
                description="API key for SerpApi searches",
                required=True,
            ),
        ]
    )
    client: Any | None = None  # serpapi.Client, created in __init__

    def __init__(self, **kwargs):
        """Create the SerpApi client.

        Offers an interactive `uv add serpapi` install when the package is
        missing (declining raises ImportError), then reads the API key from
        the SERPAPI_API_KEY environment variable.

        Raises:
            ImportError: If serpapi is missing and the user declines install.
            ValueError: If SERPAPI_API_KEY is unset or empty.
        """
        super().__init__(**kwargs)
        try:
            from serpapi import Client  # type: ignore
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'serpapi' package. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "serpapi"], check=True)  # noqa: S607
                from serpapi import Client  # type: ignore[import-untyped]
            else:
                raise ImportError(
                    "`serpapi` package not found, please install with `uv add serpapi`"
                ) from None
        api_key = os.getenv("SERPAPI_API_KEY")
        if not api_key:
            raise ValueError(
                "Missing API key, you can get the key from https://serpapi.com/manage-api-key"
            )
        self.client = Client(api_key=api_key)

    def _omit_fields(self, data: dict | list, omit_patterns: list[str]) -> None:
        """Recursively delete (in place) every dict key whose name matches
        any regex in omit_patterns; non-matching dict/list values are
        descended into.

        NOTE(review): patterns are re.compile'd on every key visited —
        hoisting the compilation out of the loop would avoid repeated work.
        """
        if isinstance(data, dict):
            # Iterate over a snapshot of the keys since we mutate the dict.
            for field in list(data.keys()):
                if any(re.compile(p).match(field) for p in omit_patterns):
                    data.pop(field, None)
                else:
                    if isinstance(data[field], (dict, list)):
                        self._omit_fields(data[field], omit_patterns)
        elif isinstance(data, list):
            for item in data:
                self._omit_fields(item, omit_patterns)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serpapi_tool/serpapi_base_tool.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py | from typing import Any
from pydantic import BaseModel, ConfigDict, Field
from crewai_tools.tools.serpapi_tool.serpapi_base_tool import SerpApiBaseTool
# serpapi is an optional dependency (installed lazily by SerpApiBaseTool),
# so import its HTTPError defensively.
try:
    from serpapi import HTTPError  # type: ignore[import-untyped]
except ImportError:
    # NOTE(review): typing.Any is not an exception class, so when serpapi is
    # absent a later `except HTTPError` would itself raise TypeError —
    # confirm whether `Exception` was intended as the fallback.
    HTTPError = Any
class SerpApiGoogleSearchToolSchema(BaseModel):
    """Input for Google Search via SerpApi."""

    search_query: str = Field(
        ..., description="Mandatory search query you want to use to Google search."
    )
    location: str | None = Field(
        None, description="Location you want the search to be performed in."
    )
class SerpApiGoogleSearchTool(SerpApiBaseTool):
    """Run a Google web search through SerpApi and return cleaned results."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True, validate_assignment=True, frozen=False
    )
    name: str = "Google Search"
    # Fix: the original description read "A tool to perform to perform a
    # Google search..." (duplicated words).
    description: str = "A tool to perform a Google search with a search_query."
    args_schema: type[BaseModel] = SerpApiGoogleSearchToolSchema

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Execute the search.

        Args:
            **kwargs: ``search_query`` (str) and optional ``location`` (str),
                as declared by the args schema.

        Returns:
            dict of search results with metadata/pagination noise removed,
            or an error string if SerpApi reports an HTTP error.
        """
        try:
            results = self.client.search(  # type: ignore[union-attr]
                {
                    "q": kwargs.get("search_query"),
                    "location": kwargs.get("location"),
                }
            ).as_dict()
            # Strip bookkeeping fields before handing results to the LLM.
            self._omit_fields(
                results,
                [
                    r"search_metadata",
                    r"search_parameters",
                    r"serpapi_.+",
                    r".+_token",
                    r"displayed_link",
                    r"pagination",
                ],
            )
            return results
        except HTTPError as e:
            return f"An error occurred: {e!s}. Some parameters may be invalid."
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serpapi_tool/serpapi_google_search_tool.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py | from typing import Any
from pydantic import BaseModel, ConfigDict, Field
from crewai_tools.tools.serpapi_tool.serpapi_base_tool import SerpApiBaseTool
# serpapi is an optional dependency (installed lazily by SerpApiBaseTool),
# so import its HTTPError defensively.
try:
    from serpapi import HTTPError  # type: ignore[import-untyped]
except ImportError:
    # NOTE(review): typing.Any is not an exception class; `except HTTPError`
    # would raise TypeError if serpapi is missing — confirm whether
    # `Exception` was intended as the fallback.
    HTTPError = Any
class SerpApiGoogleShoppingToolSchema(BaseModel):
    """Input for Google Shopping via SerpApi."""

    search_query: str = Field(
        ..., description="Mandatory search query you want to use to Google shopping."
    )
    location: str | None = Field(
        None, description="Location you want the search to be performed in."
    )
class SerpApiGoogleShoppingTool(SerpApiBaseTool):
    """Query SerpApi's Google Shopping engine and return cleaned results."""

    model_config = ConfigDict(
        arbitrary_types_allowed=True, validate_assignment=True, frozen=False
    )
    name: str = "Google Shopping"
    description: str = (
        "A tool to perform search on Google shopping with a search_query."
    )
    args_schema: type[BaseModel] = SerpApiGoogleShoppingToolSchema

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Run the shopping search; on HTTPError return an error string."""
        params = {
            "engine": "google_shopping",
            "q": kwargs.get("search_query"),
            "location": kwargs.get("location"),
        }
        # Bookkeeping fields stripped from the response before returning.
        noise_patterns = [
            r"search_metadata",
            r"search_parameters",
            r"serpapi_.+",
            r"filters",
            r"pagination",
        ]
        try:
            results = self.client.search(params).as_dict()  # type: ignore[union-attr]
            self._omit_fields(results, noise_patterns)
            return results
        except HTTPError as e:
            return f"An error occurred: {e!s}. Some parameters may be invalid."
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serpapi_tool/serpapi_google_shopping_tool.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py | import datetime
import json
import logging
import os
from typing import Any, TypedDict
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
logger = logging.getLogger(__name__)
class KnowledgeGraph(TypedDict, total=False):
    """Knowledge graph data from search results (all keys optional)."""

    title: str
    type: str
    website: str
    imageUrl: str
    description: str
    descriptionSource: str
    descriptionLink: str
    attributes: dict[str, Any]
class Sitelink(TypedDict):
    """Sitelink data for organic search results (both keys required)."""

    title: str
    link: str
class OrganicResult(TypedDict, total=False):
    """Organic search result data (all keys optional)."""

    title: str
    link: str
    snippet: str
    position: int | None
    sitelinks: list[Sitelink]
class PeopleAlsoAskResult(TypedDict):
    """'People Also Ask' result data."""

    question: str
    snippet: str
    title: str
    link: str
class RelatedSearchResult(TypedDict):
    """Related search result data (a single suggested query string)."""

    query: str
class NewsResult(TypedDict):
    """News search result data."""

    title: str
    link: str
    snippet: str
    date: str
    source: str
    imageUrl: str
class SearchParameters(TypedDict, total=False):
    """Search parameters used for the query (all keys optional)."""

    q: str
    type: str
class FormattedResults(TypedDict, total=False):
    """Formatted search results from Serper API (sections present depend on
    the search type and on what the API returned)."""

    searchParameters: SearchParameters
    knowledgeGraph: KnowledgeGraph
    organic: list[OrganicResult]
    peopleAlsoAsk: list[PeopleAlsoAskResult]
    relatedSearches: list[RelatedSearchResult]
    news: list[NewsResult]
    credits: int
def _save_results_to_file(content: str) -> None:
    """Save the search results to a timestamped text file in the CWD.

    Args:
        content: Serialized search results to write.

    Raises:
        IOError: If the file cannot be written (logged, then re-raised).
    """
    filename = f"search_results_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
    try:
        # Explicit UTF-8 so non-ASCII snippets don't fail on platforms whose
        # default locale encoding cannot represent them.
        with open(filename, "w", encoding="utf-8") as file:
            file.write(content)
        # Fix: the original logged the literal text "(unknown)" instead of
        # the actual filename.
        logger.info(f"Results saved to {filename}")
    except IOError as e:
        logger.error(f"Failed to save results to file: {e}")
        raise
class SerperDevToolSchema(BaseModel):
    """Input for SerperDevTool."""

    search_query: str = Field(
        ..., description="Mandatory search query you want to use to search the internet"
    )
class SerperDevTool(BaseTool):
    """Search tool backed by the Serper (google.serper.dev) API."""

    name: str = "Search the internet with Serper"
    description: str = (
        "A tool that can be used to search the internet with a search_query. "
        "Supports different search types: 'search' (default), 'news'"
    )
    args_schema: type[BaseModel] = SerperDevToolSchema
    base_url: str = "https://google.serper.dev"
    n_results: int = 10  # cap applied per result section and sent as "num"
    save_file: bool = False  # write formatted results to a local file
    search_type: str = "search"  # "search" or "news"
    country: str | None = ""  # sent as "gl" when not ""
    location: str | None = ""  # sent as "location" when not ""
    locale: str | None = ""  # sent as "hl" when not ""
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPER_API_KEY", description="API key for Serper", required=True
            ),
        ]
    )

    def _get_search_url(self, search_type: str) -> str:
        """Get the appropriate endpoint URL based on search type.

        Raises:
            ValueError: If search_type is not "search" or "news".
        """
        search_type = search_type.lower()
        allowed_search_types = ["search", "news"]
        if search_type not in allowed_search_types:
            raise ValueError(
                f"Invalid search type: {search_type}. Must be one of: {', '.join(allowed_search_types)}"
            )
        return f"{self.base_url}/{search_type}"

    @staticmethod
    def _process_knowledge_graph(kg: dict[str, Any]) -> KnowledgeGraph:
        """Process knowledge graph data from search results, defaulting every
        missing field to an empty value."""
        return {
            "title": kg.get("title", ""),
            "type": kg.get("type", ""),
            "website": kg.get("website", ""),
            "imageUrl": kg.get("imageUrl", ""),
            "description": kg.get("description", ""),
            "descriptionSource": kg.get("descriptionSource", ""),
            "descriptionLink": kg.get("descriptionLink", ""),
            "attributes": kg.get("attributes", {}),
        }

    def _process_organic_results(
        self, organic_results: list[dict[str, Any]]
    ) -> list[OrganicResult]:
        """Process up to n_results organic search results; entries missing
        required keys (title/link) are logged and skipped."""
        processed_results: list[OrganicResult] = []
        for result in organic_results[: self.n_results]:
            try:
                result_data: OrganicResult = {  # type: ignore[typeddict-item]
                    "title": result["title"],
                    "link": result["link"],
                    "snippet": result.get("snippet", ""),
                    "position": result.get("position"),
                }
                if "sitelinks" in result:
                    result_data["sitelinks"] = [  # type: ignore[typeddict-unknown-key]
                        {
                            "title": sitelink.get("title", ""),
                            "link": sitelink.get("link", ""),
                        }
                        for sitelink in result["sitelinks"]
                    ]
                processed_results.append(result_data)
            except KeyError:  # noqa: PERF203
                logger.warning(f"Skipping malformed organic result: {result}")
                continue
        return processed_results  # type: ignore[return-value]

    def _process_people_also_ask(
        self, paa_results: list[dict[str, Any]]
    ) -> list[PeopleAlsoAskResult]:
        """Process up to n_results 'People Also Ask' results; entries missing
        'question' are logged and skipped."""
        processed_results: list[PeopleAlsoAskResult] = []
        for result in paa_results[: self.n_results]:
            try:
                result_data: PeopleAlsoAskResult = {  # type: ignore[typeddict-item]
                    "question": result["question"],
                    "snippet": result.get("snippet", ""),
                    "title": result.get("title", ""),
                    "link": result.get("link", ""),
                }
                processed_results.append(result_data)
            except KeyError:  # noqa: PERF203
                logger.warning(f"Skipping malformed PAA result: {result}")
                continue
        return processed_results  # type: ignore[return-value]

    def _process_related_searches(
        self, related_results: list[dict[str, Any]]
    ) -> list[RelatedSearchResult]:
        """Process up to n_results related-search suggestions; entries missing
        'query' are logged and skipped."""
        processed_results: list[RelatedSearchResult] = []
        for result in related_results[: self.n_results]:
            try:
                processed_results.append({"query": result["query"]})  # type: ignore[typeddict-item]
            except KeyError:  # noqa: PERF203
                logger.warning(f"Skipping malformed related search result: {result}")
                continue
        return processed_results  # type: ignore[return-value]

    def _process_news_results(
        self, news_results: list[dict[str, Any]]
    ) -> list[NewsResult]:
        """Process up to n_results news results; entries missing title/link
        are logged and skipped."""
        processed_results: list[NewsResult] = []
        for result in news_results[: self.n_results]:
            try:
                result_data: NewsResult = {  # type: ignore[typeddict-item]
                    "title": result["title"],
                    "link": result["link"],
                    "snippet": result.get("snippet", ""),
                    "date": result.get("date", ""),
                    "source": result.get("source", ""),
                    "imageUrl": result.get("imageUrl", ""),
                }
                processed_results.append(result_data)
            except KeyError:  # noqa: PERF203
                logger.warning(f"Skipping malformed news result: {result}")
                continue
        return processed_results  # type: ignore[return-value]

    def _make_api_request(self, search_query: str, search_type: str) -> dict[str, Any]:
        """Make API request to Serper.

        Builds the payload from the query plus configured country/location/
        locale, POSTs with the SERPER_API_KEY header, and returns the parsed
        JSON body.

        Raises:
            ValueError: On an empty API response (or invalid search_type).
            requests.exceptions.RequestException: On transport/HTTP errors.
            json.JSONDecodeError: On an unparsable response body.
        """
        search_url = self._get_search_url(search_type)
        payload = {"q": search_query, "num": self.n_results}
        # NOTE(review): these guards only exclude "", not None — a None
        # country/location/locale would be sent verbatim; confirm intent.
        if self.country != "":
            payload["gl"] = self.country
        if self.location != "":
            payload["location"] = self.location
        if self.locale != "":
            payload["hl"] = self.locale
        headers = {
            "X-API-KEY": os.environ["SERPER_API_KEY"],
            "content-type": "application/json",
        }
        response = None
        try:
            response = requests.post(
                search_url, headers=headers, json=payload, timeout=10
            )
            response.raise_for_status()
            results = response.json()
            if not results:
                logger.error("Empty response from Serper API")
                raise ValueError("Empty response from Serper API")
            return results
        except requests.exceptions.RequestException as e:
            error_msg = f"Error making request to Serper API: {e}"
            if response is not None and hasattr(response, "content"):
                error_msg += f"\nResponse content: {response.content.decode('utf-8', errors='replace')}"
            logger.error(error_msg)
            raise
        except json.JSONDecodeError as e:
            if response is not None and hasattr(response, "content"):
                logger.error(f"Error decoding JSON response: {e}")
                logger.error(
                    f"Response content: {response.content.decode('utf-8', errors='replace')}"
                )
            else:
                logger.error(
                    f"Error decoding JSON response: {e} (No response content available)"
                )
            raise

    def _process_search_results(
        self, results: dict[str, Any], search_type: str
    ) -> dict[str, Any]:
        """Process search results based on search type.

        Only the sections actually present in the raw response are included
        in the returned dict.
        """
        formatted_results: dict[str, Any] = {}
        if search_type == "search":
            if "knowledgeGraph" in results:
                formatted_results["knowledgeGraph"] = self._process_knowledge_graph(
                    results["knowledgeGraph"]
                )
            if "organic" in results:
                formatted_results["organic"] = self._process_organic_results(
                    results["organic"]
                )
            if "peopleAlsoAsk" in results:
                formatted_results["peopleAlsoAsk"] = self._process_people_also_ask(
                    results["peopleAlsoAsk"]
                )
            if "relatedSearches" in results:
                formatted_results["relatedSearches"] = self._process_related_searches(
                    results["relatedSearches"]
                )
        elif search_type == "news":
            if "news" in results:
                formatted_results["news"] = self._process_news_results(results["news"])
        return formatted_results

    def _run(self, **kwargs: Any) -> FormattedResults:
        """Execute the search operation.

        Accepts search_query (or the alias "query"), and optional search_type
        / save_file overrides via kwargs. Optionally writes the formatted
        results to a timestamped file.

        Raises:
            ValueError: If no search query was provided.
        """
        search_query: str | None = kwargs.get("search_query") or kwargs.get("query")
        search_type: str = kwargs.get("search_type", self.search_type)
        save_file = kwargs.get("save_file", self.save_file)
        if not search_query:
            raise ValueError("search_query is required")
        results = self._make_api_request(search_query, search_type)
        # Start from our own parameters, letting the API's echoed
        # searchParameters override them where present.
        formatted_results = {
            "searchParameters": {
                "q": search_query,
                "type": search_type,
                **results.get("searchParameters", {}),
            }
        }
        formatted_results.update(self._process_search_results(results, search_type))
        formatted_results["credits"] = results.get("credits", 1)
        if save_file:
            _save_results_to_file(json.dumps(formatted_results, indent=2))
        return formatted_results  # type: ignore[return-value]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serper_dev_tool/serper_dev_tool.py",
"license": "MIT License",
"lines": 282,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py | import json
import os
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class SerperScrapeWebsiteInput(BaseModel):
    """Input schema for SerperScrapeWebsite."""

    url: str = Field(..., description="The URL of the website to scrape")
    include_markdown: bool = Field(
        default=True,
        description="Whether to include markdown formatting in the scraped content",
    )
class SerperScrapeWebsiteTool(BaseTool):
    """Scrape a web page's readable content through Serper's scrape API."""

    name: str = "serper_scrape_website"
    description: str = (
        "Scrapes website content using Serper's scraping API. "
        "This tool can extract clean, readable content from any website URL, "
        "optionally including markdown formatting for better structure."
    )
    args_schema: type[BaseModel] = SerperScrapeWebsiteInput
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPER_API_KEY", description="API key for Serper", required=True
            ),
        ]
    )

    def _run(self, url: str, include_markdown: bool = True) -> str:
        """Scrape website content using Serper API.

        Args:
            url: The URL to scrape.
            include_markdown: Whether to include markdown formatting.

        Returns:
            The scraped text content, or a human-readable error string when
            the request or response handling fails.
        """
        endpoint = "https://scrape.serper.dev"
        try:
            # API key is read from the environment for security.
            request_headers = {
                "X-API-KEY": os.getenv("SERPER_API_KEY"),
                "Content-Type": "application/json",
            }
            request_body = json.dumps(
                {"url": url, "includeMarkdown": include_markdown}
            )
            response = requests.post(
                endpoint,
                headers=request_headers,
                data=request_body,
                timeout=30,
            )
            if response.status_code != 200:
                return f"Error scraping {url}: HTTP {response.status_code} - {response.text}"
            result = response.json()
            if "text" in result:
                return result["text"]
            return f"Successfully scraped {url}, but no text content found in response: {response.text}"
        except requests.exceptions.RequestException as e:
            return f"Network error while scraping {url}: {e!s}"
        except json.JSONDecodeError as e:
            return f"Error parsing JSON response while scraping {url}: {e!s}"
        except Exception as e:
            return f"Unexpected error while scraping {url}: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serper_scrape_website_tool/serper_scrape_website_tool.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py | import os
from urllib.parse import urlencode
from crewai.tools import EnvVar
from pydantic import BaseModel, Field
import requests
from crewai_tools.tools.rag.rag_tool import RagTool
class SerplyJobSearchToolSchema(BaseModel):
    """Input for Job Search."""

    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to fetch jobs postings.",
    )
class SerplyJobSearchTool(RagTool):
    """Search US job postings through the Serply job-search API."""

    name: str = "Job Search"
    # Fix: the original description read "A tool to perform to perform a job
    # search..." (duplicated words).
    description: str = (
        "A tool to perform a job search in the US with a search_query."
    )
    args_schema: type[BaseModel] = SerplyJobSearchToolSchema
    request_url: str = "https://api.serply.io/v1/job/search/"
    proxy_location: str | None = "US"
    """
    proxy_location: (str): Where to get jobs, specifically for a specific country results.
    - Currently only supports US
    """
    headers: dict | None = Field(default_factory=dict)
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPLY_API_KEY",
                description="API key for Serply services",
                required=True,
            ),
        ]
    )

    def __init__(self, **kwargs):
        """Build the request headers from SERPLY_API_KEY and proxy_location."""
        super().__init__(**kwargs)
        self.headers = {
            "X-API-KEY": os.environ["SERPLY_API_KEY"],
            "User-Agent": "crew-tools",
            "X-Proxy-Location": self.proxy_location,
        }

    def _run(  # type: ignore[override]
        self,
        query: str | None = None,
        search_query: str | None = None,
    ) -> str:
        """Fetch job postings for the query and format them as plain text.

        Args:
            query: Search string (takes precedence over search_query).
            search_query: Alternate name for the search string.

        Returns:
            Formatted job listings, or "" when the API returns no jobs.
            Malformed entries (missing keys) are skipped.
        """
        query_payload = {}
        if query is not None:
            query_payload["q"] = query
        elif search_query is not None:
            query_payload["q"] = search_query

        # build the url
        url = f"{self.request_url}{urlencode(query_payload)}"
        response = requests.request("GET", url, headers=self.headers, timeout=30)
        jobs = response.json().get("jobs", "")
        if not jobs:
            return ""

        string = []
        for job in jobs:
            try:
                string.append(
                    "\n".join(
                        [
                            f"Position: {job['position']}",
                            f"Employer: {job['employer']}",
                            f"Location: {job['location']}",
                            f"Link: {job['link']}",
                            f"""Highest: {", ".join(job["highlights"])}""",
                            f"Is Remote: {job['is_remote']}",
                            # Fix: this line previously rendered
                            # job['is_remote'] again (copy-paste bug). Fall
                            # back to is_remote when the API omits is_hybrid
                            # so older payloads still format identically.
                            f"Is Hybrid: {job.get('is_hybrid', job['is_remote'])}",
                            "---",
                        ]
                    )
                )
            except KeyError:  # noqa: PERF203
                continue
        content = "\n".join(string)
        return f"\nSearch results: {content}\n"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_job_search_tool.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py | import os
from typing import Any
from urllib.parse import urlencode
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class SerplyNewsSearchToolSchema(BaseModel):
    """Input for Serply News Search."""

    search_query: str = Field(
        ..., description="Mandatory search query you want to use to fetch news articles"
    )
class SerplyNewsSearchTool(BaseTool):
    """Search news articles through the Serply news API."""

    name: str = "News Search"
    description: str = "A tool to perform News article search with a search_query."
    args_schema: type[BaseModel] = SerplyNewsSearchToolSchema
    search_url: str = "https://api.serply.io/v1/news/"
    proxy_location: str | None = "US"
    headers: dict | None = Field(default_factory=dict)
    limit: int | None = 10
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPLY_API_KEY",
                description="API key for Serply services",
                required=True,
            ),
        ]
    )

    def __init__(
        self, limit: int | None = 10, proxy_location: str | None = "US", **kwargs
    ):
        """param: limit (int): The maximum number of results to return [10-100, defaults to 10]
        proxy_location: (str): Where to get news, specifically for a specific country results.
        ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US).
        """
        super().__init__(**kwargs)
        self.limit = limit
        self.proxy_location = proxy_location
        self.headers = {
            "X-API-KEY": os.environ["SERPLY_API_KEY"],
            "User-Agent": "crew-tools",
            "X-Proxy-Location": proxy_location,
        }

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Fetch news entries for the query (kwargs: query / search_query)
        and format up to ``limit`` of them; malformed entries are skipped."""
        # build query parameters
        query_payload = {}
        if "query" in kwargs:
            query_payload["q"] = kwargs["query"]
        elif "search_query" in kwargs:
            query_payload["q"] = kwargs["search_query"]

        # build the url
        url = f"{self.search_url}{urlencode(query_payload)}"
        response = requests.request(
            "GET",
            url,
            headers=self.headers,
            timeout=30,
        )
        results = response.json()
        if "entries" in results:
            results = results["entries"]
            string = []
            for result in results[: self.limit]:
                try:
                    # Follow the aggregator link to discover the final URL.
                    r = requests.get(
                        result["link"],
                        timeout=30,
                    )
                    # Fix: r.history is empty when the link did not redirect,
                    # so indexing [-1] raised an uncaught IndexError; fall
                    # back to the original link in that case.
                    final_link = (
                        r.history[-1].headers["Location"]
                        if r.history
                        else result["link"]
                    )
                    string.append(
                        "\n".join(
                            [
                                f"Title: {result['title']}",
                                f"Link: {final_link}",
                                f"Source: {result['source']['title']}",
                                f"Published: {result['published']}",
                                "---",
                            ]
                        )
                    )
                except KeyError:  # noqa: PERF203
                    continue
            content = "\n".join(string)
            return f"\nSearch results: {content}\n"
        return results
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_news_search_tool.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py | import os
from typing import Any
from urllib.parse import urlencode
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class SerplyScholarSearchToolSchema(BaseModel):
    """Input for Serply Scholar Search."""

    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to fetch scholarly literature",
    )
class SerplyScholarSearchTool(BaseTool):
    """Search scholarly literature through the Serply scholar API."""

    name: str = "Scholar Search"
    description: str = (
        "A tool to perform scholarly literature search with a search_query."
    )
    args_schema: type[BaseModel] = SerplyScholarSearchToolSchema
    search_url: str = "https://api.serply.io/v1/scholar/"
    hl: str | None = "us"
    proxy_location: str | None = "US"
    headers: dict | None = Field(default_factory=dict)
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPLY_API_KEY",
                description="API key for Serply services",
                required=True,
            ),
        ]
    )

    def __init__(self, hl: str = "us", proxy_location: str | None = "US", **kwargs):
        """param: hl (str): host Language code to display results in
        (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages)
        proxy_location: (str): Specify the proxy location for the search, specifically for a specific country results.
        ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US).
        """
        super().__init__(**kwargs)
        self.hl = hl
        self.proxy_location = proxy_location
        self.headers = {
            "X-API-KEY": os.environ["SERPLY_API_KEY"],
            "User-Agent": "crew-tools",
            "X-Proxy-Location": proxy_location,
        }

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Fetch scholar articles for the query (kwargs: query /
        search_query) and format them; malformed entries are skipped."""
        params = {"hl": self.hl}
        if "query" in kwargs:
            params["q"] = kwargs["query"]
        elif "search_query" in kwargs:
            params["q"] = kwargs["search_query"]

        response = requests.request(
            "GET",
            f"{self.search_url}{urlencode(params)}",
            headers=self.headers,
            timeout=30,
        )
        articles = response.json().get("articles", "")
        if not articles:
            return ""

        formatted = []
        for article in articles:
            try:
                # Prefer the document link when the entry wraps a "doc".
                link = (
                    article["doc"]["link"] if "doc" in article else article["link"]
                )
                authors = ", ".join(
                    author["name"] for author in article["author"]["authors"]
                )
                formatted.append(
                    f"Title: {article['title']}\n"
                    f"Link: {link}\n"
                    f"Description: {article['description']}\n"
                    f"Cite: {article['cite']}\n"
                    f"Authors: {authors}\n"
                    "---"
                )
            except KeyError:  # noqa: PERF203
                continue
        content = "\n".join(formatted)
        return f"\nSearch results: {content}\n"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_scholar_search_tool.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py | import os
from typing import Any
from urllib.parse import urlencode
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class SerplyWebSearchToolSchema(BaseModel):
    """Input for Serply Web Search."""

    search_query: str = Field(
        ..., description="Mandatory search query you want to use to Google search"
    )
class SerplyWebSearchTool(BaseTool):
    """Google-style web search through the Serply search API."""

    name: str = "Google Search"
    description: str = "A tool to perform Google search with a search_query."
    args_schema: type[BaseModel] = SerplyWebSearchToolSchema
    search_url: str = "https://api.serply.io/v1/search/"
    hl: str | None = "us"
    limit: int | None = 10
    device_type: str | None = "desktop"
    proxy_location: str | None = "US"
    query_payload: dict | None = Field(default_factory=dict)
    headers: dict | None = Field(default_factory=dict)
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPLY_API_KEY",
                description="API key for Serply services",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        hl: str = "us",
        limit: int = 10,
        device_type: str = "desktop",
        proxy_location: str = "US",
        **kwargs,
    ):
        """param: query (str): The query to search for
        param: hl (str): host Language code to display results in
        (reference https://developers.google.com/custom-search/docs/xml_results?hl=en#wsInterfaceLanguages)
        param: limit (int): The maximum number of results to return [10-100, defaults to 10]
        param: device_type (str): desktop/mobile results (defaults to desktop)
        proxy_location: (str): Where to perform the search, specifically for local/regional results.
        ['US', 'CA', 'IE', 'GB', 'FR', 'DE', 'SE', 'IN', 'JP', 'KR', 'SG', 'AU', 'BR'] (defaults to US).
        """
        super().__init__(**kwargs)
        self.limit = limit
        self.device_type = device_type
        self.proxy_location = proxy_location
        # build query parameters
        self.query_payload = {
            "num": limit,
            "gl": proxy_location.upper(),
            "hl": hl.lower(),
        }
        self.headers = {
            "X-API-KEY": os.environ["SERPLY_API_KEY"],
            "X-User-Agent": device_type,
            "User-Agent": "crew-tools",
            "X-Proxy-Location": proxy_location,
        }

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Run the search (kwargs: query / search_query) and format up to
        ``limit`` results; malformed entries are skipped."""
        # Fix: work on a copy instead of mutating self.query_payload, so a
        # previous call's "q" cannot leak into a later request.
        query_payload = dict(self.query_payload or {})
        if "query" in kwargs:
            query_payload["q"] = kwargs["query"]
        elif "search_query" in kwargs:
            query_payload["q"] = kwargs["search_query"]

        # build the url
        url = f"{self.search_url}{urlencode(query_payload)}"
        response = requests.request(
            "GET",
            url,
            headers=self.headers,
            timeout=30,
        )
        results = response.json()
        if "results" in results:
            results = results["results"]
            string = []
            for result in results:
                try:
                    string.append(
                        "\n".join(
                            [
                                f"Title: {result['title']}",
                                f"Link: {result['link']}",
                                f"Description: {result['description'].strip()}",
                                "---",
                            ]
                        )
                    )
                except KeyError:  # noqa: PERF203
                    continue
            content = "\n".join(string)
            return f"\nSearch results: {content}\n"
        return results
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_web_search_tool.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py | import os
from typing import Any, Literal
from crewai.tools import EnvVar
from pydantic import BaseModel, Field
import requests
from crewai_tools.tools.rag.rag_tool import RagTool
class SerplyWebpageToMarkdownToolSchema(BaseModel):
    """Input for Serply Search."""

    # Target page; the Serply request API fetches it and returns markdown.
    url: str = Field(
        ...,
        description="Mandatory url you want to use to fetch and convert to markdown",
    )
class SerplyWebpageToMarkdownTool(RagTool):
    """Fetch a webpage through the Serply request API and return it as markdown."""

    name: str = "Webpage to Markdown"
    description: str = "A tool to perform convert a webpage to markdown to make it easier for LLMs to understand"
    args_schema: type[BaseModel] = SerplyWebpageToMarkdownToolSchema
    request_url: str = "https://api.serply.io/v1/request"
    proxy_location: Literal[
        "US", "CA", "IE", "GB", "FR", "DE", "SE", "IN", "JP", "KR", "SG", "AU", "BR"
    ] = "US"
    headers: dict[str, Any] = Field(
        default_factory=lambda: {
            "X-API-KEY": os.environ["SERPLY_API_KEY"],
            "User-Agent": "crew-tools",
        }
    )
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPLY_API_KEY",
                description="API key for Serply services",
                required=True,
            ),
        ]
    )

    def _run(  # type: ignore[override]
        self,
        url: str,
    ) -> str:
        """POST the target URL to Serply and return the markdown response body."""
        # Attach the region header once, unless the caller already set one.
        if self.proxy_location and not self.headers.get("X-Proxy-Location"):
            self.headers["X-Proxy-Location"] = self.proxy_location
        payload = {"url": url, "method": "GET", "response_type": "markdown"}
        response = requests.post(
            self.request_url,
            headers=self.headers,
            json=payload,
            timeout=30,
        )
        return response.text
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/serply_api_tool/serply_webpage_to_markdown_tool.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/singlestore_search_tool/singlestore_search_tool.py | from collections.abc import Callable
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
try:
from singlestoredb import connect
from sqlalchemy.pool import QueuePool
SINGLSTORE_AVAILABLE = True
except ImportError:
SINGLSTORE_AVAILABLE = False
class SingleStoreSearchToolSchema(BaseModel):
    """Input schema for SingleStoreSearchTool.

    This schema defines the expected input format for the search tool,
    ensuring that only valid SELECT and SHOW queries are accepted.
    """

    # Raw SQL forwarded to the database after SingleStoreSearchTool's
    # `_validate_query` confirms it starts with SELECT or SHOW.
    search_query: str = Field(
        ...,
        description=(
            "Mandatory semantic search query you want to use to search the database's content. "
            "Only SELECT and SHOW queries are supported."
        ),
    )
class SingleStoreSearchTool(BaseTool):
    """A tool for performing semantic searches on SingleStore database tables.

    This tool provides a safe interface for executing SELECT and SHOW queries
    against a SingleStore database, with connection pooling (SQLAlchemy
    ``QueuePool``) for efficient reuse of connections.
    """

    name: str = "Search a database's table(s) content"
    description: str = (
        "A tool that can be used to semantic search a query from a database."
    )
    args_schema: type[BaseModel] = SingleStoreSearchToolSchema
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["singlestoredb", "SQLAlchemy"]
    )
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SINGLESTOREDB_URL",
                description="A comprehensive URL string that can encapsulate host, port,"
                " username, password, and database information, often used in environments"
                " like SingleStore notebooks or specific frameworks."
                " For example: 'me:p455w0rd@s2-host.com/my_db'",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_HOST",
                description="Specifies the hostname, IP address, or URL of"
                " the SingleStoreDB workspace or cluster",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_PORT",
                description="Defines the port number on which the"
                " SingleStoreDB server is listening",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_USER",
                description="Specifies the database user name",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_PASSWORD",
                description="Specifies the database user password",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_DATABASE",
                description="Name of the database to connect to",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_SSL_KEY",
                description="File containing SSL key",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_SSL_CERT",
                description="File containing SSL certificate",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_SSL_CA",
                description="File containing SSL certificate authority",
                required=False,
                default=None,
            ),
            EnvVar(
                name="SINGLESTOREDB_CONNECT_TIMEOUT",
                description="The timeout for connecting to the database in seconds",
                required=False,
                default=None,
            ),
        ]
    )
    # Keyword arguments handed to `singlestoredb.connect` by `_create_connection`.
    connection_args: dict = Field(default_factory=dict)
    # SQLAlchemy QueuePool instance created in `__init__`.
    connection_pool: Any | None = None

    def __init__(
        self,
        tables: list[str] | None = None,
        # Basic connection parameters
        host: str | None = None,
        user: str | None = None,
        password: str | None = None,
        port: int | None = None,
        database: str | None = None,
        driver: str | None = None,
        # Connection behavior options
        pure_python: bool | None = None,
        local_infile: bool | None = None,
        charset: str | None = None,
        # SSL/TLS configuration
        ssl_key: str | None = None,
        ssl_cert: str | None = None,
        ssl_ca: str | None = None,
        ssl_disabled: bool | None = None,
        ssl_cipher: str | None = None,
        ssl_verify_cert: bool | None = None,
        tls_sni_servername: str | None = None,
        ssl_verify_identity: bool | None = None,
        # Advanced connection options
        conv: dict[int, Callable[..., Any]] | None = None,
        credential_type: str | None = None,
        autocommit: bool | None = None,
        # Result formatting options
        results_type: str | None = None,
        buffered: bool | None = None,
        results_format: str | None = None,
        program_name: str | None = None,
        conn_attrs: dict[str, str] | None = None,
        # Query execution options
        multi_statements: bool | None = None,
        client_found_rows: bool | None = None,
        connect_timeout: int | None = None,
        # Data type handling
        nan_as_null: bool | None = None,
        inf_as_null: bool | None = None,
        encoding_errors: str | None = None,
        track_env: bool | None = None,
        enable_extended_data_types: bool | None = None,
        vector_data_format: str | None = None,
        parse_json: bool | None = None,
        # Connection pool configuration
        pool_size: int | None = 5,
        max_overflow: int | None = 10,
        timeout: float | None = 30,
        **kwargs,
    ):
        """Initialize the SingleStore search tool.

        Args:
            tables: List of table names to work with. If empty, all tables will be used.
            host: Database host address
            user: Database username
            password: Database password
            port: Database port number
            database: Database name
            pool_size: Maximum number of connections in the pool
            max_overflow: Maximum overflow connections beyond pool_size
            timeout: Connection timeout in seconds
            **kwargs: Additional arguments passed to the parent class

        Raises:
            ImportError: If the singlestore dependencies are missing and cannot
                be installed.
            ValueError: If the database has no tables or a requested table is
                missing (raised by `_initialize_tables`).
        """
        if conn_attrs is None:
            conn_attrs = {}
        if tables is None:
            tables = []
        if not SINGLSTORE_AVAILABLE:
            import click

            if click.confirm(
                "You are missing the 'singlestore' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(
                        ["uv", "add", "crewai-tools[singlestore]"],  # noqa: S607
                        check=True,
                    )
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install singlestore package") from e
            else:
                raise ImportError(
                    "`singlestore` package not found, please run `uv add crewai-tools[singlestore]`"
                )
        # Set the data type for the parent class
        kwargs["data_type"] = "singlestore"
        super().__init__(**kwargs)
        # Build connection arguments dictionary with sensible defaults.
        # For defaulted options we test `is None` rather than using `or`, so an
        # explicit falsy value (False, 0) supplied by the caller is respected.
        self.connection_args = {
            # Basic connection parameters
            "host": host,
            "user": user,
            "password": password,
            "port": port,
            "database": database,
            "driver": driver,
            # Connection behavior
            "pure_python": pure_python,
            "local_infile": local_infile,
            "charset": charset,
            # SSL/TLS settings
            "ssl_key": ssl_key,
            "ssl_cert": ssl_cert,
            "ssl_ca": ssl_ca,
            "ssl_disabled": ssl_disabled,
            "ssl_cipher": ssl_cipher,
            "ssl_verify_cert": ssl_verify_cert,
            "tls_sni_servername": tls_sni_servername,
            "ssl_verify_identity": ssl_verify_identity,
            # Advanced options
            "conv": conv or {},
            "credential_type": credential_type,
            "autocommit": autocommit,
            # Result formatting
            "results_type": results_type,
            "buffered": buffered,
            "results_format": results_format,
            "program_name": program_name,
            "conn_attrs": conn_attrs or {},
            # Query execution
            "connect_timeout": 10 if connect_timeout is None else connect_timeout,
            "multi_statements": multi_statements,
            "client_found_rows": client_found_rows,
            # Data type handling with defaults
            "nan_as_null": nan_as_null or False,
            "inf_as_null": inf_as_null or False,
            "encoding_errors": encoding_errors or "strict",
            "track_env": track_env or False,
            "enable_extended_data_types": enable_extended_data_types or False,
            "vector_data_format": vector_data_format or "binary",
            # Bug fix: `parse_json or True` coerced an explicit False back to
            # True; only a missing (None) value should fall back to the default.
            "parse_json": True if parse_json is None else parse_json,
        }
        # Tag the connection so it is identifiable server-side. `conn_attrs`
        # is guaranteed to be a dict here (defaulted above), so the previous
        # re-initialization guard was redundant and has been removed.
        self.connection_args["conn_attrs"]["_connector_name"] = (
            "crewAI SingleStore Tool"
        )
        self.connection_args["conn_attrs"]["_connector_version"] = "1.0"
        # Initialize connection pool for efficient connection management
        self.connection_pool = QueuePool(
            creator=self._create_connection,  # type: ignore[arg-type]
            pool_size=pool_size or 5,
            max_overflow=max_overflow or 10,
            timeout=timeout or 30.0,
        )
        # Validate database schema and initialize table information
        self._initialize_tables(tables)

    def _initialize_tables(self, tables: list[str]) -> None:
        """Initialize and validate the tables that this tool will work with.

        Also rewrites `self.description` to list the validated tables and
        their column definitions.

        Args:
            tables: List of table names to validate and use

        Raises:
            ValueError: If no tables exist or specified tables don't exist
        """
        conn = self._get_connection()
        try:
            with conn.cursor() as cursor:
                # Get all existing tables in the database
                cursor.execute("SHOW TABLES")
                existing_tables = {table[0] for table in cursor.fetchall()}
                if not existing_tables:
                    raise ValueError(
                        "No tables found in the database. "
                        "Please ensure the database is initialized with the required tables."
                    )
                # Use all tables if none specified
                if not tables:
                    tables = list(existing_tables)
                # Build table definitions for the tool description
                table_definitions = []
                for table in tables:
                    if table not in existing_tables:
                        raise ValueError(
                            f"Table {table} does not exist in the database. "
                            f"Please ensure the table is created."
                        )
                    # Safe to interpolate: `table` was just validated against
                    # the database's own table list.
                    cursor.execute(f"SHOW COLUMNS FROM {table}")
                    columns = cursor.fetchall()
                    column_info = ", ".join(f"{row[0]} {row[1]}" for row in columns)
                    table_definitions.append(f"{table}({column_info})")
        finally:
            # Ensure the connection is returned to the pool
            conn.close()
        # Update the tool description with actual table information
        self.description = (
            f"A tool that can be used to semantic search a query from a SingleStore "
            f"database's {', '.join(table_definitions)} table(s) content."
        )
        self._generate_description()

    def _get_connection(self) -> Any:
        """Check a connection out of the connection pool.

        Returns:
            Connection: A SingleStore database connection

        Raises:
            Exception: Propagated from the pool if a connection cannot be
                established.
        """
        return self.connection_pool.connect()  # type: ignore[union-attr]

    def _create_connection(self) -> Any:
        """Create a brand-new SingleStore connection (pool `creator` hook).

        Returns:
            Connection: A new SingleStore database connection

        Raises:
            Exception: Propagated from `singlestoredb.connect` on failure.
        """
        return connect(**self.connection_args)

    def _validate_query(self, search_query: str) -> tuple[bool, str]:
        """Validate the search query to ensure it's safe to execute.

        Only SELECT and SHOW statements are allowed for security reasons.

        Args:
            search_query: The SQL query to validate

        Returns:
            tuple: (is_valid: bool, message: str)
        """
        if not isinstance(search_query, str):
            return False, "Search query must be a string."
        # Case-insensitive prefix check on the trimmed statement.
        query_lower = search_query.strip().lower()
        if not query_lower.startswith(("select", "show")):
            return (
                False,
                "Only SELECT and SHOW queries are supported for security reasons.",
            )
        return True, "Valid query"

    def _run(self, search_query: str) -> Any:
        """Execute the search query against the SingleStore database.

        Args:
            search_query: The SQL query to execute

        Returns:
            str: Formatted search results or error message
        """
        # Validate the query before execution
        valid, message = self._validate_query(search_query)
        if not valid:
            return f"Invalid search query: {message}"
        # Execute the query using a connection from the pool
        conn = self._get_connection()
        try:
            with conn.cursor() as cursor:
                try:
                    cursor.execute(search_query)
                    results = cursor.fetchall()
                    if not results:
                        return "No results found."
                    # One comma-separated line per row for readable output.
                    formatted_results = "\n".join(
                        [", ".join([str(item) for item in row]) for row in results]
                    )
                    return f"Search Results:\n{formatted_results}"
                except Exception as e:
                    # Surface execution errors as a string result rather than
                    # crashing the agent run.
                    return f"Error executing search query: {e}"
        finally:
            # Ensure the connection is returned to the pool
            conn.close()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/singlestore_search_tool/singlestore_search_tool.py",
"license": "MIT License",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py | from __future__ import annotations
import asyncio
from concurrent.futures import ThreadPoolExecutor
import logging
from typing import TYPE_CHECKING, Any
from crewai.tools.base_tool import BaseTool
from pydantic import BaseModel, ConfigDict, Field, SecretStr
if TYPE_CHECKING:
# Import types for type checking only
from snowflake.connector.connection import ( # type: ignore[import-not-found]
SnowflakeConnection,
)
from snowflake.connector.errors import ( # type: ignore[import-not-found]
DatabaseError,
OperationalError,
)
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import snowflake.connector # type: ignore[import-not-found]
SNOWFLAKE_AVAILABLE = True
except ImportError:
SNOWFLAKE_AVAILABLE = False
# Configure logging
logger = logging.getLogger(__name__)
# Cache for query results
_query_cache: dict[str, list[dict[str, Any]]] = {}
class SnowflakeConfig(BaseModel):
    """Configuration for Snowflake connection."""

    model_config = ConfigDict(protected_namespaces=())
    # Account identifier; the pattern rejects anything outside [a-zA-Z0-9-_].
    account: str = Field(
        ..., description="Snowflake account identifier", pattern=r"^[a-zA-Z0-9\-_]+$"
    )
    user: str = Field(..., description="Snowflake username")
    # At least one of `password` / `private_key_path` must be set — enforced
    # in `model_post_init` via `has_auth`.
    password: SecretStr | None = Field(None, description="Snowflake password")
    private_key_path: str | None = Field(None, description="Path to private key file")
    warehouse: str | None = Field(None, description="Snowflake warehouse")
    database: str | None = Field(None, description="Default database")
    snowflake_schema: str | None = Field(None, description="Default schema")
    role: str | None = Field(None, description="Snowflake role")
    session_parameters: dict[str, Any] | None = Field(
        default_factory=dict, description="Session parameters"
    )

    @property
    def has_auth(self) -> bool:
        """Return True when at least one authentication method is configured."""
        return bool(self.password or self.private_key_path)

    def model_post_init(self, *args, **kwargs):
        """Reject configurations with no usable authentication method."""
        if not self.has_auth:
            raise ValueError("Either password or private_key_path must be provided")
class SnowflakeSearchToolInput(BaseModel):
    """Input schema for SnowflakeSearchTool."""

    model_config = ConfigDict(protected_namespaces=())
    query: str = Field(..., description="SQL query or semantic search query to execute")
    # Per-call overrides for the connection's default database/schema; applied
    # via USE DATABASE / USE SCHEMA statements in `_run`.
    database: str | None = Field(None, description="Override default database")
    snowflake_schema: str | None = Field(None, description="Override default schema")
    timeout: int | None = Field(300, description="Query timeout in seconds")
class SnowflakeSearchTool(BaseTool):
    """Tool for executing queries and semantic search on Snowflake."""

    name: str = "Snowflake Database Search"
    description: str = (
        "Execute SQL queries or semantic search on Snowflake data warehouse. "
        "Supports both raw SQL and natural language queries."
    )
    args_schema: type[BaseModel] = SnowflakeSearchToolInput
    # Define Pydantic fields
    config: SnowflakeConfig = Field(
        ..., description="Snowflake connection configuration"
    )
    pool_size: int = Field(default=5, description="Size of connection pool")
    max_retries: int = Field(default=3, description="Maximum retry attempts")
    retry_delay: float = Field(
        default=1.0, description="Delay between retries in seconds"
    )
    enable_caching: bool = Field(
        default=True, description="Enable query result caching"
    )
    model_config = ConfigDict(
        arbitrary_types_allowed=True, validate_assignment=True, frozen=False
    )
    # Pool of reusable connections, guarded by `_pool_lock`; queries run on
    # `_thread_pool` because the Snowflake connector is synchronous.
    _connection_pool: list[SnowflakeConnection] | None = None
    _pool_lock: asyncio.Lock | None = None
    _thread_pool: ThreadPoolExecutor | None = None
    _model_rebuilt: bool = False
    package_dependencies: list[str] = Field(
        default_factory=lambda: [
            "snowflake-connector-python",
            "snowflake-sqlalchemy",
            "cryptography",
        ]
    )

    def __init__(self, **data):
        """Initialize SnowflakeSearchTool."""
        super().__init__(**data)
        self._initialize_snowflake()

    def _initialize_snowflake(self) -> None:
        """Create the pool/lock/executor, offering to install missing deps."""
        try:
            if SNOWFLAKE_AVAILABLE:
                self._connection_pool = []
                self._pool_lock = asyncio.Lock()
                self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size)
            else:
                raise ImportError
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'snowflake-connector-python' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(
                        [  # noqa: S607
                            "uv",
                            "add",
                            "cryptography",
                            "snowflake-connector-python",
                            "snowflake-sqlalchemy",
                        ],
                        check=True,
                    )
                    self._connection_pool = []
                    self._pool_lock = asyncio.Lock()
                    self._thread_pool = ThreadPoolExecutor(max_workers=self.pool_size)
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install Snowflake dependencies") from e
            else:
                raise ImportError(
                    "Snowflake dependencies not found. Please install them by running "
                    "`uv add cryptography snowflake-connector-python snowflake-sqlalchemy`"
                ) from None

    async def _get_connection(self) -> SnowflakeConnection:
        """Get a connection from the pool or create a new one."""
        if self._pool_lock is None:
            raise RuntimeError("Pool lock not initialized")
        if self._connection_pool is None:
            raise RuntimeError("Connection pool not initialized")
        async with self._pool_lock:
            if not self._connection_pool:
                # Connection creation is blocking; run it on the executor.
                conn = await asyncio.get_event_loop().run_in_executor(
                    self._thread_pool, self._create_connection
                )
                self._connection_pool.append(conn)
            return self._connection_pool.pop()

    def _create_connection(self) -> SnowflakeConnection:
        """Create a new Snowflake connection from `self.config`."""
        conn_params: dict[str, Any] = {
            "account": self.config.account,
            "user": self.config.user,
            "warehouse": self.config.warehouse,
            "database": self.config.database,
            "schema": self.config.snowflake_schema,
            "role": self.config.role,
            "session_parameters": self.config.session_parameters,
        }
        if self.config.password:
            conn_params["password"] = self.config.password.get_secret_value()
        elif self.config.private_key_path and serialization:
            # Key-pair authentication: load the PEM private key from disk.
            with open(self.config.private_key_path, "rb") as key_file:
                p_key = serialization.load_pem_private_key(
                    key_file.read(), password=None, backend=default_backend()
                )
            conn_params["private_key"] = p_key
        return snowflake.connector.connect(**conn_params)

    def _get_cache_key(self, query: str, timeout: int) -> str:
        """Generate a cache key for the query."""
        return f"{self.config.account}:{self.config.database}:{self.config.snowflake_schema}:{query}:{timeout}"

    async def _execute_query(
        self, query: str, timeout: int = 300
    ) -> list[dict[str, Any]]:
        """Execute a query with retries and return results.

        Returns one dict per row keyed by column name. Results are cached when
        ``enable_caching`` is set; retries use exponential backoff.
        """
        # Bug fix: imported at call time. The module-level import of these
        # error classes is guarded by TYPE_CHECKING, so referencing them in the
        # `except` clause below used to raise NameError at runtime.
        from snowflake.connector.errors import DatabaseError, OperationalError

        if self.enable_caching:
            cache_key = self._get_cache_key(query, timeout)
            if cache_key in _query_cache:
                logger.info("Returning cached result")
                return _query_cache[cache_key]
        for attempt in range(self.max_retries):
            try:
                conn = await self._get_connection()
                # Created outside the try so the finally never sees an unbound
                # `cursor` if cursor creation itself fails.
                cursor = conn.cursor()
                try:
                    cursor.execute(query, timeout=timeout)
                    if not cursor.description:
                        results: list[dict[str, Any]] = []
                        cacheable = False
                    else:
                        columns = [col[0] for col in cursor.description]
                        results = [
                            dict(zip(columns, row, strict=False))
                            for row in cursor.fetchall()
                        ]
                        cacheable = True
                finally:
                    cursor.close()
                # Bug fix: return the connection to the pool. This code used to
                # sit after a try/finally whose try block always returned, so it
                # was unreachable and every checked-out connection leaked.
                if self._pool_lock is not None and self._connection_pool is not None:
                    async with self._pool_lock:
                        self._connection_pool.append(conn)
                if cacheable and self.enable_caching:
                    _query_cache[self._get_cache_key(query, timeout)] = results
                return results
            except (DatabaseError, OperationalError) as e:  # noqa: PERF203
                if attempt == self.max_retries - 1:
                    raise
                # Exponential backoff before retrying.
                await asyncio.sleep(self.retry_delay * (2**attempt))
                logger.warning(f"Query failed, attempt {attempt + 1}: {e!s}")
                continue
        raise RuntimeError("Query failed after all retries")

    async def _run(
        self,
        query: str,
        database: str | None = None,
        snowflake_schema: str | None = None,
        timeout: int = 300,
        **kwargs: Any,
    ) -> Any:
        """Execute the search query, optionally switching database/schema first."""
        try:
            # Override database/schema if provided
            if database:
                await self._execute_query(f"USE DATABASE {database}")
            if snowflake_schema:
                await self._execute_query(f"USE SCHEMA {snowflake_schema}")
            return await self._execute_query(query, timeout)
        except Exception as e:
            logger.error(f"Error executing query: {e!s}")
            raise

    def __del__(self):
        """Cleanup connections on deletion."""
        try:
            if self._connection_pool:
                for conn in self._connection_pool:
                    try:
                        conn.close()
                    except Exception:  # noqa: PERF203, S110
                        pass
            if self._thread_pool:
                self._thread_pool.shutdown()
        except Exception:  # noqa: S110
            # Best-effort teardown; interpreter may already be shutting down.
            pass
try:
    # Only rebuild if the class hasn't been initialized yet
    # NOTE(review): `_model_rebuilt` is declared as a class-level private
    # attribute on SnowflakeSearchTool above, so this `hasattr` check is
    # likely always True and the rebuild may never execute — confirm whether
    # the guard should test the attribute's *value* instead.
    if not hasattr(SnowflakeSearchTool, "_model_rebuilt"):
        SnowflakeSearchTool.model_rebuild()
        SnowflakeSearchTool._model_rebuilt = True
except Exception:  # noqa: S110
    pass
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/snowflake_search_tool/snowflake_search_tool.py",
"license": "MIT License",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/spider_tool/spider_tool.py | import logging
import subprocess
from typing import Any, Literal
from urllib.parse import unquote, urlparse
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
logger = logging.getLogger(__file__)
class SpiderToolSchema(BaseModel):
    """Input schema for SpiderTool."""

    # Target page; checked by SpiderTool._validate_url before any request.
    website_url: str = Field(
        ..., description="Mandatory website URL to scrape or crawl"
    )
    mode: Literal["scrape", "crawl"] = Field(
        default="scrape",
        description="The mode of the SpiderTool. The only two allowed modes are `scrape` or `crawl`. Crawl mode will follow up to 5 links and return their content in markdown format.",
    )
class SpiderToolConfig(BaseModel):
    """Configuration settings for SpiderTool.

    Contains all default values and constants used by SpiderTool.
    Centralizes configuration management for easier maintenance.
    """

    # Crawling settings
    DEFAULT_CRAWL_LIMIT: int = 5  # max pages followed in crawl mode
    DEFAULT_RETURN_FORMAT: str = "markdown"
    # Request parameters
    # "smart" presumably lets the Spider API pick the fetch strategy — confirm
    # against the Spider.cloud API docs.
    DEFAULT_REQUEST_MODE: str = "smart"
    FILTER_SVG: bool = True  # maps to the API's `filter_output_svg` parameter
class SpiderTool(BaseTool):
    """Tool for scraping and crawling websites.

    This tool provides functionality to either scrape a single webpage or crawl
    multiple pages, returning content in a format suitable for LLM processing.
    """

    name: str = "SpiderTool"
    description: str = (
        "A tool to scrape or crawl a website and return LLM-ready content."
    )
    args_schema: type[BaseModel] = SpiderToolSchema
    custom_params: dict[str, Any] | None = None
    website_url: str | None = None
    api_key: str | None = None
    spider: Any = None
    log_failures: bool = True
    config: SpiderToolConfig = SpiderToolConfig()
    package_dependencies: list[str] = Field(default_factory=lambda: ["spider-client"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SPIDER_API_KEY",
                description="API key for Spider.cloud",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        api_key: str | None = None,
        website_url: str | None = None,
        custom_params: dict[str, Any] | None = None,
        log_failures: bool = True,
        **kwargs,
    ):
        """Initialize SpiderTool for web scraping and crawling.

        Args:
            api_key (Optional[str]): Spider API key for authentication. Required for production use.
            website_url (Optional[str]): Default website URL to scrape/crawl. Can be overridden during execution.
            custom_params (Optional[Dict[str, Any]]): Additional parameters to pass to Spider API.
                These override any parameters set by the LLM.
            log_failures (bool): If True, logs errors. Defaults to True.
            **kwargs: Additional arguments passed to BaseTool.

        Raises:
            ImportError: If spider-client package is not installed.
            RuntimeError: If Spider client initialization fails.
        """
        super().__init__(**kwargs)
        if website_url is not None:
            self.website_url = website_url
        self.log_failures = log_failures
        self.custom_params = custom_params
        try:
            from spider import Spider  # type: ignore
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'spider-client' package. Would you like to install it?"
            ):
                subprocess.run(["uv", "pip", "install", "spider-client"], check=True)  # noqa: S607
                from spider import Spider  # type: ignore[import-untyped]
            else:
                raise ImportError(
                    "`spider-client` package not found, please run `uv add spider-client`"
                ) from None
        self.spider = Spider(api_key=api_key)

    def _validate_url(self, url: str) -> bool:
        """Validate URL format and security constraints.

        Args:
            url (str): URL to validate. Must be a properly formatted HTTP(S) URL

        Returns:
            bool: True if URL is valid and meets security requirements, False otherwise.
        """
        try:
            url = url.strip()
            # Decode percent-encoding first so encoded schemes can't slip through.
            decoded_url = unquote(url)
            result = urlparse(decoded_url)
            if not (result.scheme and result.netloc):
                return False
            return result.scheme in ("http", "https")
        except Exception:
            return False

    def _run(
        self,
        website_url: str,
        mode: Literal["scrape", "crawl"] = "scrape",
    ) -> str | None:
        """Execute the spider tool to scrape or crawl the specified website.

        Args:
            website_url (str): The URL to process. Must be a valid HTTP(S) URL.
            mode (Literal["scrape", "crawl"]): Operation mode.
                - "scrape": Extract content from single page
                - "crawl": Follow links and extract content from multiple pages

        Returns:
            Optional[str]: Extracted content in markdown format, or None if extraction fails
                and log_failures is True.

        Raises:
            ValueError: If URL is invalid or missing, or if mode is invalid.
            ImportError: If spider-client package is not properly installed.
            ConnectionError: If network connection fails while accessing the URL.
            Exception: For other runtime errors.
        """
        try:
            url = website_url or self.website_url
            if not url:
                raise ValueError(
                    "Website URL must be provided either during initialization or execution"
                )
            if not self._validate_url(url):
                raise ValueError(f"Invalid URL format: {url}")
            # Defensive runtime check: the Literal annotation is not enforced
            # when the method is called directly.
            if mode not in ["scrape", "crawl"]:
                raise ValueError(
                    f"Invalid mode: {mode}. Must be either 'scrape' or 'crawl'"
                )
            # (Removed a dead `params = {}` assignment that was immediately
            # overwritten here.)
            params = {
                "request": self.config.DEFAULT_REQUEST_MODE,
                "filter_output_svg": self.config.FILTER_SVG,
                "return_format": self.config.DEFAULT_RETURN_FORMAT,
            }
            if mode == "crawl":
                params["limit"] = self.config.DEFAULT_CRAWL_LIMIT
            # User-supplied params take precedence over the defaults above.
            if self.custom_params:
                params.update(self.custom_params)
            action = (
                self.spider.scrape_url if mode == "scrape" else self.spider.crawl_url
            )
            return action(url=url, params=params)
        except ValueError as ve:
            if self.log_failures:
                logger.error(f"Validation error for URL {url}: {ve!s}")
                return None
            raise ve
        except ImportError as ie:
            logger.error(f"Spider client import error: {ie!s}")
            raise ie
        except ConnectionError as ce:
            if self.log_failures:
                logger.error(f"Connection error while accessing {url}: {ce!s}")
                return None
            raise ce
        except Exception as e:
            if self.log_failures:
                logger.error(
                    f"Unexpected error during {mode} operation on {url}: {e!s}"
                )
                return None
            raise e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/spider_tool/spider_tool.py",
"license": "MIT License",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/example.py | """
StagehandTool Example
This example demonstrates how to use the StagehandTool in a CrewAI workflow.
It shows how to use the three main primitives: act, extract, and observe.
Prerequisites:
1. A Browserbase account with API key and project ID
2. An LLM API key (OpenAI or Anthropic)
3. Installed dependencies: crewai, crewai-tools, stagehand-py
Usage:
- Set your API keys in environment variables (recommended)
- Or modify the script to include your API keys directly
- Run the script: python stagehand_example.py
"""
import os
from crewai.utilities.printer import Printer
from dotenv import load_dotenv
from stagehand.schemas import AvailableModel # type: ignore[import-untyped]
from crewai import Agent, Crew, Process, Task
from crewai_tools import StagehandTool
_printer = Printer()
# Load environment variables from .env file
load_dotenv()
# Get API keys from environment variables
# You can set these in your shell or in a .env file
browserbase_api_key = os.environ.get("BROWSERBASE_API_KEY")
browserbase_project_id = os.environ.get("BROWSERBASE_PROJECT_ID")
# LLM key; use ANTHROPIC_API_KEY instead when running a Claude model
model_api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the StagehandTool with your credentials and use context manager
# so the browser session is released automatically when the block exits.
with StagehandTool(
    api_key=browserbase_api_key,  # Browserbase API key
    project_id=browserbase_project_id,  # Browserbase project ID
    model_api_key=model_api_key,
    model_name=AvailableModel.GPT_4O,  # Using the enum from schemas
) as stagehand_tool:
    # Create a web researcher agent with the StagehandTool
    researcher = Agent(
        role="Web Researcher",
        goal="Find and extract information from websites using different Stagehand primitives",
        backstory=(
            "You are an expert web automation agent equipped with the StagehandTool. "
            "Your primary function is to interact with websites based on natural language instructions. "
            "You must carefully choose the correct command (`command_type`) for each task:\n"
            "- Use 'act' (the default) for general interactions like clicking buttons ('Click the login button'), "
            "filling forms ('Fill the form with username user and password pass'), scrolling, or navigating within the site.\n"
            "- Use 'navigate' specifically when you need to go to a new web page; you MUST provide the target URL "
            "in the `url` parameter along with the instruction (e.g., instruction='Go to Google', url='https://google.com').\n"
            "- Use 'extract' when the goal is to pull structured data from the page. Provide a clear `instruction` "
            "describing what data to extract (e.g., 'Extract all product names and prices').\n"
            "- Use 'observe' to identify and analyze elements on the current page based on an `instruction` "
            "(e.g., 'Find all images in the main content area').\n\n"
            "Remember to break down complex tasks into simple, sequential steps in your `instruction`. For example, "
            "instead of 'Search for OpenAI on Google and click the first result', use multiple steps with the tool:\n"
            "1. Use 'navigate' with url='https://google.com'.\n"
            "2. Use 'act' with instruction='Type OpenAI in the search bar'.\n"
            "3. Use 'act' with instruction='Click the search button'.\n"
            "4. Use 'act' with instruction='Click the first search result link for OpenAI'.\n\n"
            "Always be precise in your instructions and choose the most appropriate command and parameters (`instruction`, `url`, `command_type`, `selector`) for the task at hand."
        ),
        llm="gpt-4o",
        verbose=True,
        allow_delegation=False,
        tools=[stagehand_tool],
    )
    # Define a research task that demonstrates all three primitives
    research_task = Task(
        description=(
            "Demonstrate Stagehand capabilities by performing the following steps:\n"
            "1. Go to https://www.stagehand.dev\n"
            "2. Extract all the text content from the page\n"
            "3. Find the Docs link and click on it\n"
            "4. Go to https://httpbin.org/forms/post and observe what elements are available on the page\n"
            "5. Provide a summary of what you learned about using these different commands"
        ),
        expected_output=(
            "A demonstration of all three Stagehand primitives (act, extract, observe) "
            "with examples of how each was used and what information was gathered."
        ),
        agent=researcher,
    )
    # Alternative task: Real research using the primitives
    web_research_task = Task(
        description=(
            "Go to google.com and search for 'Stagehand'.\n"
            "Then extract the first search result."
        ),
        expected_output=(
            "A summary report about Stagehand's capabilities and pricing, demonstrating how "
            "the different primitives can be used together for effective web research."
        ),
        agent=researcher,
    )
    # Set up the crew (single agent, tasks executed in order)
    crew = Crew(
        agents=[researcher],
        tasks=[research_task],  # You can switch this to web_research_task if you prefer
        verbose=True,
        process=Process.sequential,
    )
    # Run the crew and get the result
    result = crew.kickoff()
    _printer.print("\n==== RESULTS ====\n", color="cyan")
    _printer.print(str(result))
    # Resources are automatically cleaned up when exiting the context manager
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/example.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py | import asyncio
import json
import os
import re
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
# Define a flag to track whether stagehand is available
_HAS_STAGEHAND = False
try:
    from stagehand import (  # type: ignore[import-untyped]
        Stagehand,
        StagehandConfig,
        StagehandPage,
        configure_logging,
    )
    from stagehand.schemas import (  # type: ignore[import-untyped]
        ActOptions,
        AvailableModel,
        ExtractOptions,
        ObserveOptions,
    )
    _HAS_STAGEHAND = True
except ImportError:
    # Define type stubs for when stagehand is not installed.
    # These keep the module importable (class annotations below still parse);
    # a real ImportError is raised later in _check_required_credentials.
    Stagehand = Any
    StagehandPage = Any
    StagehandConfig = Any
    ActOptions = Any
    ExtractOptions = Any
    ObserveOptions = Any
    # Mock configure_logging function (no-op stand-in with the same signature)
    def configure_logging(level=None, remove_logger_name=None, quiet_dependencies=None):
        pass
    # Define only what's needed for class defaults
    class AvailableModel:  # type: ignore[no-redef]
        # Fallback identifier used as StagehandTool's default model_name
        CLAUDE_3_7_SONNET_LATEST = "anthropic.claude-3-7-sonnet-20240607"
class StagehandResult(BaseModel):
    """Structured outcome of a single Stagehand operation.

    Attributes:
        success: Whether the operation completed successfully.
        data: Payload produced by the operation (message dict, extracted
            content, or a list of observed elements).
        error: Error message when the operation failed, otherwise None.
    """

    success: bool = Field(
        ..., description="Whether the operation completed successfully"
    )
    data: str | dict | list = Field(
        ..., description="The result data from the operation"
    )
    error: str | None = Field(
        default=None,
        description="Optional error message if the operation failed",
    )
class StagehandToolSchema(BaseModel):
    """Input for StagehandTool."""
    # One atomic browser action; may be omitted for bare 'navigate' calls.
    instruction: str | None = Field(
        None,
        description="Single atomic action with location context. For reliability on complex pages, use ONE specific action with location hints. Good examples: 'Click the search input field in the header', 'Type Italy in the focused field', 'Press Enter', 'Click the first link in the results area'. Avoid combining multiple actions. For 'navigate' command type, this can be omitted if only URL is provided.",
    )
    # Destination for 'navigate'; also triggers pre-navigation before 'act'.
    url: str | None = Field(
        None,
        description="The URL to navigate to before executing the instruction. MUST be used with 'navigate' command. ",
    )
    # Operation selector; defaults to 'act'.
    command_type: str | None = Field(
        "act",
        description="""The type of command to execute (choose one):
        - 'act': Perform an action like clicking buttons, filling forms, etc. (default)
        - 'navigate': Specifically navigate to a URL
        - 'extract': Extract structured data from the page
        - 'observe': Identify and analyze elements on the page
        """,
    )
class StagehandTool(BaseTool):
    """A tool that uses Stagehand to automate web browser interactions using natural language with atomic action handling.
    Stagehand allows AI agents to interact with websites through a browser,
    performing actions like clicking buttons, filling forms, and extracting data.
    The tool supports four main command types:
    1. act - Perform actions like clicking, typing, scrolling, or navigating
    2. navigate - Specifically navigate to a URL (shorthand for act with navigation)
    3. extract - Extract structured data from web pages
    4. observe - Identify and analyze elements on a page
    Usage examples:
    - Navigate to a website: instruction="Go to the homepage", url="https://example.com"
    - Click a button: instruction="Click the login button"
    - Fill a form: instruction="Fill the login form with username 'user' and password 'pass'"
    - Extract data: instruction="Extract all product prices and names", command_type="extract"
    - Observe elements: instruction="Find all navigation menu items", command_type="observe"
    - Complex tasks: instruction="Step 1: Navigate to https://example.com; Step 2: Scroll down to the 'Features' section; Step 3: Click 'Learn More'", command_type="act"
    Example of breaking down "Search for OpenAI" into multiple steps:
    1. First navigation: instruction="Go to Google", url="https://google.com", command_type="navigate"
    2. Enter search term: instruction="Type 'OpenAI' in the search box", command_type="act"
    3. Submit search: instruction="Press the Enter key or click the search button", command_type="act"
    4. Click on result: instruction="Click on the OpenAI website link in the search results", command_type="act"
    """
    name: str = "Web Automation Tool"
    description: str = """Use this tool to control a web browser and interact with websites using natural language.
    Capabilities:
    - Navigate to websites and follow links
    - Click buttons, links, and other elements
    - Fill in forms and input fields
    - Search within websites
    - Extract information from web pages
    - Identify and analyze elements on a page
    To use this tool, provide a natural language instruction describing what you want to do.
    For reliability on complex pages, use specific, atomic instructions with location hints:
    - Good: "Click the search box in the header"
    - Good: "Type 'Italy' in the focused field"
    - Bad: "Search for Italy and click the first result"
    For different types of tasks, specify the command_type:
    - 'act': For performing one atomic action (default)
    - 'navigate': For navigating to a URL
    - 'extract': For getting data from a specific page section
    - 'observe': For finding elements in a specific area
    """
    args_schema: type[BaseModel] = StagehandToolSchema
    package_dependencies: list[str] = Field(default_factory=lambda: ["stagehand<=0.5.9"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BROWSERBASE_API_KEY",
                description="API key for Browserbase services",
                required=False,
            ),
            EnvVar(
                name="BROWSERBASE_PROJECT_ID",
                description="Project ID for Browserbase services",
                required=False,
            ),
        ]
    )
    # Stagehand configuration
    api_key: str | None = None  # Browserbase API key (falls back to env in __init__)
    project_id: str | None = None  # Browserbase project ID (falls back to env)
    model_api_key: str | None = None  # LLM key; provider-specific env fallback
    model_name: AvailableModel | None = AvailableModel.CLAUDE_3_7_SONNET_LATEST
    server_url: str | None = "https://api.stagehand.browserbase.com/v1"
    headless: bool = False
    dom_settle_timeout_ms: int = 3000  # wait for DOM to settle after actions
    self_heal: bool = True
    wait_for_captcha_solves: bool = True
    verbose: int = 1  # 1=INFO, 2=WARNING, 3=DEBUG, otherwise ERROR (see __init__)
    # Token management settings
    max_retries_on_token_limit: int = 3
    use_simplified_dom: bool = True
    # Instance variables (lazily populated by _setup_stagehand)
    _stagehand: Stagehand | None = None
    _page: StagehandPage | None = None
    _session_id: str | None = None
    _testing: bool = False  # when True, mock objects replace real Stagehand
    def __init__(
        self,
        api_key: str | None = None,
        project_id: str | None = None,
        model_api_key: str | None = None,
        model_name: str | None = None,
        server_url: str | None = None,
        session_id: str | None = None,
        headless: bool | None = None,
        dom_settle_timeout_ms: int | None = None,
        self_heal: bool | None = None,
        wait_for_captcha_solves: bool | None = None,
        verbose: int | None = None,
        _testing: bool = False,
        **kwargs,
    ):
        """Initialize the tool, resolving credentials from args or environment.

        Args:
            api_key: Browserbase API key (default: BROWSERBASE_API_KEY env var).
            project_id: Browserbase project ID (default: BROWSERBASE_PROJECT_ID).
            model_api_key: LLM provider API key.
            model_name: Model identifier; keeps the class default when None.
            server_url: Stagehand API endpoint override.
            session_id: Existing Browserbase session to resume.
            headless/dom_settle_timeout_ms/self_heal/wait_for_captcha_solves/
            verbose: Optional overrides of the class-level defaults; only
                applied when explicitly provided (None means "keep default").
            _testing: Internal flag that swaps in mocks and skips imports.
            **kwargs: Forwarded to BaseTool.

        Raises:
            ImportError: If stagehand is missing (outside testing mode).
            ValueError: If Browserbase credentials cannot be resolved.
        """
        # Set testing flag early so that other init logic can rely on it
        self._testing = _testing
        super().__init__(**kwargs)
        # Set up logger
        import logging
        self._logger = logging.getLogger(__name__)
        # Set configuration from parameters or environment
        self.api_key = api_key or os.getenv("BROWSERBASE_API_KEY")
        self.project_id = project_id or os.getenv("BROWSERBASE_PROJECT_ID")
        if model_api_key:
            self.model_api_key = model_api_key
        if model_name:
            self.model_name = model_name
        if server_url:
            self.server_url = server_url
        if headless is not None:
            self.headless = headless
        if dom_settle_timeout_ms is not None:
            self.dom_settle_timeout_ms = dom_settle_timeout_ms
        if self_heal is not None:
            self.self_heal = self_heal
        if wait_for_captcha_solves is not None:
            self.wait_for_captcha_solves = wait_for_captcha_solves
        if verbose is not None:
            self.verbose = verbose
        self._session_id = session_id
        # Configure logging based on verbosity level
        if not self._testing:
            log_level = {1: "INFO", 2: "WARNING", 3: "DEBUG"}.get(self.verbose, "ERROR")
            configure_logging(
                level=log_level, remove_logger_name=True, quiet_dependencies=True
            )
        self._check_required_credentials()
def _check_required_credentials(self):
"""Validate that required credentials are present."""
if not self._testing and not _HAS_STAGEHAND:
raise ImportError(
"`stagehand` package not found, please run `uv add stagehand`"
)
if not self.api_key:
raise ValueError("api_key is required (or set BROWSERBASE_API_KEY in env).")
if not self.project_id:
raise ValueError(
"project_id is required (or set BROWSERBASE_PROJECT_ID in env)."
)
def __del__(self):
"""Ensure cleanup on deletion."""
try:
self.close()
except Exception: # noqa: S110
pass
def _get_model_api_key(self):
"""Get the appropriate API key based on the model being used."""
# Check model type and get appropriate key
model_str = str(self.model_name)
if "gpt" in model_str.lower():
return self.model_api_key or os.getenv("OPENAI_API_KEY")
if "claude" in model_str.lower() or "anthropic" in model_str.lower():
return self.model_api_key or os.getenv("ANTHROPIC_API_KEY")
if "gemini" in model_str.lower():
return self.model_api_key or os.getenv("GOOGLE_API_KEY")
# Default to trying OpenAI, then Anthropic
return (
self.model_api_key
or os.getenv("OPENAI_API_KEY")
or os.getenv("ANTHROPIC_API_KEY")
)
    async def _setup_stagehand(self, session_id: str | None = None):
        """Initialize Stagehand if not already set up.

        Args:
            session_id: Optional Browserbase session to resume; falls back to
                the stored session id.

        Returns:
            A (stagehand, page) tuple; cached instances are reused on
            subsequent calls. In testing mode, lightweight mocks are returned.
        """
        # If we're in testing mode, return mock objects
        if self._testing:
            if not self._stagehand:
                # Create mock objects for testing — they mirror the subset of
                # the Stagehand page API used by _async_run.
                class MockPage:
                    async def act(self, options):
                        mock_result = type("MockResult", (), {})()
                        mock_result.model_dump = lambda: {
                            "message": "Action completed successfully"
                        }
                        return mock_result
                    async def goto(self, url):
                        return None
                    async def extract(self, options):
                        mock_result = type("MockResult", (), {})()
                        mock_result.model_dump = lambda: {"data": "Extracted content"}
                        return mock_result
                    async def observe(self, options):
                        mock_result1 = type(
                            "MockResult",
                            (),
                            {"description": "Test element", "method": "click"},
                        )()
                        return [mock_result1]
                    async def wait_for_load_state(self, state):
                        return None
                class MockStagehand:
                    def __init__(self):
                        self.page = MockPage()
                        self.session_id = "test-session-id"
                    async def init(self):
                        return None
                    async def close(self):
                        return None
                self._stagehand = MockStagehand()
                await self._stagehand.init()
                self._page = self._stagehand.page
                self._session_id = self._stagehand.session_id
            return self._stagehand, self._page
        # Normal initialization for non-testing mode
        if not self._stagehand:
            # Get the appropriate API key based on model type
            model_api_key = self._get_model_api_key()
            if not model_api_key:
                raise ValueError(
                    "No appropriate API key found for model. Please set OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY"
                )
            # Build the StagehandConfig with proper parameter names
            config = StagehandConfig(
                env="BROWSERBASE",
                apiKey=self.api_key,  # Browserbase API key (camelCase)
                projectId=self.project_id,  # Browserbase project ID (camelCase)
                modelApiKey=model_api_key,  # LLM API key - auto-detected based on model
                modelName=self.model_name,
                apiUrl=self.server_url
                if self.server_url
                else "https://api.stagehand.browserbase.com/v1",
                domSettleTimeoutMs=self.dom_settle_timeout_ms,
                selfHeal=self.self_heal,
                waitForCaptchaSolves=self.wait_for_captcha_solves,
                verbose=self.verbose,
                browserbaseSessionID=session_id or self._session_id,
            )
            # Initialize Stagehand with config
            self._stagehand = Stagehand(config=config)
            # Initialize the Stagehand instance
            await self._stagehand.init()
        self._page = self._stagehand.page
        self._session_id = self._stagehand.session_id
        return self._stagehand, self._page
def _extract_steps(self, instruction: str) -> list[str]:
"""Extract individual steps from multi-step instructions."""
# Check for numbered steps (Step 1:, Step 2:, etc.)
if re.search(r"Step \d+:", instruction, re.IGNORECASE):
steps = re.findall(
r"Step \d+:\s*([^;]+?)(?=Step \d+:|$)",
instruction,
re.IGNORECASE | re.DOTALL,
)
return [step.strip() for step in steps if step.strip()]
# Check for semicolon-separated instructions
if ";" in instruction:
return [step.strip() for step in instruction.split(";") if step.strip()]
return [instruction]
def _simplify_instruction(self, instruction: str) -> str:
"""Simplify complex instructions to basic actions."""
# Extract the core action from complex instructions
instruction_lower = instruction.lower()
if "search" in instruction_lower and "click" in instruction_lower:
# For search tasks, focus on the search action first
if "type" in instruction_lower or "enter" in instruction_lower:
return "click on the search input field"
return "search for content on the page"
if "click" in instruction_lower:
# Extract what to click
if "button" in instruction_lower:
return "click the button"
if "link" in instruction_lower:
return "click the link"
if "search" in instruction_lower:
return "click the search field"
return "click on the element"
if "type" in instruction_lower or "enter" in instruction_lower:
return "type in the input field"
return instruction # Return as-is if can't simplify
    async def _async_run(
        self,
        instruction: str | None = None,
        url: str | None = None,
        command_type: str = "act",
    ):
        """Execute one Stagehand command asynchronously.

        Args:
            instruction: Natural-language action; a sensible default is
                substituted per command type when omitted.
            url: Optional URL to visit before acting / as navigation target.
            command_type: One of 'act', 'navigate', 'extract', 'observe'.

        Returns:
            StagehandResult with a success flag, payload, and optional error.
        """
        # Handle missing instruction based on command type
        if not instruction:
            if command_type == "navigate" and url:
                instruction = f"Navigate to {url}"
            elif command_type == "observe":
                instruction = "Observe elements on the page"
            elif command_type == "extract":
                instruction = "Extract information from the page"
            else:
                instruction = "Perform the requested action"
        # For testing mode, return mock result directly without calling parent
        if self._testing:
            mock_data = {
                "message": f"Mock {command_type} completed successfully",
                "instruction": instruction,
            }
            if url:
                mock_data["url"] = url
            return self._format_result(True, mock_data)
        try:
            _, page = await self._setup_stagehand(self._session_id)
            self._logger.info(
                f"Executing {command_type} with instruction: {instruction}"
            )
            # Get the API key to pass to model operations
            model_api_key = self._get_model_api_key()
            model_client_options = {"apiKey": model_api_key}
            # Always navigate first if URL is provided and we're doing actions
            if url and command_type.lower() == "act":
                self._logger.info(f"Navigating to {url} before performing actions")
                await page.goto(url)
                await page.wait_for_load_state("networkidle")
                # Small delay to ensure page is fully loaded
                await asyncio.sleep(1)
            # Process according to command type
            if command_type.lower() == "act":
                # Extract steps from complex instructions
                steps = self._extract_steps(instruction)
                self._logger.info(f"Extracted {len(steps)} steps: {steps}")
                results = []
                for i, step in enumerate(steps):
                    self._logger.info(f"Executing step {i + 1}/{len(steps)}: {step}")
                    try:
                        # Create act options with API key for each step
                        from stagehand.schemas import ActOptions
                        act_options = ActOptions(
                            action=step,
                            modelName=self.model_name,
                            domSettleTimeoutMs=self.dom_settle_timeout_ms,
                            modelClientOptions=model_client_options,
                        )
                        result = await page.act(act_options)
                        results.append(result.model_dump())
                        # Small delay between steps to let DOM settle
                        if i < len(steps) - 1:  # Don't delay after last step
                            await asyncio.sleep(0.5)
                    except Exception as step_error:
                        error_msg = f"Step failed: {step_error}"
                        self._logger.warning(f"Step {i + 1} failed: {error_msg}")
                        # Try with simplified instruction (one retry per step)
                        try:
                            simplified = self._simplify_instruction(step)
                            if simplified != step:
                                self._logger.info(
                                    f"Retrying with simplified instruction: {simplified}"
                                )
                                act_options = ActOptions(
                                    action=simplified,
                                    modelName=self.model_name,
                                    domSettleTimeoutMs=self.dom_settle_timeout_ms,
                                    modelClientOptions=model_client_options,
                                )
                                result = await page.act(act_options)
                                results.append(result.model_dump())
                            else:
                                # If we can't simplify or retry fails, record the error
                                results.append({"error": error_msg, "step": step})
                        except Exception as retry_error:
                            self._logger.error(f"Retry also failed: {retry_error}")
                            results.append({"error": str(retry_error), "step": step})
                # Return combined results
                if len(results) == 1:
                    # Single step, return as-is
                    if "error" in results[0]:
                        return self._format_result(
                            False, results[0], results[0]["error"]
                        )
                    return self._format_result(True, results[0])
                # Multiple steps, return all results; success only if none failed
                has_errors = any("error" in result for result in results)
                return self._format_result(not has_errors, {"steps": results})
            if command_type.lower() == "navigate":
                # For navigation, use the goto method directly
                if not url:
                    error_msg = "No URL provided for navigation. Please provide a URL."
                    self._logger.error(error_msg)
                    return self._format_result(False, {}, error_msg)
                result = await page.goto(url)
                self._logger.info(f"Navigate operation completed to {url}")
                return self._format_result(
                    True,
                    {
                        "url": url,
                        "message": f"Successfully navigated to {url}",
                    },
                )
            if command_type.lower() == "extract":
                # Create extract options with API key
                from stagehand.schemas import ExtractOptions
                extract_options = ExtractOptions(
                    instruction=instruction,
                    modelName=self.model_name,
                    domSettleTimeoutMs=self.dom_settle_timeout_ms,
                    useTextExtract=True,
                    modelClientOptions=model_client_options,  # Add API key here
                )
                result = await page.extract(extract_options)
                self._logger.info(f"Extract operation completed successfully {result}")
                return self._format_result(True, result.model_dump())
            if command_type.lower() == "observe":
                # Create observe options with API key
                from stagehand.schemas import ObserveOptions
                observe_options = ObserveOptions(
                    instruction=instruction,
                    modelName=self.model_name,
                    onlyVisible=True,
                    domSettleTimeoutMs=self.dom_settle_timeout_ms,
                    modelClientOptions=model_client_options,  # Add API key here
                )
                results = await page.observe(observe_options)
                # Format the observation results as index/description/method dicts
                formatted_results = []
                for i, result in enumerate(results):
                    formatted_results.append(
                        {
                            "index": i + 1,
                            "description": result.description,
                            "method": result.method,
                        }
                    )
                self._logger.info(
                    f"Observe operation completed with {len(formatted_results)} elements found"
                )
                return self._format_result(True, formatted_results)
            error_msg = f"Unknown command type: {command_type}"
            self._logger.error(error_msg)
            return self._format_result(False, {}, error_msg)
        except Exception as e:
            # Any failure is reported in-band as an unsuccessful result
            error_msg = f"Error using Stagehand: {e!s}"
            self._logger.error(f"Operation failed: {error_msg}")
            return self._format_result(False, {}, error_msg)
    def _format_result(self, success: bool, data: str | dict | list, error: str | None = None) -> StagehandResult:
        """Helper to format results consistently as a StagehandResult."""
        return StagehandResult(success=success, data=data, error=error)
    def _run(
        self,
        instruction: str | None = None,
        url: str | None = None,
        command_type: str = "act",
    ) -> str:
        """Run the Stagehand tool with the given instruction.

        Args:
            instruction: Natural language instruction for browser automation
            url: Optional URL to navigate to before executing the instruction
            command_type: Type of command to execute ('act', 'extract', or 'observe')

        Returns:
            The result of the browser automation task
        """
        # Handle missing instruction based on command type
        if not instruction:
            if command_type == "navigate" and url:
                instruction = f"Navigate to {url}"
            elif command_type == "observe":
                instruction = "Observe elements on the page"
            elif command_type == "extract":
                instruction = "Extract information from the page"
            else:
                instruction = "Perform the requested action"
        # Create an event loop if we're not already in one
        try:
            loop = asyncio.get_event_loop()
            if loop.is_running():
                # We're in an existing event loop: run the coroutine to
                # completion on a separate thread with its own loop.
                # NOTE(review): the coroutine object is created here but only
                # scheduled inside the worker thread's loop — confirm this
                # stays safe across asyncio versions.
                import concurrent.futures
                with concurrent.futures.ThreadPoolExecutor() as executor:
                    future = executor.submit(
                        asyncio.run, self._async_run(instruction, url, command_type)
                    )
                    result = future.result()
            else:
                # We have a loop but it's not running
                result = loop.run_until_complete(
                    self._async_run(instruction, url, command_type)
                )
            # Format the result for output
            if result.success:
                if command_type.lower() == "act":
                    if isinstance(result.data, dict) and "steps" in result.data:
                        # Multiple steps: one status line per step
                        step_messages = []
                        for i, step in enumerate(result.data["steps"]):
                            if "error" in step:
                                step_messages.append(
                                    f"Step {i + 1}: Failed - {step['error']}"
                                )
                            else:
                                step_messages.append(
                                    f"Step {i + 1}: {step.get('message', 'Completed')}"
                                )
                        return "\n".join(step_messages)
                    return f"Action result: {result.data.get('message', 'Completed')}"
                if command_type.lower() == "extract":
                    return f"Extracted data: {json.dumps(result.data, indent=2)}"
                if command_type.lower() == "observe":
                    formatted_results = []
                    for element in result.data:
                        formatted_results.append(
                            f"Element {element['index']}: {element['description']}"
                        )
                        if element.get("method"):
                            formatted_results.append(
                                f"Suggested action: {element['method']}"
                            )
                    return "\n".join(formatted_results)
                # 'navigate' and any other command type: raw JSON payload
                return json.dumps(result.data, indent=2)
            return f"Error: {result.error}"
        except RuntimeError:
            # No event loop exists, create one
            result = asyncio.run(self._async_run(instruction, url, command_type))
            if result.success:
                if isinstance(result.data, dict):
                    return json.dumps(result.data, indent=2)
                return str(result.data)
            return f"Error: {result.error}"
async def _async_close(self):
"""Asynchronously clean up Stagehand resources."""
# Skip for test mode
if self._testing:
self._stagehand = None
self._page = None
return
if self._stagehand:
await self._stagehand.close()
self._stagehand = None
if self._page:
self._page = None
    def close(self):
        """Clean up Stagehand resources.

        Safe to call multiple times; swallows cleanup errors. Bridges into
        async cleanup from sync context, whether or not a loop is running.
        """
        # Skip actual closing for testing mode
        if self._testing:
            self._stagehand = None
            self._page = None
            return
        if self._stagehand:
            try:
                # Handle both synchronous and asynchronous cases
                if hasattr(self._stagehand, "close"):
                    if asyncio.iscoroutinefunction(self._stagehand.close):
                        try:
                            loop = asyncio.get_event_loop()
                            if loop.is_running():
                                # Running loop: close on a worker thread
                                import concurrent.futures
                                with (
                                    concurrent.futures.ThreadPoolExecutor() as executor
                                ):
                                    future = executor.submit(
                                        asyncio.run, self._async_close()
                                    )
                                    future.result()
                            else:
                                loop.run_until_complete(self._async_close())
                        except RuntimeError:
                            # No usable loop in this thread: create one
                            asyncio.run(self._async_close())
                    else:
                        # Handle non-async close method (for mocks)
                        self._stagehand.close()
            except Exception:  # noqa: S110
                # Log but don't raise - we're cleaning up
                pass
            self._stagehand = None
        if self._page:
            self._page = None
    def __enter__(self):
        """Enter the context manager."""
        # Return the tool itself so `with StagehandTool(...) as tool:` works.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the context manager and clean up resources."""
        # Always close the browser session, even when the body raised.
        self.close()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/stagehand_tool/stagehand_tool.py",
"license": "MIT License",
"lines": 634,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py | import json
import os
from typing import Any, Literal
from crewai.tools import BaseTool, EnvVar
from dotenv import load_dotenv
from pydantic import BaseModel, ConfigDict, Field
# Load environment variables (e.g. TAVILY_API_KEY) from a local .env file.
load_dotenv()
try:
    from tavily import AsyncTavilyClient, TavilyClient  # type: ignore[import-untyped]
    TAVILY_AVAILABLE = True
except ImportError:
    # Keep the module importable without tavily-python; TavilyExtractorTool's
    # __init__ handles the missing dependency interactively.
    TAVILY_AVAILABLE = False
    TavilyClient = Any
    AsyncTavilyClient = Any
class TavilyExtractorToolSchema(BaseModel):
    """Arguments accepted by TavilyExtractorTool."""

    urls: list[str] | str = Field(
        ...,
        description="The URL(s) to extract data from. Can be a single URL or a list of URLs.",
    )
class TavilyExtractorTool(BaseTool):
    """Tool that uses the Tavily API to extract content from web pages.

    Attributes:
        client: Synchronous Tavily client.
        async_client: Asynchronous Tavily client.
        name: The name of the tool.
        description: The description of the tool.
        args_schema: The schema for the tool's arguments.
        api_key: The Tavily API key.
        proxies: Optional proxies for the API requests.
        include_images: Whether to include images in the extraction.
        extract_depth: The depth of extraction.
        timeout: The timeout for the extraction request in seconds.
    """

    # FIX: the docstring above was previously a bare string expression placed
    # after the field definitions, where it is a no-op rather than the class
    # docstring; it must be the first statement in the class body.

    package_dependencies: list[str] = Field(default_factory=lambda: ["tavily-python"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="TAVILY_API_KEY",
                description="API key for Tavily extraction service",
                required=True,
            ),
        ]
    )
    # Allow the non-pydantic Tavily client types as field values.
    model_config = ConfigDict(arbitrary_types_allowed=True)
    client: TavilyClient | None = None
    async_client: AsyncTavilyClient | None = None
    name: str = "TavilyExtractorTool"
    description: str = "Extracts content from one or more web pages using the Tavily API. Returns structured data."
    args_schema: type[BaseModel] = TavilyExtractorToolSchema
    api_key: str | None = Field(
        default_factory=lambda: os.getenv("TAVILY_API_KEY"),
        description="The Tavily API key. If not provided, it will be loaded from the environment variable TAVILY_API_KEY.",
    )
    proxies: dict[str, str] | None = Field(
        default=None,
        description="Optional proxies to use for the Tavily API requests.",
    )
    include_images: bool = Field(
        default=False,
        description="Whether to include images in the extraction.",
    )
    extract_depth: Literal["basic", "advanced"] = Field(
        default="basic",
        description="The depth of extraction. 'basic' for basic extraction, 'advanced' for advanced extraction.",
    )
    timeout: int = Field(
        default=60,
        description="The timeout for the extraction request in seconds.",
    )
def __init__(self, **kwargs: Any):
"""Initializes the TavilyExtractorTool.
Args:
**kwargs: Additional keyword arguments.
"""
super().__init__(**kwargs)
if TAVILY_AVAILABLE:
self.client = TavilyClient(api_key=self.api_key, proxies=self.proxies)
self.async_client = AsyncTavilyClient(
api_key=self.api_key, proxies=self.proxies
)
else:
try:
import subprocess
import click
except ImportError:
raise ImportError(
"The 'tavily-python' package is required. 'click' and 'subprocess' are also needed to assist with installation if the package is missing. "
"Please install 'tavily-python' manually (e.g., 'uv add tavily-python') and ensure 'click' and 'subprocess' are available."
) from None
if click.confirm(
"You are missing the 'tavily-python' package, which is required for TavilyExtractorTool. Would you like to install it?"
):
try:
subprocess.run(["uv pip", "install", "tavily-python"], check=True) # noqa: S607
raise ImportError(
"'tavily-python' has been installed. Please restart your Python application to use the TavilyExtractorTool."
)
except subprocess.CalledProcessError as e:
raise ImportError(
f"Attempted to install 'tavily-python' but failed: {e}. "
f"Please install it manually to use the TavilyExtractorTool."
) from e
else:
raise ImportError(
"The 'tavily-python' package is required to use the TavilyExtractorTool. "
"Please install it with: uv add tavily-python"
)
def _run(
self,
urls: list[str] | str,
) -> str:
"""Synchronously extracts content from the given URL(s).
Args:
urls: The URL(s) to extract data from.
Returns:
A JSON string containing the extracted data.
"""
if not self.client:
raise ValueError(
"Tavily client is not initialized. Ensure 'tavily-python' is installed and API key is set."
)
return json.dumps(
self.client.extract(
urls=urls,
extract_depth=self.extract_depth,
include_images=self.include_images,
timeout=self.timeout,
),
indent=2,
)
async def _arun(
self,
urls: list[str] | str,
) -> str:
"""Asynchronously extracts content from the given URL(s).
Args:
urls: The URL(s) to extract data from.
Returns:
A JSON string containing the extracted data.
"""
if not self.async_client:
raise ValueError(
"Tavily async client is not initialized. Ensure 'tavily-python' is installed and API key is set."
)
results = await self.async_client.extract(
urls=urls,
extract_depth=self.extract_depth,
include_images=self.include_images,
timeout=self.timeout,
)
return json.dumps(results, indent=2)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/tavily_extractor_tool/tavily_extractor_tool.py",
"license": "MIT License",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py | from collections.abc import Sequence
import json
import os
from typing import Any, Literal
from crewai.tools import BaseTool, EnvVar
from dotenv import load_dotenv
from pydantic import BaseModel, ConfigDict, Field
# Load environment variables (e.g. TAVILY_API_KEY) from a local .env file.
load_dotenv()

# Optional dependency probe: the Tavily SDK may not be installed. Record
# availability in a flag and substitute ``Any`` placeholders so the type
# annotations on ``TavilySearchTool`` below still resolve.
try:
    from tavily import AsyncTavilyClient, TavilyClient  # type: ignore[import-untyped]

    TAVILY_AVAILABLE = True
except ImportError:
    TAVILY_AVAILABLE = False
    TavilyClient = Any
    AsyncTavilyClient = Any
class TavilySearchToolSchema(BaseModel):
    """Input schema for TavilySearchTool."""

    # Free-text query forwarded verbatim to the Tavily Search API.
    query: str = Field(..., description="The search query string.")
class TavilySearchTool(BaseTool):
    """Tool that uses the Tavily Search API to perform web searches.

    Attributes:
        client: An instance of TavilyClient.
        async_client: An instance of AsyncTavilyClient.
        name: The name of the tool.
        description: A description of the tool's purpose.
        args_schema: The schema for the tool's arguments.
        api_key: The Tavily API key.
        proxies: Optional proxies for the API requests.
        search_depth: The depth of the search.
        topic: The topic to focus the search on.
        time_range: The time range for the search.
        days: The number of days to search back.
        max_results: The maximum number of results to return.
        include_domains: A list of domains to include in the search.
        exclude_domains: A list of domains to exclude from the search.
        include_answer: Whether to include a direct answer to the query.
        include_raw_content: Whether to include the raw content of the search results.
        include_images: Whether to include images in the search results.
        timeout: The timeout for the search request in seconds.
        max_content_length_per_result: Maximum length for the 'content' of each search result.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    client: TavilyClient | None = None
    async_client: AsyncTavilyClient | None = None
    name: str = "Tavily Search"
    description: str = (
        "A tool that performs web searches using the Tavily Search API. "
        "It returns a JSON object containing the search results."
    )
    args_schema: type[BaseModel] = TavilySearchToolSchema
    api_key: str | None = Field(
        default_factory=lambda: os.getenv("TAVILY_API_KEY"),
        description="The Tavily API key. If not provided, it will be loaded from the environment variable TAVILY_API_KEY.",
    )
    proxies: dict[str, str] | None = Field(
        default=None,
        description="Optional proxies to use for the Tavily API requests.",
    )
    search_depth: Literal["basic", "advanced"] = Field(
        default="basic", description="The depth of the search."
    )
    topic: Literal["general", "news", "finance"] = Field(
        default="general", description="The topic to focus the search on."
    )
    time_range: Literal["day", "week", "month", "year"] | None = Field(
        default=None, description="The time range for the search."
    )
    days: int = Field(default=7, description="The number of days to search back.")
    max_results: int = Field(
        default=5, description="The maximum number of results to return."
    )
    include_domains: Sequence[str] | None = Field(
        default=None, description="A list of domains to include in the search."
    )
    exclude_domains: Sequence[str] | None = Field(
        default=None, description="A list of domains to exclude from the search."
    )
    include_answer: bool | Literal["basic", "advanced"] = Field(
        default=False, description="Whether to include a direct answer to the query."
    )
    include_raw_content: bool = Field(
        default=False,
        description="Whether to include the raw content of the search results.",
    )
    include_images: bool = Field(
        default=False, description="Whether to include images in the search results."
    )
    timeout: int = Field(
        default=60, description="The timeout for the search request in seconds."
    )
    max_content_length_per_result: int = Field(
        default=1000,
        description="Maximum length for the 'content' of each search result to avoid context window issues.",
    )
    package_dependencies: list[str] = Field(default_factory=lambda: ["tavily-python"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="TAVILY_API_KEY",
                description="API key for Tavily search service",
                required=True,
            ),
        ]
    )

    def __init__(self, **kwargs: Any):
        """Initialize the sync and async Tavily clients.

        If the SDK is missing, offer an interactive install; in every
        missing-SDK branch an ImportError is raised so the tool is never
        left half-initialized.
        """
        super().__init__(**kwargs)
        if TAVILY_AVAILABLE:
            self.client = TavilyClient(api_key=self.api_key, proxies=self.proxies)
            self.async_client = AsyncTavilyClient(
                api_key=self.api_key, proxies=self.proxies
            )
        else:
            try:
                import subprocess

                import click
            except ImportError as e:
                raise ImportError(
                    "The 'tavily-python' package is required. 'click' and 'subprocess' are also needed to assist with installation if the package is missing. "
                    "Please install 'tavily-python' manually (e.g., 'pip install tavily-python') and ensure 'click' and 'subprocess' are available."
                ) from e
            if click.confirm(
                "You are missing the 'tavily-python' package, which is required for TavilySearchTool. Would you like to install it?"
            ):
                try:
                    subprocess.run(["uv", "add", "tavily-python"], check=True)  # noqa: S607
                    # A restart is required before the freshly installed
                    # package becomes importable, hence the deliberate raise.
                    raise ImportError(
                        "'tavily-python' has been installed. Please restart your Python application to use the TavilySearchTool."
                    )
                except subprocess.CalledProcessError as e:
                    raise ImportError(
                        f"Attempted to install 'tavily-python' but failed: {e}. "
                        f"Please install it manually to use the TavilySearchTool."
                    ) from e
            else:
                raise ImportError(
                    "The 'tavily-python' package is required to use the TavilySearchTool. "
                    "Please install it with: uv add tavily-python"
                )

    def _truncate_results(self, raw_results: Any) -> Any:
        """Truncate each result's 'content' field in place.

        Shared by the sync and async paths (the loop was previously
        duplicated in both). Entries longer than
        ``max_content_length_per_result`` are cut and suffixed with "..."
        to keep the payload within context-window limits.

        Args:
            raw_results: The raw response object from the Tavily API.

        Returns:
            The same object, with over-long 'content' strings truncated.
        """
        if isinstance(raw_results, dict) and isinstance(
            raw_results.get("results"), list
        ):
            for item in raw_results["results"]:
                if isinstance(item, dict) and isinstance(item.get("content"), str):
                    if len(item["content"]) > self.max_content_length_per_result:
                        item["content"] = (
                            item["content"][: self.max_content_length_per_result]
                            + "..."
                        )
        return raw_results

    def _run(
        self,
        query: str,
    ) -> str:
        """Synchronously performs a search using the Tavily API.

        Content of each result is truncated to `max_content_length_per_result`.

        Args:
            query: The search query string.

        Returns:
            A JSON string containing the search results with truncated content.

        Raises:
            ValueError: If the Tavily client is not initialized.
        """
        if not self.client:
            raise ValueError(
                "Tavily client is not initialized. Ensure 'tavily-python' is installed and API key is set."
            )
        raw_results = self.client.search(
            query=query,
            search_depth=self.search_depth,
            topic=self.topic,
            time_range=self.time_range,
            days=self.days,
            max_results=self.max_results,
            include_domains=self.include_domains,
            exclude_domains=self.exclude_domains,
            include_answer=self.include_answer,
            include_raw_content=self.include_raw_content,
            include_images=self.include_images,
            timeout=self.timeout,
        )
        return json.dumps(self._truncate_results(raw_results), indent=2)

    async def _arun(
        self,
        query: str,
    ) -> str:
        """Asynchronously performs a search using the Tavily API.

        Content of each result is truncated to `max_content_length_per_result`.

        Args:
            query: The search query string.

        Returns:
            A JSON string containing the search results with truncated content.

        Raises:
            ValueError: If the Tavily async client is not initialized.
        """
        if not self.async_client:
            raise ValueError(
                "Tavily async client is not initialized. Ensure 'tavily-python' is installed and API key is set."
            )
        raw_results = await self.async_client.search(
            query=query,
            search_depth=self.search_depth,
            topic=self.topic,
            time_range=self.time_range,
            days=self.days,
            max_results=self.max_results,
            include_domains=self.include_domains,
            exclude_domains=self.exclude_domains,
            include_answer=self.include_answer,
            include_raw_content=self.include_raw_content,
            include_images=self.include_images,
            timeout=self.timeout,
        )
        return json.dumps(self._truncate_results(raw_results), indent=2)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/tavily_search_tool/tavily_search_tool.py",
"license": "MIT License",
"lines": 230,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py | from pydantic import BaseModel, Field, model_validator
from typing_extensions import Self
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedTXTSearchToolSchema(BaseModel):
    """Input for TXTSearchTool."""

    # Only the query is exposed when the tool is bound to a fixed TXT file.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the txt's content",
    )
class TXTSearchToolSchema(FixedTXTSearchToolSchema):
    """Input for TXTSearchTool."""

    # Per-call TXT source, used when no fixed file was bound at construction.
    txt: str = Field(..., description="File path or URL of a TXT file to be searched")
class TXTSearchTool(RagTool):
    """RAG tool for semantic search over a TXT file's content.

    If ``txt`` is supplied at construction time the file is pre-ingested and
    the runtime schema drops the ``txt`` argument; otherwise each call may
    provide its own file.
    """

    name: str = "Search a txt's content"
    description: str = (
        "A tool that can be used to semantic search a query from a txt's content."
    )
    args_schema: type[BaseModel] = TXTSearchToolSchema
    # Optional fixed TXT source (path or URL) bound at construction time.
    txt: str | None = None

    @model_validator(mode="after")
    def _configure_for_txt(self) -> Self:
        """Configure tool for specific TXT file if provided."""
        if self.txt is not None:
            # Pre-ingest the fixed file, switch to the schema without the
            # ``txt`` argument, and refresh the generated description.
            self.add(self.txt)
            self.description = f"A tool that can be used to semantic search a query the {self.txt} txt's content."
            self.args_schema = FixedTXTSearchToolSchema
            self._generate_description()
        return self

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        txt: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Search the ingested TXT content for ``search_query``.

        Args:
            search_query: Query text to search with.
            txt: Optional TXT file (path or URL) to ingest before searching.
            similarity_threshold: Optional minimum similarity for matches.
            limit: Optional maximum number of results.

        Returns:
            Formatted search results from the underlying RAG store.
        """
        if txt is not None:
            self.add(txt)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/txt_search_tool/txt_search_tool.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/vision_tool/vision_tool.py | import base64
from pathlib import Path
from crewai import LLM
from crewai.tools import BaseTool, EnvVar
from crewai.utilities.types import LLMMessage
from pydantic import BaseModel, Field, PrivateAttr, field_validator
class ImagePromptSchema(BaseModel):
    """Input for Vision Tool."""

    # Bug fix: the descriptive text was previously assigned as the field's
    # *default value* ("image_path_url: str = \"The image path or URL.\""),
    # which made the field optional and let a nonsense placeholder through.
    # The field is now required and the text serves as its description.
    image_path_url: str = Field(..., description="The image path or URL.")

    @field_validator("image_path_url")
    @classmethod
    def validate_image_path_url(cls, v: str) -> str:
        """Accept any http(s) URL; local paths must exist and be a supported format."""
        if v.startswith("http"):
            return v

        path = Path(v)
        if not path.exists():
            raise ValueError(f"Image file does not exist: {v}")

        # Validate supported formats
        valid_extensions = {".jpg", ".jpeg", ".png", ".gif", ".webp"}
        if path.suffix.lower() not in valid_extensions:
            raise ValueError(
                f"Unsupported image format. Supported formats: {valid_extensions}"
            )

        return v
class VisionTool(BaseTool):
    """Tool for analyzing images using vision models.

    Args:
        llm: Optional LLM instance to use
        model: Model identifier to use if no LLM is provided
    """

    name: str = "Vision Tool"
    description: str = (
        "This tool uses OpenAI's Vision API to describe the contents of an image."
    )
    args_schema: type[BaseModel] = ImagePromptSchema
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OPENAI_API_KEY",
                description="API key for OpenAI services",
                required=True,
            ),
        ]
    )

    _model: str = PrivateAttr(default="gpt-4o-mini")
    _llm: LLM | None = PrivateAttr(default=None)

    def __init__(self, llm: LLM | None = None, model: str = "gpt-4o-mini", **kwargs):
        """Initialize the vision tool.

        Args:
            llm: Optional LLM instance to use
            model: Model identifier to use if no LLM is provided
            **kwargs: Additional arguments for the base tool
        """
        super().__init__(**kwargs)
        self._model = model
        self._llm = llm

    @property
    def model(self) -> str:
        """Get the current model identifier."""
        return self._model

    @model.setter
    def model(self, value: str) -> None:
        """Set the model identifier and reset LLM if it was auto-created."""
        self._model = value
        if self._llm is not None and getattr(self._llm, "model", None) != value:
            self._llm = None

    @property
    def llm(self) -> LLM:
        """Get the LLM instance, creating one if needed."""
        if self._llm is None:
            self._llm = LLM(model=self._model, stop=["STOP", "END"])
        return self._llm

    @staticmethod
    def _guess_mime_type(image_path: str) -> str:
        """Map a file extension to its MIME type, defaulting to image/jpeg."""
        return {
            ".jpg": "image/jpeg",
            ".jpeg": "image/jpeg",
            ".png": "image/png",
            ".gif": "image/gif",
            ".webp": "image/webp",
        }.get(Path(image_path).suffix.lower(), "image/jpeg")

    def _run(self, **kwargs) -> str:
        """Describe the image at ``image_path_url`` using the vision LLM.

        Returns the model's description, or an error string on failure
        (errors are reported as strings rather than raised so the agent
        can recover).
        """
        try:
            image_path_url = kwargs.get("image_path_url")
            if not image_path_url:
                return "Image Path or URL is required."

            # Validate path/URL and format before doing any work.
            ImagePromptSchema(image_path_url=image_path_url)

            if image_path_url.startswith("http"):
                image_data = image_path_url
            else:
                try:
                    base64_image = self._encode_image(image_path_url)
                    # Bug fix: the data URI previously hard-coded image/jpeg
                    # for every file, mislabeling PNG/GIF/WebP payloads.
                    mime_type = self._guess_mime_type(image_path_url)
                    image_data = f"data:{mime_type};base64,{base64_image}"
                except Exception as e:
                    return f"Error processing image: {e!s}"

            messages: list[LLMMessage] = [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What's in this image?"},
                        {
                            "type": "image_url",
                            "image_url": {"url": image_data},
                        },
                    ],
                },
            ]
            return self.llm.call(messages=messages)

        except Exception as e:
            return f"An error occurred: {e!s}"

    @staticmethod
    def _encode_image(image_path: str) -> str:
        """Encode an image file as base64.

        Args:
            image_path: Path to the image file

        Returns:
            Base64-encoded image data
        """
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/vision_tool/vision_tool.py",
"license": "MIT License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/weaviate_tool/vector_search.py | import json
import os
import subprocess
from typing import Any
import click
# Optional dependency probe: the weaviate client may not be installed.
# Record availability in a flag and substitute ``Any`` placeholders so the
# annotations below still resolve when the package is absent.
try:
    import weaviate
    from weaviate.classes.config import Configure, Vectorizers
    from weaviate.classes.init import Auth

    WEAVIATE_AVAILABLE = True
except ImportError:
    WEAVIATE_AVAILABLE = False
    weaviate = Any  # type: ignore[assignment,misc] # type placeholder
    Configure = Any  # type: ignore[assignment,misc]
    Vectorizers = Any  # type: ignore[assignment,misc]
    Auth = Any  # type: ignore[assignment,misc]
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
class WeaviateToolSchema(BaseModel):
    """Input for WeaviateTool."""

    # Bare query text; callers should not pass a full natural-language question.
    query: str = Field(
        ...,
        description="The query to search retrieve relevant information from the Weaviate database. Pass only the query, not the question.",
    )
def _set_generative_model() -> Any:
    """Build the default generative config (fixed to OpenAI ``gpt-4o``).

    Serves as the pydantic default factory for
    ``WeaviateVectorSearchTool.generative_model``; the weaviate import is
    deferred so the module can load without the package installed.
    """
    from weaviate.classes.config import Configure

    return Configure.Generative.openai(
        model="gpt-4o",
    )
def _set_vectorizer() -> Any:
    """Build the default vectorizer config for new collections.

    Serves as the pydantic default factory for
    ``WeaviateVectorSearchTool.vectorizer``; the weaviate import is deferred
    so the module can load without the package installed.

    NOTE(review): the model name "nomic-embed-text" is handed to the
    ``text2vec_openai`` vectorizer — confirm this pairing is intentional.
    """
    from weaviate.classes.config import Configure

    return Configure.Vectorizer.text2vec_openai(
        model="nomic-embed-text",
    )
class WeaviateVectorSearchTool(BaseTool):
    """Tool to search the Weaviate database."""

    package_dependencies: list[str] = Field(default_factory=lambda: ["weaviate-client"])
    name: str = "WeaviateVectorSearchTool"
    description: str = "A tool to search the Weaviate database for relevant information on internal documents."
    args_schema: type[BaseModel] = WeaviateToolSchema
    query: str | None = None
    vectorizer: Any = Field(default_factory=_set_vectorizer)
    generative_model: Any = Field(default_factory=_set_generative_model)
    collection_name: str = Field(
        description="The name of the Weaviate collection to search",
    )
    limit: int | None = Field(default=3)
    headers: dict | None = None
    alpha: float = Field(default=0.75)
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OPENAI_API_KEY",
                description="OpenAI API key for embedding generation and retrieval",
                required=True,
            ),
        ]
    )
    weaviate_cluster_url: str = Field(
        ...,
        description="The URL of the Weaviate cluster",
    )
    weaviate_api_key: str = Field(
        ...,
        description="The API key for the Weaviate cluster",
    )

    def __init__(self, **kwargs: Any) -> None:
        """Validate dependencies and prepare the OpenAI auth header.

        Raises:
            ValueError: If OPENAI_API_KEY is not set.
            ImportError: If 'weaviate-client' is missing and the user
                declines installation.
        """
        super().__init__(**kwargs)
        if WEAVIATE_AVAILABLE:
            openai_api_key = os.environ.get("OPENAI_API_KEY")
            if not openai_api_key:
                raise ValueError(
                    "OPENAI_API_KEY environment variable is required for WeaviateVectorSearchTool and it is mandatory to use the tool."
                )
            self.headers = {"X-OpenAI-Api-Key": openai_api_key}
        else:
            if click.confirm(
                "You are missing the 'weaviate-client' package. Would you like to install it?"
            ):
                subprocess.run(["uv", "pip", "install", "weaviate-client"], check=True)  # noqa: S607
            else:
                raise ImportError(
                    "You are missing the 'weaviate-client' package. Would you like to install it?"
                )

    def _run(self, query: str) -> str:
        """Run a hybrid (vector + keyword) search against the collection.

        Args:
            query: The search query text.

        Returns:
            Concatenated JSON dumps of the matching objects' properties.

        Raises:
            ImportError: If the weaviate client library is unavailable.
            ValueError: If the cluster URL or API key is missing.
        """
        if not WEAVIATE_AVAILABLE:
            raise ImportError(
                "You are missing the 'weaviate-client' package. Would you like to install it?"
            )
        if not self.weaviate_cluster_url or not self.weaviate_api_key:
            raise ValueError("WEAVIATE_URL or WEAVIATE_API_KEY is not set")
        client = weaviate.connect_to_weaviate_cloud(
            cluster_url=self.weaviate_cluster_url,
            auth_credentials=Auth.api_key(self.weaviate_api_key),
            headers=self.headers,
        )
        # Bug fix: the connection was previously closed only on the success
        # path, leaking it whenever the query raised. try/finally guarantees
        # cleanup in all cases.
        try:
            internal_docs = client.collections.get(self.collection_name)
            if not internal_docs:
                internal_docs = client.collections.create(
                    name=self.collection_name,
                    vectorizer_config=self.vectorizer,  # type: ignore
                    generative_config=self.generative_model,
                )
            response = internal_docs.query.hybrid(
                query=query, limit=self.limit, alpha=self.alpha
            )
            # NOTE: objects are concatenated back to back (not a JSON array);
            # kept as-is for backward compatibility with existing consumers.
            json_response = ""
            for obj in response.objects:
                json_response += json.dumps(obj.properties, indent=2)
        finally:
            client.close()
        return json_response
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/weaviate_tool/vector_search.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/website_search/website_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedWebsiteSearchToolSchema(BaseModel):
    """Input for WebsiteSearchTool."""

    # Only the query is exposed when the tool is bound to a fixed website.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search a specific website",
    )
class WebsiteSearchToolSchema(FixedWebsiteSearchToolSchema):
    """Input for WebsiteSearchTool."""

    # Per-call website URL, used when no fixed site was bound at construction.
    website: str = Field(
        ..., description="Mandatory valid website URL you want to search on"
    )
class WebsiteSearchTool(RagTool):
    """RAG-backed semantic search over a website's content (fixed or per-call)."""

    name: str = "Search in a specific website"
    description: str = "A tool that can be used to semantic search a query from a specific URL content."
    args_schema: type[BaseModel] = WebsiteSearchToolSchema

    def __init__(self, website: str | None = None, **kwargs):
        """Optionally bind the tool to a single website up front."""
        super().__init__(**kwargs)
        if website is None:
            return
        # A fixed site was supplied: ingest it now and expose the reduced
        # schema that omits the ``website`` argument.
        self.add(website)
        self.description = f"A tool that can be used to semantic search a query from {website} website content."
        self.args_schema = FixedWebsiteSearchToolSchema
        self._generate_description()

    def add(self, website: str) -> None:
        """Ingest a website URL into the underlying RAG store."""
        super().add(website, data_type=DataType.WEBSITE)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        website: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Ingest ``website`` if given, then delegate the query to RagTool."""
        if website is not None:
            self.add(website)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/website_search/website_search_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedXMLSearchToolSchema(BaseModel):
    """Input for XMLSearchTool."""

    # Only the query is exposed when the tool is bound to a fixed XML file.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the XML's content",
    )
class XMLSearchToolSchema(FixedXMLSearchToolSchema):
    """Input for XMLSearchTool."""

    # Per-call XML source, used when no fixed file was bound at construction.
    xml: str = Field(..., description="File path or URL of a XML file to be searched")
class XMLSearchTool(RagTool):
    """RAG-backed semantic search over an XML file's content."""

    name: str = "Search a XML's content"
    description: str = (
        "A tool that can be used to semantic search a query from a XML's content."
    )
    args_schema: type[BaseModel] = XMLSearchToolSchema

    def __init__(self, xml: str | None = None, **kwargs):
        """Optionally bind the tool to a single XML source up front."""
        super().__init__(**kwargs)
        if xml is None:
            return
        # A fixed source was supplied: ingest it now and expose the reduced
        # schema that omits the ``xml`` argument.
        self.add(xml)
        self.description = f"A tool that can be used to semantic search a query the {xml} XML's content."
        self.args_schema = FixedXMLSearchToolSchema
        self._generate_description()

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        xml: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Ingest ``xml`` if given, then delegate the query to RagTool."""
        if xml is not None:
            self.add(xml)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/xml_search_tool/xml_search_tool.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedYoutubeChannelSearchToolSchema(BaseModel):
    """Input for YoutubeChannelSearchTool."""

    # Only the query is exposed when the tool is bound to a fixed channel.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the Youtube Channels content",
    )
class YoutubeChannelSearchToolSchema(FixedYoutubeChannelSearchToolSchema):
    """Input for YoutubeChannelSearchTool."""

    # Per-call channel handle, used when no fixed channel was bound.
    youtube_channel_handle: str = Field(
        ..., description="Mandatory youtube_channel_handle path you want to search"
    )
class YoutubeChannelSearchTool(RagTool):
    """RAG-backed semantic search over a Youtube channel's content."""

    name: str = "Search a Youtube Channels content"
    description: str = "A tool that can be used to semantic search a query from a Youtube Channels content."
    args_schema: type[BaseModel] = YoutubeChannelSearchToolSchema

    def __init__(self, youtube_channel_handle: str | None = None, **kwargs):
        """Optionally bind the tool to a single channel handle up front."""
        super().__init__(**kwargs)
        if youtube_channel_handle is None:
            return
        # A fixed channel was supplied: ingest it now and expose the reduced
        # schema that omits the ``youtube_channel_handle`` argument.
        self.add(youtube_channel_handle)
        self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content."
        self.args_schema = FixedYoutubeChannelSearchToolSchema
        self._generate_description()

    def add(
        self,
        youtube_channel_handle: str,
    ) -> None:
        """Ingest a channel, normalizing the handle to a leading '@'."""
        handle = (
            youtube_channel_handle
            if youtube_channel_handle.startswith("@")
            else f"@{youtube_channel_handle}"
        )
        super().add(handle, data_type=DataType.YOUTUBE_CHANNEL)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        youtube_channel_handle: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Ingest the handle if given, then delegate the query to RagTool."""
        if youtube_channel_handle is not None:
            self.add(youtube_channel_handle)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/youtube_channel_search_tool/youtube_channel_search_tool.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedYoutubeVideoSearchToolSchema(BaseModel):
    """Input for YoutubeVideoSearchTool."""

    # Only the query is exposed when the tool is bound to a fixed video.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the Youtube Video content",
    )
class YoutubeVideoSearchToolSchema(FixedYoutubeVideoSearchToolSchema):
    """Input for YoutubeVideoSearchTool."""

    # Per-call video URL, used when no fixed video was bound at construction.
    youtube_video_url: str = Field(
        ..., description="Mandatory youtube_video_url path you want to search"
    )
class YoutubeVideoSearchTool(RagTool):
    """RAG-backed semantic search over a Youtube video's content."""

    name: str = "Search a Youtube Video content"
    description: str = "A tool that can be used to semantic search a query from a Youtube Video content."
    args_schema: type[BaseModel] = YoutubeVideoSearchToolSchema

    def __init__(self, youtube_video_url: str | None = None, **kwargs):
        """Optionally bind the tool to a single video URL up front."""
        super().__init__(**kwargs)
        if youtube_video_url is None:
            return
        # A fixed video was supplied: ingest it now and expose the reduced
        # schema that omits the ``youtube_video_url`` argument.
        self.add(youtube_video_url)
        self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content."
        self.args_schema = FixedYoutubeVideoSearchToolSchema
        self._generate_description()

    def add(self, youtube_video_url: str) -> None:
        """Ingest a video URL into the underlying RAG store."""
        super().add(youtube_video_url, data_type=DataType.YOUTUBE_VIDEO)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        youtube_video_url: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Ingest the URL if given, then delegate the query to RagTool."""
        if youtube_video_url is not None:
            self.add(youtube_video_url)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/youtube_video_search_tool/youtube_video_search_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/zapier_action_tool/zapier_action_tool.py | import logging
import os
from crewai_tools.adapters.zapier_adapter import ZapierActionTool, ZapierActionsAdapter
# Module-level logger used to report missing-configuration errors.
logger = logging.getLogger(__name__)
def ZapierActionTools(  # noqa: N802
    zapier_api_key: str | None = None, action_list: list[str] | None = None
) -> list[ZapierActionTool]:
    """Factory function that returns Zapier action tools.

    Args:
        zapier_api_key: The API key for Zapier; falls back to the
            ``ZAPIER_API_KEY`` environment variable when omitted.
        action_list: Optional list of specific tool names to include.

    Returns:
        A list of Zapier action tools.

    Raises:
        ValueError: If no key is supplied and the environment variable is unset.
    """
    key = zapier_api_key if zapier_api_key is not None else os.getenv("ZAPIER_API_KEY")
    if key is None:
        logger.error("ZAPIER_API_KEY is not set")
        raise ValueError("ZAPIER_API_KEY is not set")

    available_tools = ZapierActionsAdapter(key).tools()
    if action_list is None:
        return available_tools
    # Keep only the explicitly requested tools.
    wanted = set(action_list)
    return [t for t in available_tools if t.name in wanted]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/zapier_action_tool/zapier_action_tool.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/tests/adapters/mcp_adapter_test.py | from textwrap import dedent
from unittest.mock import MagicMock, patch
from crewai_tools import MCPServerAdapter
from crewai_tools.adapters.tool_collection import ToolCollection
from mcp import StdioServerParameters
import pytest
@pytest.fixture
def echo_server_script():
    """Inline FastMCP server (stdio transport) exposing echo and calc tools."""
    return dedent(
        '''
        from mcp.server.fastmcp import FastMCP

        mcp = FastMCP("Echo Server")

        @mcp.tool()
        def echo_tool(text: str) -> str:
            """Echo the input text"""
            return f"Echo: {text}"

        @mcp.tool()
        def calc_tool(a: int, b: int) -> int:
            """Calculate a + b"""
            return a + b

        mcp.run()
        '''
    )
@pytest.fixture
def echo_server_sse_script():
    """Inline FastMCP server (SSE transport on 127.0.0.1:8000) with the same tools."""
    return dedent(
        '''
        from mcp.server.fastmcp import FastMCP

        mcp = FastMCP("Echo Server", host="127.0.0.1", port=8000)

        @mcp.tool()
        def echo_tool(text: str) -> str:
            """Echo the input text"""
            return f"Echo: {text}"

        @mcp.tool()
        def calc_tool(a: int, b: int) -> int:
            """Calculate a + b"""
            return a + b

        mcp.run("sse")
        '''
    )
@pytest.fixture
def echo_sse_server(echo_server_sse_script):
    """Launch the SSE echo server in a subprocess and yield its endpoint URL."""
    import subprocess
    import time

    # Start the SSE server process with its own process group
    process = subprocess.Popen(
        ["python", "-c", echo_server_sse_script],
    )
    # Give the server a moment to start up
    time.sleep(1)
    try:
        yield {"url": "http://127.0.0.1:8000/sse"}
    finally:
        # Clean up the process when test is done
        process.kill()
        process.wait()
def test_context_manager_syntax(echo_server_script):
    """Adapter used as a context manager exposes all stdio server tools."""
    serverparams = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    with MCPServerAdapter(serverparams) as tools:
        assert isinstance(tools, ToolCollection)
        assert len(tools) == 2
        assert tools[0].name == "echo_tool"
        assert tools[1].name == "calc_tool"
        # Tool results come back as strings, hence "8" rather than 8.
        assert tools[0].run(text="hello") == "Echo: hello"
        assert tools[1].run(a=5, b=3) == "8"
def test_context_manager_syntax_sse(echo_sse_server):
    """Context-manager usage works against an SSE server as well."""
    sse_serverparams = echo_sse_server
    with MCPServerAdapter(sse_serverparams) as tools:
        assert len(tools) == 2
        assert tools[0].name == "echo_tool"
        assert tools[1].name == "calc_tool"
        assert tools[0].run(text="hello") == "Echo: hello"
        assert tools[1].run(a=5, b=3) == "8"
def test_try_finally_syntax(echo_server_script):
    """Manual start/stop (try/finally) lifecycle over stdio."""
    serverparams = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    try:
        mcp_server_adapter = MCPServerAdapter(serverparams)
        tools = mcp_server_adapter.tools
        assert len(tools) == 2
        assert tools[0].name == "echo_tool"
        assert tools[1].name == "calc_tool"
        assert tools[0].run(text="hello") == "Echo: hello"
        assert tools[1].run(a=5, b=3) == "8"
    finally:
        mcp_server_adapter.stop()
def test_try_finally_syntax_sse(echo_sse_server):
    """Manual start/stop lifecycle over SSE."""
    sse_serverparams = echo_sse_server
    mcp_server_adapter = MCPServerAdapter(sse_serverparams)
    try:
        tools = mcp_server_adapter.tools
        assert len(tools) == 2
        assert tools[0].name == "echo_tool"
        assert tools[1].name == "calc_tool"
        assert tools[0].run(text="hello") == "Echo: hello"
        assert tools[1].run(a=5, b=3) == "8"
    finally:
        mcp_server_adapter.stop()
def test_context_manager_with_filtered_tools(echo_server_script):
    """Positional tool names restrict the adapter to that subset."""
    serverparams = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    # Only select the echo_tool
    with MCPServerAdapter(serverparams, "echo_tool") as tools:
        assert isinstance(tools, ToolCollection)
        assert len(tools) == 1
        assert tools[0].name == "echo_tool"
        assert tools[0].run(text="hello") == "Echo: hello"
        # Check that calc_tool is not present
        with pytest.raises(IndexError):
            _ = tools[1]
        with pytest.raises(KeyError):
            _ = tools["calc_tool"]
def test_context_manager_sse_with_filtered_tools(echo_sse_server):
    """Tool-name filtering also applies over the SSE transport."""
    sse_serverparams = echo_sse_server
    # Only select the calc_tool
    with MCPServerAdapter(sse_serverparams, "calc_tool") as tools:
        assert isinstance(tools, ToolCollection)
        assert len(tools) == 1
        assert tools[0].name == "calc_tool"
        assert tools[0].run(a=10, b=5) == "15"
        # Check that echo_tool is not present
        with pytest.raises(IndexError):
            _ = tools[1]
        with pytest.raises(KeyError):
            _ = tools["echo_tool"]
def test_try_finally_with_filtered_tools(echo_server_script):
    """Filtering with both names present; see inline note on ordering."""
    serverparams = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    try:
        # Select both tools but in reverse order
        mcp_server_adapter = MCPServerAdapter(serverparams, "calc_tool", "echo_tool")
        tools = mcp_server_adapter.tools
        assert len(tools) == 2
        # The order of tools is based on filter_by_names which preserves
        # the original order from the collection
        assert tools[0].name == "calc_tool"
        assert tools[1].name == "echo_tool"
    finally:
        mcp_server_adapter.stop()
def test_filter_with_nonexistent_tool(echo_server_script):
    """Unknown names in the filter are silently ignored."""
    serverparams = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    # Include a tool that doesn't exist
    with MCPServerAdapter(serverparams, "echo_tool", "nonexistent_tool") as tools:
        # Only echo_tool should be in the result
        assert len(tools) == 1
        assert tools[0].name == "echo_tool"
def test_filter_with_only_nonexistent_tools(echo_server_script):
    """A filter matching nothing yields an empty collection, not an error."""
    serverparams = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    # All requested tools don't exist
    with MCPServerAdapter(serverparams, "nonexistent1", "nonexistent2") as tools:
        # Should return an empty tool collection
        assert isinstance(tools, ToolCollection)
        assert len(tools) == 0
def test_connect_timeout_parameter(echo_server_script):
    """A custom connect_timeout still yields the full, working tool set."""
    params = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    with MCPServerAdapter(params, connect_timeout=60) as tools:
        assert isinstance(tools, ToolCollection)
        assert len(tools) == 2
        # Both tools are exposed in their original order and remain callable.
        assert [tools[0].name, tools[1].name] == ["echo_tool", "calc_tool"]
        assert tools[0].run(text="hello") == "Echo: hello"
def test_connect_timeout_with_filtered_tools(echo_server_script):
    """connect_timeout and name filtering can be combined."""
    params = StdioServerParameters(
        command="uv", args=["run", "python", "-c", echo_server_script]
    )
    with MCPServerAdapter(params, "echo_tool", connect_timeout=45) as tools:
        assert isinstance(tools, ToolCollection)
        assert len(tools) == 1
        echo = tools[0]
        assert echo.name == "echo_tool"
        assert echo.run(text="timeout test") == "Echo: timeout test"
@patch("crewai_tools.adapters.mcp_adapter.MCPAdapt")
def test_connect_timeout_passed_to_mcpadapt(mock_mcpadapt):
    """The adapter forwards connect_timeout (default 30s) to MCPAdapt."""
    mock_mcpadapt.return_value = MagicMock()
    serverparams = StdioServerParameters(command="uv", args=["run", "echo", "test"])

    # Default: 30 seconds, passed as the third positional argument.
    MCPServerAdapter(serverparams)
    mock_mcpadapt.assert_called_once()
    assert mock_mcpadapt.call_args[0][2] == 30

    # An explicit timeout overrides the default.
    mock_mcpadapt.reset_mock()
    MCPServerAdapter(serverparams, connect_timeout=5)
    mock_mcpadapt.assert_called_once()
    assert mock_mcpadapt.call_args[0][2] == 5
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/adapters/mcp_adapter_test.py",
"license": "MIT License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/base_tool_test.py | from collections.abc import Callable
from crewai.tools import BaseTool, tool
from crewai.tools.base_tool import to_langchain
def test_creating_a_tool_using_annotation():
    """The @tool decorator builds a fully-described, convertible tool."""

    @tool("Name of my tool")
    def my_tool(question: str) -> str:
        """Clear description for what this tool is useful for, you agent will need this information to use it."""
        return question

    expected_description = "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it."
    expected_properties = {"question": {"title": "Question", "type": "string"}}

    # The decorator must expose the declared name, the rendered
    # description, the generated args schema, and the wrapped callable.
    assert my_tool.name == "Name of my tool"
    assert my_tool.description == expected_description
    assert my_tool.args_schema.model_json_schema()["properties"] == expected_properties
    assert (
        my_tool.func("What is the meaning of life?") == "What is the meaning of life?"
    )

    # Converting to a langchain tool preserves the same contract.
    converted_tool = to_langchain([my_tool])[0]
    assert converted_tool.name == "Name of my tool"
    assert converted_tool.description == expected_description
    assert (
        converted_tool.args_schema.model_json_schema()["properties"]
        == expected_properties
    )
    assert (
        converted_tool.func("What is the meaning of life?")
        == "What is the meaning of life?"
    )
def test_creating_a_tool_using_baseclass():
    """Subclassing BaseTool yields the same metadata as the decorator path."""

    class MyCustomTool(BaseTool):
        name: str = "Name of my tool"
        description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."

        def _run(self, question: str) -> str:
            return question

    my_tool = MyCustomTool()
    expected_description = "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it."
    expected_properties = {"question": {"title": "Question", "type": "string"}}

    # Name, rendered description, schema, and _run behavior.
    assert my_tool.name == "Name of my tool"
    assert my_tool.description == expected_description
    assert my_tool.args_schema.model_json_schema()["properties"] == expected_properties
    assert (
        my_tool._run("What is the meaning of life?") == "What is the meaning of life?"
    )

    # The langchain conversion is exercised via the runnable interface.
    converted_tool = to_langchain([my_tool])[0]
    assert converted_tool.name == "Name of my tool"
    assert converted_tool.description == expected_description
    assert (
        converted_tool.args_schema.model_json_schema()["properties"]
        == expected_properties
    )
    assert (
        converted_tool.invoke({"question": "What is the meaning of life?"})
        == "What is the meaning of life?"
    )
def test_setting_cache_function():
    """A custom cache_function overrides the always-cache default."""

    class MyCustomTool(BaseTool):
        name: str = "Name of my tool"
        description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."
        cache_function: Callable = lambda: False

        def _run(self, question: str) -> str:
            return question

    # The override is honored: caching is disabled.
    assert not MyCustomTool().cache_function()
def test_default_cache_function_is_true():
    """Without an override, BaseTool's cache_function enables caching."""

    class MyCustomTool(BaseTool):
        name: str = "Name of my tool"
        description: str = "Clear description for what this tool is useful for, you agent will need this information to use it."

        def _run(self, question: str) -> str:
            return question

    assert MyCustomTool().cache_function()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/base_tool_test.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/file_read_tool_test.py | import os
from unittest.mock import mock_open, patch
from crewai_tools import FileReadTool
def test_file_read_tool_constructor():
    """Test FileReadTool initialization with file_path.

    Uses tempfile.mkstemp instead of a hard-coded /tmp path (portable,
    collision-free) and a try/finally so the file is removed even when
    an assertion fails.
    """
    import tempfile

    fd, test_file = tempfile.mkstemp(suffix="test_file.txt")
    try:
        with os.fdopen(fd, "w") as f:
            f.write("Hello, World!")

        # Test initialization with file_path
        tool = FileReadTool(file_path=test_file)
        assert tool.file_path == test_file
        # The constructor embeds the file path into the tool description;
        # the suffix guarantees this substring is present.
        assert "test_file.txt" in tool.description
    finally:
        # Clean up
        os.remove(test_file)
def test_file_read_tool_run():
    """_run returns file contents when file_path is supplied at call time."""
    expected = "Hello, World!"
    # Mock out the filesystem so no real file is needed.
    with patch("builtins.open", mock_open(read_data=expected)):
        tool = FileReadTool()
        assert tool._run(file_path="/tmp/test_file.txt") == expected
def test_file_read_tool_error_handling():
    """Missing paths, absent files, and permission problems are reported as strings."""
    tool = FileReadTool()

    # No path given at construction or call time.
    assert "Error: No file path provided" in tool._run()

    # Path that does not exist on disk.
    assert "Error: File not found at path:" in tool._run(
        file_path="/nonexistent/file.txt"
    )

    # open() raising PermissionError is surfaced, not propagated.
    with patch("builtins.open", side_effect=PermissionError()):
        assert "Error: Permission denied" in tool._run(
            file_path="/tmp/no_permission.txt"
        )
def test_file_read_tool_constructor_and_run():
    """A file_path passed to _run overrides the constructor's file_path."""
    # Constructor-supplied path is used when _run gets no argument.
    with patch("builtins.open", mock_open(read_data="File 1 content")):
        tool = FileReadTool(file_path="/tmp/test1.txt")
        assert tool._run() == "File 1 content"

    # A path passed to _run wins over the constructor path.
    with patch("builtins.open", mock_open(read_data="File 2 content")):
        assert tool._run(file_path="/tmp/test2.txt") == "File 2 content"
def test_file_read_tool_chunk_reading():
    """start_line/line_count select a window of lines from the file."""
    test_file = "/tmp/multiline_test.txt"
    lines = [f"Line {i}\n" for i in range(1, 11)]

    with patch("builtins.open", mock_open(read_data="".join(lines))):
        tool = FileReadTool()

        # A window in the middle of the file (start_line is 1-based).
        assert tool._run(file_path=test_file, start_line=3, line_count=3) == "".join(
            lines[2:5]
        )

        # From a given line to the end of the file.
        assert tool._run(file_path=test_file, start_line=8) == "".join(lines[7:])

        # Defaults read the entire file.
        assert tool._run(file_path=test_file) == "".join(lines)

        # Explicit start at line 1 combined with a line_count.
        assert tool._run(file_path=test_file, start_line=1, line_count=5) == "".join(
            lines[:5]
        )
def test_file_read_tool_chunk_error_handling():
    """Out-of-range start lines error; oversized line counts are clamped."""
    test_file = "/tmp/short_test.txt"
    lines = ["Line 1\n", "Line 2\n", "Line 3\n"]

    with patch("builtins.open", mock_open(read_data="".join(lines))):
        tool = FileReadTool()

        # Asking to start past EOF yields an error message, not an exception.
        result = tool._run(file_path=test_file, start_line=10)
        assert "Error: Start line 10 exceeds the number of lines in the file" in result

        # A line_count that runs past EOF simply returns the remaining lines.
        assert tool._run(file_path=test_file, start_line=2, line_count=10) == "".join(
            lines[1:]
        )
def test_file_read_tool_zero_or_negative_start_line():
    """start_line values of None, 0, or negative all mean 'from the top'."""
    test_file = "/tmp/negative_test.txt"
    lines = [f"Line {i}\n" for i in range(1, 6)]
    whole_file = "".join(lines)

    with patch("builtins.open", mock_open(read_data=whole_file)):
        tool = FileReadTool()

        # None, zero, and negative starts are all normalized to line 1.
        for start in (None, 0, -5):
            assert tool._run(file_path=test_file, start_line=start) == whole_file

        # The same normalization applies when a line_count is supplied.
        assert tool._run(file_path=test_file, start_line=0, line_count=3) == "".join(
            lines[:3]
        )
        assert tool._run(file_path=test_file, start_line=-10, line_count=2) == "".join(
            lines[:2]
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/file_read_tool_test.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_csv_loader.py | import os
import tempfile
from unittest.mock import Mock, patch
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.csv_loader import CSVLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
@pytest.fixture
def temp_csv_file():
    """Factory fixture: write CSV content to a temp file and return its path.

    Every file created through the factory is removed at teardown, even
    if the test itself already deleted one of them (previously that
    raised FileNotFoundError and failed teardown).
    """
    created_files = []

    def _create(content: str):
        f = tempfile.NamedTemporaryFile(mode="w", suffix=".csv", delete=False)
        f.write(content)
        f.close()
        created_files.append(f.name)
        return f.name

    yield _create

    for path in created_files:
        # Best-effort cleanup: a test may legitimately remove its own file.
        try:
            os.unlink(path)
        except FileNotFoundError:
            pass
class TestCSVLoader:
    """Unit tests for CSVLoader: file, raw-text and URL sources."""

    def test_load_csv_from_file(self, temp_csv_file):
        """A well-formed CSV file renders as a header line plus one line per row."""
        path = temp_csv_file("name,age,city\nJohn,25,New York\nJane,30,Chicago")
        loader = CSVLoader()
        result = loader.load(SourceContent(path))
        assert isinstance(result, LoaderResult)
        assert "Headers: name | age | city" in result.content
        assert "Row 1: name: John | age: 25 | city: New York" in result.content
        assert "Row 2: name: Jane | age: 30 | city: Chicago" in result.content
        assert result.metadata == {
            "format": "csv",
            "columns": ["name", "age", "city"],
            "rows": 2,
        }
        assert result.source == path
        assert result.doc_id

    def test_load_csv_with_empty_values(self, temp_csv_file):
        """Empty cells are omitted from the rendered row text."""
        path = temp_csv_file("name,age,city\nJohn,,New York\n,30,")
        result = CSVLoader().load(SourceContent(path))
        assert "Row 1: name: John | city: New York" in result.content
        assert "Row 2: age: 30" in result.content
        assert result.metadata["rows"] == 2

    def test_load_csv_malformed(self, temp_csv_file):
        """A dangling quote does not crash the loader; content is kept verbatim."""
        path = temp_csv_file('invalid,csv\nunclosed quote "missing')
        result = CSVLoader().load(SourceContent(path))
        assert "Headers: invalid | csv" in result.content
        assert 'Row 1: invalid: unclosed quote "missing' in result.content
        assert result.metadata["columns"] == ["invalid", "csv"]

    def test_load_csv_empty_file(self, temp_csv_file):
        """An empty file yields empty content and a zero row count."""
        path = temp_csv_file("")
        result = CSVLoader().load(SourceContent(path))
        assert result.content == ""
        assert result.metadata["rows"] == 0

    def test_load_csv_text_input(self):
        """Raw CSV text (not a path) is parsed directly."""
        raw_csv = "col1,col2\nvalue1,value2\nvalue3,value4"
        result = CSVLoader().load(SourceContent(raw_csv))
        assert "Headers: col1 | col2" in result.content
        assert "Row 1: col1: value1 | col2: value2" in result.content
        assert "Row 2: col1: value3 | col2: value4" in result.content
        assert result.metadata["columns"] == ["col1", "col2"]
        assert result.metadata["rows"] == 2

    def test_doc_id_is_deterministic(self, temp_csv_file):
        """Loading the same source twice produces the same doc_id."""
        path = temp_csv_file("name,value\ntest,123")
        loader = CSVLoader()
        result1 = loader.load(SourceContent(path))
        result2 = loader.load(SourceContent(path))
        assert result1.doc_id == result2.doc_id

    @patch("requests.get")
    def test_load_csv_from_url(self, mock_get):
        """URL sources are fetched via requests with CSV-specific headers."""
        mock_get.return_value = Mock(
            text="name,value\ntest,123", raise_for_status=Mock(return_value=None)
        )
        result = CSVLoader().load(SourceContent("https://example.com/data.csv"))
        assert "Headers: name | value" in result.content
        assert "Row 1: name: test | value: 123" in result.content
        # The loader identifies itself and asks for CSV content.
        headers = mock_get.call_args[1]["headers"]
        assert "text/csv" in headers["Accept"]
        assert "crewai-tools CSVLoader" in headers["User-Agent"]

    @patch("requests.get")
    def test_load_csv_with_custom_headers(self, mock_get):
        """Caller-supplied headers replace the loader's defaults entirely."""
        mock_get.return_value = Mock(
            text="data,value\ntest,456", raise_for_status=Mock(return_value=None)
        )
        headers = {"Authorization": "Bearer token", "Custom-Header": "value"}
        result = CSVLoader().load(
            SourceContent("https://example.com/data.csv"), headers=headers
        )
        assert "Headers: data | value" in result.content
        assert mock_get.call_args[1]["headers"] == headers

    @patch("requests.get")
    def test_csv_loader_handles_network_errors(self, mock_get):
        """Transport-level failures surface as ValueError, not raw exceptions."""
        mock_get.side_effect = Exception("Network error")
        loader = CSVLoader()
        with pytest.raises(ValueError, match="Error fetching content from URL"):
            loader.load(SourceContent("https://example.com/data.csv"))

    @patch("requests.get")
    def test_csv_loader_handles_http_error(self, mock_get):
        """HTTP error statuses (raise_for_status) also surface as ValueError."""
        mock_get.return_value = Mock()
        mock_get.return_value.raise_for_status.side_effect = Exception("404 Not Found")
        loader = CSVLoader()
        with pytest.raises(ValueError, match="Error fetching content from URL"):
            loader.load(SourceContent("https://example.com/notfound.csv"))
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_csv_loader.py",
"license": "MIT License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_directory_loader.py | import os
import tempfile
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.directory_loader import DirectoryLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
@pytest.fixture
def temp_directory():
    """Provide a throwaway directory that is removed after the test."""
    tmp = tempfile.TemporaryDirectory()
    try:
        yield tmp.name
    finally:
        tmp.cleanup()
class TestDirectoryLoader:
    """Unit tests for DirectoryLoader: traversal, filtering, and error paths."""

    def _create_file(self, directory, filename, content="test content"):
        """Helper: write `content` to directory/filename and return the path."""
        path = os.path.join(directory, filename)
        with open(path, "w") as f:
            f.write(content)
        return path

    def test_load_non_recursive(self, temp_directory):
        """recursive=False picks up top-level files only, not subdirectories."""
        self._create_file(temp_directory, "file1.txt")
        self._create_file(temp_directory, "file2.txt")
        subdir = os.path.join(temp_directory, "subdir")
        os.makedirs(subdir)
        self._create_file(subdir, "file3.txt")
        loader = DirectoryLoader()
        result = loader.load(SourceContent(temp_directory), recursive=False)
        assert isinstance(result, LoaderResult)
        assert "file1.txt" in result.content
        assert "file2.txt" in result.content
        assert "file3.txt" not in result.content
        assert result.metadata["total_files"] == 2

    def test_load_recursive(self, temp_directory):
        """recursive=True descends through nested subdirectories."""
        self._create_file(temp_directory, "file1.txt")
        nested = os.path.join(temp_directory, "subdir", "nested")
        os.makedirs(nested)
        self._create_file(os.path.join(temp_directory, "subdir"), "file2.txt")
        self._create_file(nested, "file3.txt")
        loader = DirectoryLoader()
        result = loader.load(SourceContent(temp_directory), recursive=True)
        assert all(f"file{i}.txt" in result.content for i in range(1, 4))

    def test_include_and_exclude_extensions(self, temp_directory):
        """include_extensions whitelists; exclude_extensions blacklists."""
        self._create_file(temp_directory, "a.txt")
        self._create_file(temp_directory, "b.py")
        self._create_file(temp_directory, "c.md")
        loader = DirectoryLoader()
        result = loader.load(
            SourceContent(temp_directory), include_extensions=[".txt", ".py"]
        )
        assert "a.txt" in result.content
        assert "b.py" in result.content
        assert "c.md" not in result.content
        result2 = loader.load(
            SourceContent(temp_directory), exclude_extensions=[".py", ".md"]
        )
        assert "a.txt" in result2.content
        assert "b.py" not in result2.content
        assert "c.md" not in result2.content

    def test_max_files_limit(self, temp_directory):
        """max_files caps how many files are processed."""
        for i in range(5):
            self._create_file(temp_directory, f"file{i}.txt")
        loader = DirectoryLoader()
        result = loader.load(SourceContent(temp_directory), max_files=3)
        assert result.metadata["total_files"] == 3
        # NOTE(review): assumes the first 3 files in listing order are
        # file0..file2 — depends on the loader's sort order; confirm.
        assert all(f"file{i}.txt" in result.content for i in range(3))

    def test_hidden_files_and_dirs_excluded(self, temp_directory):
        """Dot-files and dot-directories are skipped, even recursively."""
        self._create_file(temp_directory, "visible.txt", "visible")
        self._create_file(temp_directory, ".hidden.txt", "hidden")
        hidden_dir = os.path.join(temp_directory, ".hidden")
        os.makedirs(hidden_dir)
        self._create_file(hidden_dir, "inside_hidden.txt")
        loader = DirectoryLoader()
        result = loader.load(SourceContent(temp_directory), recursive=True)
        assert "visible.txt" in result.content
        assert ".hidden.txt" not in result.content
        assert "inside_hidden.txt" not in result.content

    def test_directory_does_not_exist(self):
        """A missing directory raises FileNotFoundError."""
        loader = DirectoryLoader()
        with pytest.raises(FileNotFoundError, match="Directory does not exist"):
            loader.load(SourceContent("/path/does/not/exist"))

    def test_path_is_not_a_directory(self):
        """A regular-file path is rejected with ValueError."""
        with tempfile.NamedTemporaryFile() as f:
            loader = DirectoryLoader()
            with pytest.raises(ValueError, match="Path is not a directory"):
                loader.load(SourceContent(f.name))

    def test_url_not_supported(self):
        """URL sources are explicitly unsupported for directories."""
        loader = DirectoryLoader()
        with pytest.raises(ValueError, match="URL directory loading is not supported"):
            loader.load(SourceContent("https://example.com"))

    def test_processing_error_handling(self, temp_directory):
        """Per-file failures are recorded in metadata, not raised."""
        self._create_file(temp_directory, "valid.txt")
        self._create_file(temp_directory, "error.txt")
        loader = DirectoryLoader()
        # Monkey-patch the per-file hook so only "error.txt" fails.
        original_method = loader._process_single_file
        def mock(file_path):
            if "error" in file_path:
                raise ValueError("Mock error")
            return original_method(file_path)
        loader._process_single_file = mock
        result = loader.load(SourceContent(temp_directory))
        assert "valid.txt" in result.content
        # The failing file is still listed, annotated with (ERROR).
        assert "error.txt (ERROR)" in result.content
        assert result.metadata["errors"] == 1
        assert len(result.metadata["error_details"]) == 1

    def test_metadata_structure(self, temp_directory):
        """The result metadata carries the documented summary keys."""
        self._create_file(temp_directory, "test.txt", "Sample")
        loader = DirectoryLoader()
        result = loader.load(SourceContent(temp_directory))
        metadata = result.metadata
        expected_keys = {
            "format",
            "directory_path",
            "total_files",
            "processed_files",
            "errors",
            "file_details",
            "error_details",
        }
        assert expected_keys.issubset(metadata)
        assert all(
            k in metadata["file_details"][0] for k in ("path", "metadata", "source")
        )

    def test_empty_directory(self, temp_directory):
        """An empty directory loads cleanly with zero counts."""
        loader = DirectoryLoader()
        result = loader.load(SourceContent(temp_directory))
        assert result.content == ""
        assert result.metadata["total_files"] == 0
        assert result.metadata["processed_files"] == 0
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_directory_loader.py",
"license": "MIT License",
"lines": 125,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_docx_loader.py | import tempfile
from unittest.mock import Mock, patch
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.docx_loader import DOCXLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
class TestDOCXLoader:
    """Unit tests for DOCXLoader with python-docx and requests mocked out."""

    @patch("docx.Document")
    def test_load_docx_from_file(self, mock_docx_class):
        """Non-blank paragraphs are joined with newlines; blanks are dropped."""
        mock_doc = Mock()
        mock_doc.paragraphs = [
            Mock(text="First paragraph"),
            Mock(text="Second paragraph"),
            Mock(text="   "),  # Blank paragraph
        ]
        mock_doc.tables = []
        mock_docx_class.return_value = mock_doc
        with tempfile.NamedTemporaryFile(suffix=".docx") as f:
            loader = DOCXLoader()
            result = loader.load(SourceContent(f.name))
            assert isinstance(result, LoaderResult)
            assert result.content == "First paragraph\nSecond paragraph"
            # Metadata counts all paragraphs, including the blank one.
            assert result.metadata == {"format": "docx", "paragraphs": 3, "tables": 0}
            assert result.source == f.name

    @patch("docx.Document")
    def test_load_docx_with_tables(self, mock_docx_class):
        """The table count from the document lands in metadata."""
        mock_doc = Mock()
        mock_doc.paragraphs = [Mock(text="Document with table")]
        mock_doc.tables = [Mock(), Mock()]
        mock_docx_class.return_value = mock_doc
        with tempfile.NamedTemporaryFile(suffix=".docx") as f:
            loader = DOCXLoader()
            result = loader.load(SourceContent(f.name))
            assert result.metadata["tables"] == 2

    @patch("requests.get")
    @patch("docx.Document")
    @patch("tempfile.NamedTemporaryFile")
    @patch("os.unlink")
    def test_load_docx_from_url(
        self, mock_unlink, mock_tempfile, mock_docx_class, mock_get
    ):
        """URL sources are downloaded to a temp file, parsed, then deleted."""
        mock_get.return_value = Mock(
            content=b"fake docx content", raise_for_status=Mock()
        )
        # NOTE(review): Mock(name=...) sets the mock's *repr* name, not a
        # `.name` attribute — `mock_temp.name` is a child mock here. The
        # test still passes because docx.Document is mocked; confirm intent.
        mock_temp = Mock(name="/tmp/temp_docx_file.docx")
        mock_temp.__enter__ = Mock(return_value=mock_temp)
        mock_temp.__exit__ = Mock(return_value=None)
        mock_tempfile.return_value = mock_temp
        mock_doc = Mock()
        mock_doc.paragraphs = [Mock(text="Content from URL")]
        mock_doc.tables = []
        mock_docx_class.return_value = mock_doc
        loader = DOCXLoader()
        result = loader.load(SourceContent("https://example.com/test.docx"))
        assert "Content from URL" in result.content
        assert result.source == "https://example.com/test.docx"
        # Default request headers advertise the DOCX MIME type and the tool.
        headers = mock_get.call_args[1]["headers"]
        assert (
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
            in headers["Accept"]
        )
        assert "crewai-tools DOCXLoader" in headers["User-Agent"]
        # The downloaded bytes were written to the temp file.
        mock_temp.write.assert_called_once_with(b"fake docx content")

    @patch("requests.get")
    @patch("docx.Document")
    def test_load_docx_from_url_with_custom_headers(self, mock_docx_class, mock_get):
        """Caller-supplied headers replace the loader's defaults."""
        mock_get.return_value = Mock(
            content=b"fake docx content", raise_for_status=Mock()
        )
        mock_docx_class.return_value = Mock(paragraphs=[], tables=[])
        loader = DOCXLoader()
        custom_headers = {"Authorization": "Bearer token"}
        with patch("tempfile.NamedTemporaryFile"), patch("os.unlink"):
            loader.load(
                SourceContent("https://example.com/test.docx"), headers=custom_headers
            )
        assert mock_get.call_args[1]["headers"] == custom_headers

    @patch("requests.get")
    def test_load_docx_url_download_error(self, mock_get):
        """Transport failures during download surface as ValueError."""
        mock_get.side_effect = Exception("Network error")
        loader = DOCXLoader()
        with pytest.raises(ValueError, match="Error fetching content from URL"):
            loader.load(SourceContent("https://example.com/test.docx"))

    @patch("requests.get")
    def test_load_docx_url_http_error(self, mock_get):
        """HTTP error statuses (raise_for_status) surface as ValueError."""
        mock_get.return_value = Mock(
            raise_for_status=Mock(side_effect=Exception("404 Not Found"))
        )
        loader = DOCXLoader()
        with pytest.raises(ValueError, match="Error fetching content from URL"):
            loader.load(SourceContent("https://example.com/notfound.docx"))

    def test_load_docx_invalid_source(self):
        """A string that is neither a path nor a URL is rejected."""
        loader = DOCXLoader()
        with pytest.raises(ValueError, match="Source must be a valid file path or URL"):
            loader.load(SourceContent("not_a_file_or_url"))

    @patch("docx.Document")
    def test_load_docx_parsing_error(self, mock_docx_class):
        """python-docx parse failures are wrapped in ValueError."""
        mock_docx_class.side_effect = Exception("Invalid DOCX file")
        with tempfile.NamedTemporaryFile(suffix=".docx") as f:
            loader = DOCXLoader()
            with pytest.raises(ValueError, match="Error loading DOCX file"):
                loader.load(SourceContent(f.name))

    @patch("docx.Document")
    def test_load_docx_empty_document(self, mock_docx_class):
        """A document with no paragraphs or tables yields empty content."""
        mock_docx_class.return_value = Mock(paragraphs=[], tables=[])
        with tempfile.NamedTemporaryFile(suffix=".docx") as f:
            loader = DOCXLoader()
            result = loader.load(SourceContent(f.name))
            assert result.content == ""
            assert result.metadata == {"paragraphs": 0, "tables": 0, "format": "docx"}

    @patch("docx.Document")
    def test_docx_doc_id_generation(self, mock_docx_class):
        """Loading the same source twice gives the same doc_id."""
        mock_docx_class.return_value = Mock(
            paragraphs=[Mock(text="Consistent content")], tables=[]
        )
        with tempfile.NamedTemporaryFile(suffix=".docx") as f:
            loader = DOCXLoader()
            source = SourceContent(f.name)
            assert loader.load(source).doc_id == loader.load(source).doc_id
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_docx_loader.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_embedding_service.py | """
Tests for the enhanced embedding service.
"""
import os
import pytest
from unittest.mock import Mock, patch
from crewai_tools.rag.embedding_service import EmbeddingService, EmbeddingConfig
class TestEmbeddingConfig:
    """Tests for the EmbeddingConfig model."""

    def test_default_config(self):
        """Only provider and model are required; all other fields default."""
        config = EmbeddingConfig(provider="openai", model="text-embedding-3-small")

        assert config.provider == "openai"
        assert config.model == "text-embedding-3-small"
        # Optional fields fall back to their documented defaults.
        assert config.api_key is None
        assert config.timeout == 30.0
        assert config.max_retries == 3
        assert config.batch_size == 100
        assert config.extra_config == {}

    def test_custom_config(self):
        """Every field can be overridden at construction time."""
        overrides = {
            "provider": "voyageai",
            "model": "voyage-2",
            "api_key": "test-key",
            "timeout": 60.0,
            "max_retries": 5,
            "batch_size": 50,
            "extra_config": {"input_type": "document"},
        }
        config = EmbeddingConfig(**overrides)

        # Each supplied value must round-trip onto the model unchanged.
        for field, expected in overrides.items():
            assert getattr(config, field) == expected
class TestEmbeddingService:
"""Test the EmbeddingService class."""
def test_list_supported_providers(self):
"""Test listing supported providers."""
providers = EmbeddingService.list_supported_providers()
expected_providers = [
"openai", "azure", "voyageai", "cohere", "google-generativeai",
"amazon-bedrock", "huggingface", "jina", "ollama", "sentence-transformer",
"instructor", "watsonx", "custom"
]
assert isinstance(providers, list)
assert len(providers) >= 15 # Should have at least 15 providers
assert all(provider in providers for provider in expected_providers)
def test_get_default_api_key(self):
"""Test getting default API keys from environment."""
service = EmbeddingService.__new__(EmbeddingService) # Create without __init__
# Test with environment variable set
with patch.dict(os.environ, {"OPENAI_API_KEY": "test-openai-key"}):
api_key = service._get_default_api_key("openai")
assert api_key == "test-openai-key"
# Test with no environment variable
with patch.dict(os.environ, {}, clear=True):
api_key = service._get_default_api_key("openai")
assert api_key is None
# Test unknown provider
api_key = service._get_default_api_key("unknown-provider")
assert api_key is None
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_initialization_success(self, mock_build_embedder):
"""Test successful initialization."""
# Mock the embedding function
mock_embedding_function = Mock()
mock_build_embedder.return_value = mock_embedding_function
service = EmbeddingService(
provider="openai",
model="text-embedding-3-small",
api_key="test-key"
)
assert service.config.provider == "openai"
assert service.config.model == "text-embedding-3-small"
assert service.config.api_key == "test-key"
assert service._embedding_function == mock_embedding_function
# Verify build_embedder was called with correct config
mock_build_embedder.assert_called_once()
call_args = mock_build_embedder.call_args[0][0]
assert call_args["provider"] == "openai"
assert call_args["config"]["api_key"] == "test-key"
assert call_args["config"]["model_name"] == "text-embedding-3-small"
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_initialization_import_error(self, mock_build_embedder):
"""Test initialization with import error."""
mock_build_embedder.side_effect = ImportError("CrewAI not installed")
with pytest.raises(ImportError, match="CrewAI embedding providers not available"):
EmbeddingService(provider="openai", model="test-model", api_key="test-key")
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_embed_text_success(self, mock_build_embedder):
"""Test successful text embedding."""
# Mock the embedding function
mock_embedding_function = Mock()
mock_embedding_function.return_value = [[0.1, 0.2, 0.3]]
mock_build_embedder.return_value = mock_embedding_function
service = EmbeddingService(provider="openai", model="test-model", api_key="test-key")
result = service.embed_text("test text")
assert result == [0.1, 0.2, 0.3]
mock_embedding_function.assert_called_once_with(["test text"])
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_embed_text_empty_input(self, mock_build_embedder):
"""Test embedding empty text."""
mock_embedding_function = Mock()
mock_build_embedder.return_value = mock_embedding_function
service = EmbeddingService(provider="openai", model="test-model", api_key="test-key")
result = service.embed_text("")
assert result == []
result = service.embed_text(" ")
assert result == []
# Embedding function should not be called for empty text
mock_embedding_function.assert_not_called()
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_embed_batch_success(self, mock_build_embedder):
"""Test successful batch embedding."""
# Mock the embedding function
mock_embedding_function = Mock()
mock_embedding_function.return_value = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
mock_build_embedder.return_value = mock_embedding_function
service = EmbeddingService(provider="openai", model="test-model", api_key="test-key")
texts = ["text1", "text2", "text3"]
result = service.embed_batch(texts)
assert result == [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
mock_embedding_function.assert_called_once_with(texts)
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_embed_batch_empty_input(self, mock_build_embedder):
"""Test batch embedding with empty input."""
mock_embedding_function = Mock()
mock_build_embedder.return_value = mock_embedding_function
service = EmbeddingService(provider="openai", model="test-model", api_key="test-key")
# Empty list
result = service.embed_batch([])
assert result == []
# List with empty strings
result = service.embed_batch(["", " ", ""])
assert result == []
# Embedding function should not be called for empty input
mock_embedding_function.assert_not_called()
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_validate_connection(self, mock_build_embedder):
    """validate_connection mirrors whether a probe embedding call succeeds."""
    embedder = Mock(return_value=[[0.1, 0.2, 0.3]])
    mock_build_embedder.return_value = embedder
    service = EmbeddingService(provider="openai", model="test-model", api_key="test-key")
    # A healthy embedder reports a live connection.
    assert service.validate_connection() is True
    # A raising embedder is reported as disconnected, not propagated.
    embedder.side_effect = Exception("Connection failed")
    assert service.validate_connection() is False
@patch('crewai.rag.embeddings.factory.build_embedder')
def test_get_service_info(self, mock_build_embedder):
    """get_service_info reports provider, model, dimension, batch size and connectivity."""
    embedder = Mock(return_value=[[0.1, 0.2, 0.3]])
    mock_build_embedder.return_value = embedder
    service = EmbeddingService(provider="openai", model="test-model", api_key="test-key")
    info = service.get_service_info()
    expected = {
        "provider": "openai",
        "model": "test-model",
        # Dimension is inferred from the probe vector length above.
        "embedding_dimension": 3,
        "batch_size": 100,
    }
    for key, value in expected.items():
        assert info[key] == value
    assert info["is_connected"] is True
def test_create_openai_service(self):
    """create_openai_service builds a config pointed at the OpenAI provider."""
    with patch('crewai.rag.embeddings.factory.build_embedder'):
        service = EmbeddingService.create_openai_service(
            model="text-embedding-3-large",
            api_key="test-key"
        )
    cfg = service.config
    assert (cfg.provider, cfg.model, cfg.api_key) == (
        "openai",
        "text-embedding-3-large",
        "test-key",
    )
def test_create_voyage_service(self):
    """create_voyage_service builds a config pointed at the Voyage AI provider."""
    with patch('crewai.rag.embeddings.factory.build_embedder'):
        service = EmbeddingService.create_voyage_service(
            model="voyage-large-2",
            api_key="test-key"
        )
    cfg = service.config
    assert (cfg.provider, cfg.model, cfg.api_key) == (
        "voyageai",
        "voyage-large-2",
        "test-key",
    )
def test_create_cohere_service(self):
    """create_cohere_service builds a config pointed at the Cohere provider."""
    with patch('crewai.rag.embeddings.factory.build_embedder'):
        service = EmbeddingService.create_cohere_service(
            model="embed-multilingual-v3.0",
            api_key="test-key"
        )
    cfg = service.config
    assert (cfg.provider, cfg.model, cfg.api_key) == (
        "cohere",
        "embed-multilingual-v3.0",
        "test-key",
    )
def test_create_gemini_service(self):
    """create_gemini_service builds a config pointed at the Gemini provider."""
    with patch('crewai.rag.embeddings.factory.build_embedder'):
        service = EmbeddingService.create_gemini_service(
            model="models/embedding-001",
            api_key="test-key"
        )
    cfg = service.config
    assert (cfg.provider, cfg.model, cfg.api_key) == (
        "google-generativeai",
        "models/embedding-001",
        "test-key",
    )
class TestProviderConfigurations:
    """Verify how per-provider options are mapped into the build_embedder spec."""

    @staticmethod
    def _captured_spec(mock_build_embedder, **service_kwargs):
        """Construct an EmbeddingService and return the spec dict it handed to build_embedder."""
        mock_build_embedder.return_value = Mock()
        EmbeddingService(**service_kwargs)
        return mock_build_embedder.call_args[0][0]

    @patch('crewai.rag.embeddings.factory.build_embedder')
    def test_openai_config(self, mock_build_embedder):
        """OpenAI options map model -> model_name and pass dimensions through."""
        spec = self._captured_spec(
            mock_build_embedder,
            provider="openai",
            model="text-embedding-3-small",
            api_key="test-key",
            extra_config={"dimensions": 1024},
        )
        assert spec["provider"] == "openai"
        cfg = spec["config"]
        assert cfg["api_key"] == "test-key"
        assert cfg["model_name"] == "text-embedding-3-small"
        assert cfg["dimensions"] == 1024

    @patch('crewai.rag.embeddings.factory.build_embedder')
    def test_voyageai_config(self, mock_build_embedder):
        """Voyage AI keeps the plain `model` key and forwards timeout/retry/input_type."""
        spec = self._captured_spec(
            mock_build_embedder,
            provider="voyageai",
            model="voyage-2",
            api_key="test-key",
            timeout=60.0,
            max_retries=5,
            extra_config={"input_type": "document"},
        )
        assert spec["provider"] == "voyageai"
        cfg = spec["config"]
        assert cfg["api_key"] == "test-key"
        assert cfg["model"] == "voyage-2"
        assert cfg["timeout"] == 60.0
        assert cfg["max_retries"] == 5
        assert cfg["input_type"] == "document"

    @patch('crewai.rag.embeddings.factory.build_embedder')
    def test_cohere_config(self, mock_build_embedder):
        """Cohere options map model -> model_name."""
        spec = self._captured_spec(
            mock_build_embedder,
            provider="cohere",
            model="embed-english-v3.0",
            api_key="test-key",
        )
        assert spec["provider"] == "cohere"
        cfg = spec["config"]
        assert cfg["api_key"] == "test-key"
        assert cfg["model_name"] == "embed-english-v3.0"

    @patch('crewai.rag.embeddings.factory.build_embedder')
    def test_gemini_config(self, mock_build_embedder):
        """Gemini options map model -> model_name under the google-generativeai provider."""
        spec = self._captured_spec(
            mock_build_embedder,
            provider="google-generativeai",
            model="models/embedding-001",
            api_key="test-key",
        )
        assert spec["provider"] == "google-generativeai"
        cfg = spec["config"]
        assert cfg["api_key"] == "test-key"
        assert cfg["model_name"] == "models/embedding-001"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_embedding_service.py",
"license": "MIT License",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_json_loader.py | import json
import os
import tempfile
from unittest.mock import Mock, patch
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.json_loader import JSONLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
class TestJSONLoader:
    """Tests for JSONLoader covering file paths, inline JSON text, and URLs."""

    def _create_temp_json_file(self, data) -> str:
        """Helper to write JSON data to a temporary file and return its path."""
        # delete=False so the file survives the close; callers unlink it.
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            json.dump(data, f)
        return f.name

    def _create_temp_raw_file(self, content: str) -> str:
        """Helper to write raw content to a temporary file and return its path."""
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            f.write(content)
        return f.name

    def _load_from_path(self, path) -> LoaderResult:
        # Fresh loader per call keeps tests independent of loader state.
        loader = JSONLoader()
        return loader.load(SourceContent(path))

    def test_load_json_dict(self):
        path = self._create_temp_json_file(
            {"name": "John", "age": 30, "items": ["a", "b", "c"]}
        )
        try:
            result = self._load_from_path(path)
            assert isinstance(result, LoaderResult)
            # Keys and values are flattened into the textual content.
            assert all(k in result.content for k in ["name", "John", "age", "30"])
            assert result.metadata == {"format": "json", "type": "dict", "size": 3}
            assert result.source == path
        finally:
            os.unlink(path)

    def test_load_json_list(self):
        path = self._create_temp_json_file(
            [
                {"id": 1, "name": "Item 1"},
                {"id": 2, "name": "Item 2"},
            ]
        )
        try:
            result = self._load_from_path(path)
            # size reflects the number of top-level list elements.
            assert result.metadata["type"] == "list"
            assert result.metadata["size"] == 2
            assert all(item in result.content for item in ["Item 1", "Item 2"])
        finally:
            os.unlink(path)

    @pytest.mark.parametrize(
        "value, expected_type",
        [
            ("simple string value", "str"),
            (42, "int"),
        ],
    )
    def test_load_json_primitives(self, value, expected_type):
        # Scalar JSON documents report their Python type name and size 1.
        path = self._create_temp_json_file(value)
        try:
            result = self._load_from_path(path)
            assert result.metadata["type"] == expected_type
            assert result.metadata["size"] == 1
            assert str(value) in result.content
        finally:
            os.unlink(path)

    def test_load_malformed_json(self):
        # Invalid JSON falls back to the raw text plus a parse_error marker.
        path = self._create_temp_raw_file('{"invalid": json,}')
        try:
            result = self._load_from_path(path)
            assert result.metadata["format"] == "json"
            assert "parse_error" in result.metadata
            assert result.content == '{"invalid": json,}'
        finally:
            os.unlink(path)

    def test_load_empty_file(self):
        # An empty file is unparseable JSON: empty content + parse_error.
        path = self._create_temp_raw_file("")
        try:
            result = self._load_from_path(path)
            assert "parse_error" in result.metadata
            assert result.content == ""
        finally:
            os.unlink(path)

    def test_load_text_input(self):
        # A JSON string given directly (not a path/URL) is parsed in place.
        json_text = '{"message": "hello", "count": 5}'
        loader = JSONLoader()
        result = loader.load(SourceContent(json_text))
        assert all(
            part in result.content for part in ["message", "hello", "count", "5"]
        )
        assert result.metadata["type"] == "dict"
        assert result.metadata["size"] == 2

    def test_load_complex_nested_json(self):
        data = {
            "users": [
                {"id": 1, "profile": {"name": "Alice", "settings": {"theme": "dark"}}},
                {"id": 2, "profile": {"name": "Bob", "settings": {"theme": "light"}}},
            ],
            "meta": {"total": 2, "version": "1.0"},
        }
        path = self._create_temp_json_file(data)
        try:
            result = self._load_from_path(path)
            # Deeply nested values still surface in the flattened content.
            for value in ["Alice", "Bob", "dark", "light"]:
                assert value in result.content
            assert result.metadata["size"] == 2  # top-level keys
        finally:
            os.unlink(path)

    def test_consistent_doc_id(self):
        # Same path + same content must hash to the same doc_id.
        path = self._create_temp_json_file({"test": "data"})
        try:
            result1 = self._load_from_path(path)
            result2 = self._load_from_path(path)
            assert result1.doc_id == result2.doc_id
        finally:
            os.unlink(path)

    # ------------------------------
    # URL-based tests
    # ------------------------------
    @patch("requests.get")
    def test_url_response_valid_json(self, mock_get):
        mock_get.return_value = Mock(
            text='{"key": "value", "number": 123}',
            json=Mock(return_value={"key": "value", "number": 123}),
            raise_for_status=Mock(),
        )
        loader = JSONLoader()
        result = loader.load(SourceContent("https://api.example.com/data.json"))
        assert all(val in result.content for val in ["key", "value", "number", "123"])
        # The loader should advertise JSON acceptance and identify itself.
        headers = mock_get.call_args[1]["headers"]
        assert "application/json" in headers["Accept"]
        assert "crewai-tools JSONLoader" in headers["User-Agent"]

    @patch("requests.get")
    def test_url_response_not_json(self, mock_get):
        # When response.json() fails, the raw text is still parsed/used.
        mock_get.return_value = Mock(
            text='{"key": "value"}',
            json=Mock(side_effect=ValueError("Not JSON")),
            raise_for_status=Mock(),
        )
        loader = JSONLoader()
        result = loader.load(SourceContent("https://example.com/data.json"))
        assert all(part in result.content for part in ["key", "value"])

    @patch("requests.get")
    def test_url_with_custom_headers(self, mock_get):
        mock_get.return_value = Mock(
            text='{"data": "test"}',
            json=Mock(return_value={"data": "test"}),
            raise_for_status=Mock(),
        )
        headers = {"Authorization": "Bearer token", "Custom-Header": "value"}
        loader = JSONLoader()
        loader.load(SourceContent("https://api.example.com/data.json"), headers=headers)
        # Caller-supplied headers replace the defaults wholesale.
        assert mock_get.call_args[1]["headers"] == headers

    @patch("requests.get")
    def test_url_network_failure(self, mock_get):
        # Transport errors are wrapped in ValueError by the loader.
        mock_get.side_effect = Exception("Network error")
        loader = JSONLoader()
        with pytest.raises(ValueError, match="Error fetching content from URL"):
            loader.load(SourceContent("https://api.example.com/data.json"))

    @patch("requests.get")
    def test_url_http_error(self, mock_get):
        # raise_for_status failures are wrapped in ValueError as well.
        mock_get.return_value = Mock(
            raise_for_status=Mock(side_effect=Exception("404"))
        )
        loader = JSONLoader()
        with pytest.raises(ValueError, match="Error fetching content from URL"):
            loader.load(SourceContent("https://api.example.com/404.json"))
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_json_loader.py",
"license": "MIT License",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_mdx_loader.py | import os
import tempfile
from unittest.mock import Mock, patch
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.mdx_loader import MDXLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
class TestMDXLoader:
    """Tests for MDXLoader: stripping imports/exports/JSX while keeping markdown text."""

    def _write_temp_mdx(self, content):
        # delete=False so the file survives the close; callers unlink it.
        f = tempfile.NamedTemporaryFile(mode="w", suffix=".mdx", delete=False)
        f.write(content)
        f.close()
        return f.name

    def _load_from_file(self, content):
        """Write *content* to a temp .mdx file, load it, and return (result, path)."""
        path = self._write_temp_mdx(content)
        try:
            loader = MDXLoader()
            return loader.load(SourceContent(path)), path
        finally:
            os.unlink(path)

    def test_load_basic_mdx_file(self):
        content = """
import Component from './Component'
export const meta = { title: 'Test' }
# Test MDX File
This is a **markdown** file with JSX.
<Component prop="value" />
Some more content.
<div className="container">
<p>Nested content</p>
</div>
"""
        result, path = self._load_from_file(content)
        assert isinstance(result, LoaderResult)
        # JSX/module syntax is removed ...
        assert all(
            tag not in result.content
            for tag in ["import", "export", "<Component", "<div", "</div>"]
        )
        # ... while markdown and the text inside JSX elements survives.
        assert all(
            text in result.content
            for text in [
                "# Test MDX File",
                "markdown",
                "Some more content",
                "Nested content",
            ]
        )
        assert result.metadata["format"] == "mdx"
        assert result.source == path

    def test_mdx_multiple_imports_exports(self):
        content = """
import React from 'react'
import { useState } from 'react'
import CustomComponent from './custom'
export default function Layout() { return null }
export const config = { test: true }
# Content
Regular markdown content here.
"""
        result, _ = self._load_from_file(content)
        assert "# Content" in result.content
        assert "Regular markdown content here." in result.content
        assert "import" not in result.content and "export" not in result.content

    def test_complex_jsx_cleanup(self):
        content = """
# MDX with Complex JSX
<div className="alert alert-info">
<strong>Info:</strong> This is important information.
<ul><li>Item 1</li><li>Item 2</li></ul>
</div>
Regular paragraph text.
<MyComponent prop1="value1">Nested content inside component</MyComponent>
"""
        result, _ = self._load_from_file(content)
        # Tags are gone, inner text remains.
        assert all(
            tag not in result.content
            for tag in ["<div", "<strong>", "<ul>", "<MyComponent"]
        )
        assert all(
            text in result.content
            for text in [
                "Info:",
                "Item 1",
                "Regular paragraph text.",
                "Nested content inside component",
            ]
        )

    def test_whitespace_cleanup(self):
        content = """
# Title
Some content.
More content after multiple newlines.
Final content.
"""
        result, _ = self._load_from_file(content)
        # No runs of 3+ newlines; leading/trailing whitespace is trimmed.
        assert result.content.count("\n\n\n") == 0
        assert result.content.startswith("# Title")
        assert result.content.endswith("Final content.")

    def test_only_jsx_content(self):
        content = """
<div>
<h1>Only JSX content</h1>
<p>No markdown here</p>
</div>
"""
        result, _ = self._load_from_file(content)
        assert all(tag not in result.content for tag in ["<div>", "<h1>", "<p>"])
        assert "Only JSX content" in result.content
        assert "No markdown here" in result.content

    @patch("requests.get")
    def test_load_mdx_from_url(self, mock_get):
        # URLs are fetched and cleaned just like files.
        mock_get.return_value = Mock(
            text="# MDX from URL\n\nContent here.\n\n<Component />",
            raise_for_status=lambda: None,
        )
        loader = MDXLoader()
        result = loader.load(SourceContent("https://example.com/content.mdx"))
        assert "# MDX from URL" in result.content
        assert "<Component />" not in result.content

    @patch("requests.get")
    def test_load_mdx_with_custom_headers(self, mock_get):
        mock_get.return_value = Mock(
            text="# Custom headers test", raise_for_status=lambda: None
        )
        loader = MDXLoader()
        loader.load(
            SourceContent("https://example.com"),
            headers={"Authorization": "Bearer token"},
        )
        # Caller-supplied headers are passed through to requests.get.
        assert mock_get.call_args[1]["headers"] == {"Authorization": "Bearer token"}

    @patch("requests.get")
    def test_mdx_url_fetch_error(self, mock_get):
        # Transport errors are wrapped in ValueError with the URL in the message.
        mock_get.side_effect = Exception("Network error")
        with pytest.raises(ValueError, match="Error fetching content from URL https://example.com: Network error"):
            MDXLoader().load(SourceContent("https://example.com"))

    def test_load_inline_mdx_text(self):
        # Raw MDX text (not a path/URL) is cleaned directly.
        content = """# Inline MDX\n\nimport Something from 'somewhere'\n\nContent with <Component prop=\"value\" />.\n\nexport const meta = { title: 'Test' }"""
        loader = MDXLoader()
        result = loader.load(SourceContent(content))
        assert "# Inline MDX" in result.content
        assert "Content with ." in result.content

    def test_empty_result_after_cleaning(self):
        # A document that is only imports/exports/JSX cleans down to nothing.
        content = """
import Something from 'somewhere'
export const config = {}
<div></div>
"""
        result, _ = self._load_from_file(content)
        assert result.content.strip() == ""

    def test_edge_case_parsing(self):
        content = """
# Title
<Component>
Multi-line
JSX content
</Component>
import { a, b } from 'module'
export { x, y }
Final text.
"""
        result, _ = self._load_from_file(content)
        assert "# Title" in result.content
        assert "JSX content" in result.content
        assert "Final text." in result.content
        assert all(
            phrase not in result.content
            for phrase in ["import {", "export {", "<Component>"]
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_mdx_loader.py",
"license": "MIT License",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_text_loaders.py | import hashlib
import os
import tempfile
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.text_loader import TextFileLoader, TextLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
def write_temp_file(content, suffix=".txt", encoding="utf-8"):
    """Persist *content* to a named temporary file and return its path.

    The file is created with delete=False so it outlives the handle;
    callers are responsible for removing it.
    """
    handle = tempfile.NamedTemporaryFile(
        mode="w", suffix=suffix, delete=False, encoding=encoding
    )
    with handle as f:
        f.write(content)
    return handle.name
def cleanup_temp_file(path):
    """Best-effort removal of *path*; a missing file is not an error."""
    try:
        os.unlink(path)
    except FileNotFoundError:
        # Already gone (or never created) -- nothing to do.
        pass
class TestTextFileLoader:
    """Behavioral tests for TextFileLoader reading files from disk."""

    @staticmethod
    def _load(path):
        """Load *path* through a fresh TextFileLoader."""
        return TextFileLoader().load(SourceContent(path))

    def test_basic_text_file(self):
        body = "This is test content\nWith multiple lines\nAnd more text"
        path = write_temp_file(body)
        try:
            loaded = self._load(path)
            assert isinstance(loaded, LoaderResult)
            assert loaded.content == body
            assert loaded.source == path
            assert loaded.doc_id
            assert loaded.metadata in (None, {})
        finally:
            cleanup_temp_file(path)

    def test_empty_file(self):
        path = write_temp_file("")
        try:
            assert self._load(path).content == ""
        finally:
            cleanup_temp_file(path)

    def test_unicode_content(self):
        body = "Hello 世界 🌍 émojis 🎉 åäö"
        path = write_temp_file(body)
        try:
            assert body in self._load(path).content
        finally:
            cleanup_temp_file(path)

    def test_large_file(self):
        body = "\n".join(f"Line {i}" for i in range(100))
        path = write_temp_file(body)
        try:
            loaded = self._load(path)
            assert "Line 0" in loaded.content
            assert "Line 99" in loaded.content
            assert loaded.content.count("\n") == 99
        finally:
            cleanup_temp_file(path)

    def test_missing_file(self):
        with pytest.raises(FileNotFoundError):
            self._load("/nonexistent/path.txt")

    def test_permission_denied(self):
        path = write_temp_file("Some content")
        os.chmod(path, 0o000)
        try:
            with pytest.raises(PermissionError):
                self._load(path)
        finally:
            # Restore permissions so the temp file can be deleted.
            os.chmod(path, 0o644)
            cleanup_temp_file(path)

    def test_doc_id_consistency(self):
        body = "Consistent content"
        path = write_temp_file(body)
        try:
            loader = TextFileLoader()
            first = loader.load(SourceContent(path))
            second = loader.load(SourceContent(path))
            # doc_id is sha256 over path + content, so it must be stable.
            expected = hashlib.sha256((path + body).encode("utf-8")).hexdigest()
            assert first.doc_id == second.doc_id == expected
        finally:
            cleanup_temp_file(path)

    def test_various_extensions(self):
        body = "Same content"
        for suffix in (".txt", ".md", ".log", ".json"):
            path = write_temp_file(body, suffix=suffix)
            try:
                assert self._load(path).content == body
            finally:
                cleanup_temp_file(path)
class TestTextLoader:
    """Behavioral tests for TextLoader handling raw in-memory text."""

    @staticmethod
    def _load(text):
        """Load *text* through a fresh TextLoader."""
        return TextLoader().load(SourceContent(text))

    def test_basic_text(self):
        text = "Raw text"
        loaded = self._load(text)
        # For raw text, source and doc_id are both the content's sha256.
        digest = hashlib.sha256(text.encode("utf-8")).hexdigest()
        assert loaded.content == text
        assert loaded.source == digest
        assert loaded.doc_id == digest

    def test_multiline_text(self):
        assert "Line 2" in self._load("Line 1\nLine 2\nLine 3").content

    def test_empty_text(self):
        loaded = self._load("")
        assert loaded.content == ""
        assert loaded.source == hashlib.sha256("".encode("utf-8")).hexdigest()

    def test_unicode_text(self):
        text = "世界 🌍 émojis 🎉 åäö"
        assert text in self._load(text).content

    def test_special_characters(self):
        text = "!@#$$%^&*()_+-=~`{}[]\\|;:'\",.<>/?"
        assert self._load(text).content == text

    def test_doc_id_uniqueness(self):
        # Different content must hash to different doc_ids.
        assert self._load("A").doc_id != self._load("B").doc_id

    def test_whitespace_text(self):
        text = " \n\t "
        assert self._load(text).content == text

    def test_long_text(self):
        assert len(self._load("A" * 10000).content) == 10000
class TestTextLoadersIntegration:
    """Cross-checks between TextLoader and TextFileLoader."""

    def test_consistency_between_loaders(self):
        body = "Consistent content"
        from_text = TextLoader().load(SourceContent(body))
        path = write_temp_file(body)
        try:
            from_file = TextFileLoader().load(SourceContent(path))
            # Same content, but provenance (source/doc_id) must differ,
            # because the file loader keys on the path.
            assert from_text.content == from_file.content
            assert from_text.source != from_file.source
            assert from_text.doc_id != from_file.doc_id
        finally:
            cleanup_temp_file(path)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_text_loaders.py",
"license": "MIT License",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/rag/test_webpage_loader.py | from unittest.mock import Mock, patch
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.loaders.webpage_loader import WebPageLoader
from crewai_tools.rag.source_content import SourceContent
import pytest
class TestWebPageLoader:
    """Tests for WebPageLoader with requests and BeautifulSoup fully mocked."""

    def setup_mock_response(self, text, status_code=200, content_type="text/html"):
        """Build a fake requests.Response with only the attributes the loader reads."""
        response = Mock()
        response.text = text
        response.apparent_encoding = "utf-8"
        response.status_code = status_code
        response.headers = {"content-type": content_type}
        return response

    def setup_mock_soup(self, text, title=None, script_style_elements=None):
        """Build a fake BeautifulSoup document.

        get_text() returns *text*; soup.title models the <title> tag; calling
        the soup object (soup(...)) returns the script/style elements the
        loader is expected to decompose.
        """
        soup = Mock()
        soup.get_text.return_value = text
        soup.title = Mock(string=title) if title is not None else None
        soup.return_value = script_style_elements or []
        return soup

    @patch("requests.get")
    @patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup")
    def test_load_basic_webpage(self, mock_bs, mock_get):
        mock_get.return_value = self.setup_mock_response(
            "<html><head><title>Test Page</title></head><body><p>Test content</p></body></html>"
        )
        mock_bs.return_value = self.setup_mock_soup("Test content", title="Test Page")
        loader = WebPageLoader()
        result = loader.load(SourceContent("https://example.com"))
        assert isinstance(result, LoaderResult)
        assert result.content == "Test content"
        assert result.metadata["title"] == "Test Page"

    @patch("requests.get")
    @patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup")
    def test_load_webpage_with_scripts_and_styles(self, mock_bs, mock_get):
        html = """
<html><head><title>Page with Scripts</title><style>body { color: red; }</style></head>
<body><script>console.log('test');</script><p>Visible content</p></body></html>
"""
        mock_get.return_value = self.setup_mock_response(html)
        # Fake <script>/<style> elements; the loader must decompose each once.
        scripts = [Mock(), Mock()]
        styles = [Mock()]
        for el in scripts + styles:
            el.decompose = Mock()
        mock_bs.return_value = self.setup_mock_soup(
            "Page with Scripts Visible content",
            title="Page with Scripts",
            script_style_elements=scripts + styles,
        )
        loader = WebPageLoader()
        result = loader.load(SourceContent("https://example.com/with-scripts"))
        assert "Visible content" in result.content
        for el in scripts + styles:
            el.decompose.assert_called_once()

    @patch("requests.get")
    @patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup")
    def test_text_cleaning_and_title_handling(self, mock_bs, mock_get):
        mock_get.return_value = self.setup_mock_response(
            "<html><body><p>  Messy   text  </p></body></html>"
        )
        # title=None models a page without a <title> tag.
        mock_bs.return_value = self.setup_mock_soup(
            "Text   with	extra    spaces\n\n  More	text  \n\n", title=None
        )
        loader = WebPageLoader()
        result = loader.load(SourceContent("https://example.com/messy-text"))
        assert result.content is not None
        assert result.metadata["title"] == ""

    @patch("requests.get")
    @patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup")
    def test_empty_or_missing_title(self, mock_bs, mock_get):
        # Both a missing <title> (None) and an empty one normalize to "".
        for title in [None, ""]:
            mock_get.return_value = self.setup_mock_response(
                "<html><head><title></title></head><body>Content</body></html>"
            )
            mock_bs.return_value = self.setup_mock_soup("Content", title=title)
            loader = WebPageLoader()
            result = loader.load(SourceContent("https://example.com"))
            assert result.metadata["title"] == ""

    @patch("requests.get")
    def test_custom_and_default_headers(self, mock_get):
        mock_get.return_value = self.setup_mock_response(
            "<html><body>Test</body></html>"
        )
        custom_headers = {
            "User-Agent": "Bot",
            "Authorization": "Bearer xyz",
            "Accept": "text/html",
        }
        with patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup") as mock_bs:
            mock_bs.return_value = self.setup_mock_soup("Test")
            WebPageLoader().load(
                SourceContent("https://example.com"), headers=custom_headers
            )
            # Caller-supplied headers replace the defaults wholesale.
            assert mock_get.call_args[1]["headers"] == custom_headers

    @patch("requests.get")
    def test_error_handling(self, mock_get):
        # Any fetch-time exception is wrapped in ValueError.
        for error in [Exception("Fail"), ValueError("Bad"), ImportError("Oops")]:
            mock_get.side_effect = error
            with pytest.raises(ValueError, match="Error loading webpage"):
                WebPageLoader().load(SourceContent("https://example.com"))

    @patch("requests.get")
    def test_timeout_and_http_error(self, mock_get):
        import requests
        # Timeouts are wrapped in ValueError ...
        mock_get.side_effect = requests.Timeout("Timeout")
        with pytest.raises(ValueError):
            WebPageLoader().load(SourceContent("https://example.com"))
        # ... and so are HTTP errors raised by raise_for_status().
        mock_response = Mock()
        mock_response.raise_for_status.side_effect = requests.HTTPError("404")
        mock_get.side_effect = None
        mock_get.return_value = mock_response
        with pytest.raises(ValueError):
            WebPageLoader().load(SourceContent("https://example.com/404"))

    @patch("requests.get")
    @patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup")
    def test_doc_id_consistency(self, mock_bs, mock_get):
        # Same URL + same content must produce the same doc_id.
        mock_get.return_value = self.setup_mock_response(
            "<html><body>Doc</body></html>"
        )
        mock_bs.return_value = self.setup_mock_soup("Doc")
        loader = WebPageLoader()
        result1 = loader.load(SourceContent("https://example.com"))
        result2 = loader.load(SourceContent("https://example.com"))
        assert result1.doc_id == result2.doc_id

    @patch("requests.get")
    @patch("crewai_tools.rag.loaders.webpage_loader.BeautifulSoup")
    def test_status_code_and_content_type(self, mock_bs, mock_get):
        # The response status code is echoed into metadata ...
        for status in [200, 201, 301]:
            mock_get.return_value = self.setup_mock_response(
                f"<html><body>Status {status}</body></html>", status_code=status
            )
            mock_bs.return_value = self.setup_mock_soup(f"Status {status}")
            result = WebPageLoader().load(
                SourceContent(f"https://example.com/{status}")
            )
            assert result.metadata["status_code"] == status
        # ... as is the content-type header.
        for ctype in ["text/html", "text/plain", "application/xhtml+xml"]:
            mock_get.return_value = self.setup_mock_response(
                "<html><body>Content</body></html>", content_type=ctype
            )
            mock_bs.return_value = self.setup_mock_soup("Content")
            result = WebPageLoader().load(SourceContent("https://example.com"))
            assert result.metadata["content_type"] == ctype
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/rag/test_webpage_loader.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/test_generate_tool_specs.py | import json
from unittest import mock
from crewai.tools.base_tool import BaseTool, EnvVar
from crewai_tools.generate_tool_specs import ToolSpecExtractor
from pydantic import BaseModel, Field
import pytest
class MockToolSchema(BaseModel):
    """Run-argument schema used by MockTool: required, defaulted, and optional fields."""

    query: str = Field(..., description="The query parameter")
    count: int = Field(5, description="Number of results to return")
    filters: list[str] | None = Field(None, description="Optional filters to apply")
class MockTool(BaseTool):
    """Minimal BaseTool subclass exercising every surface the spec extractor reads."""

    name: str = "Mock Search Tool"
    description: str = "A tool that mocks search functionality"
    args_schema: type[BaseModel] = MockToolSchema
    # Init-time parameters: with/without description, string and bool defaults.
    another_parameter: str = Field(
        "Another way to define a default value", description=""
    )
    my_parameter: str = Field("This is default value", description="What a description")
    my_parameter_bool: bool = Field(False)
    # Use default_factory like real tools do (not direct default)
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["this-is-a-required-package", "another-required-package"]
    )
    # One required and one optional env var, to cover both extraction paths.
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="SERPER_API_KEY",
                description="API key for Serper",
                required=True,
                default=None,
            ),
            EnvVar(
                name="API_RATE_LIMIT",
                description="API rate limit",
                required=False,
                default="100",
            ),
        ]
    )
@pytest.fixture
def extractor():
    """Provide a fresh ToolSpecExtractor for each test."""
    return ToolSpecExtractor()
def test_unwrap_schema(extractor):
    """_unwrap_schema peels wrapper layers down to the innermost schema."""
    wrapped = {
        "type": "function-after",
        "schema": {"type": "default", "schema": {"type": "str", "value": "test"}},
    }
    inner = extractor._unwrap_schema(wrapped)
    assert (inner["type"], inner["value"]) == ("str", "test")
@pytest.fixture
def mock_tool_extractor(extractor):
    """Run extraction against MockTool only and return its spec dict."""
    # Patch module discovery so the extractor only ever sees MockTool.
    patched_dir = mock.patch("crewai_tools.generate_tool_specs.dir", return_value=["MockTool"])
    patched_getattr = mock.patch("crewai_tools.generate_tool_specs.getattr", return_value=MockTool)
    with patched_dir, patched_getattr:
        extractor.extract_all_tools()
    specs = extractor.tools_spec
    assert len(specs) == 1
    return specs[0]
def test_extract_basic_tool_info(mock_tool_extractor):
    """The spec exposes exactly the expected keys plus the tool's identity fields."""
    spec = mock_tool_extractor
    expected_keys = {
        "name",
        "humanized_name",
        "description",
        "run_params_schema",
        "env_vars",
        "init_params_schema",
        "package_dependencies",
    }
    assert spec.keys() == expected_keys
    assert spec["name"] == "MockTool"
    assert spec["humanized_name"] == "Mock Search Tool"
    assert spec["description"] == "A tool that mocks search functionality"
def test_extract_init_params_schema(mock_tool_extractor):
    """Init-time Field declarations surface in init_params_schema with defaults intact."""
    schema = mock_tool_extractor["init_params_schema"]
    assert schema.keys() == {
        "$defs",
        "properties",
        "title",
        "type",
    }
    props = schema["properties"]
    another = props["another_parameter"]
    assert (another["description"], another["default"], another["type"]) == (
        "",
        "Another way to define a default value",
        "string",
    )
    mine = props["my_parameter"]
    assert (mine["description"], mine["default"], mine["type"]) == (
        "What a description",
        "This is default value",
        "string",
    )
    flag = props["my_parameter_bool"]
    assert not flag["default"]
    assert flag["type"] == "boolean"
def test_extract_env_vars(mock_tool_extractor):
    """Both declared env vars survive extraction with metadata intact."""
    env_vars = mock_tool_extractor["env_vars"]
    assert len(env_vars) == 2
    api_key, rate_limit = env_vars
    assert (api_key["name"], api_key["description"]) == (
        "SERPER_API_KEY",
        "API key for Serper",
    )
    assert api_key["required"]
    assert api_key["default"] is None
    assert (rate_limit["name"], rate_limit["description"]) == (
        "API_RATE_LIMIT",
        "API rate limit",
    )
    assert not rate_limit["required"]
    assert rate_limit["default"] == "100"
def test_extract_run_params_schema(mock_tool_extractor):
    """args_schema fields appear in run_params_schema with JSON-schema typing."""
    schema = mock_tool_extractor["run_params_schema"]
    assert schema.keys() == {
        "properties",
        "required",
        "title",
        "type",
    }
    props = schema["properties"]
    query = props["query"]
    assert (query["description"], query["type"]) == ("The query parameter", "string")
    count = props["count"]
    assert (count["type"], count["default"]) == ("integer", 5)
    filters = props["filters"]
    assert filters["description"] == "Optional filters to apply"
    assert filters["default"] is None
    # Optional list[str] renders as an anyOf of array-of-string and null.
    assert filters["anyOf"] == [
        {"items": {"type": "string"}, "type": "array"},
        {"type": "null"},
    ]
def test_extract_package_dependencies(mock_tool_extractor):
    """Both required packages are reported, in declaration order."""
    expected_packages = [
        "this-is-a-required-package",
        "another-required-package",
    ]
    assert mock_tool_extractor["package_dependencies"] == expected_packages
def test_save_to_json(extractor, tmp_path):
    """save_to_json writes the collected specs under a top-level "tools" key."""
    spec = {
        "name": "TestTool",
        "humanized_name": "Test Tool",
        "description": "A test tool",
        "run_params_schema": [
            {"name": "param1", "description": "Test parameter", "type": "str"}
        ],
    }
    extractor.tools_spec = [spec]

    output_file = tmp_path / "output.json"
    extractor.save_to_json(str(output_file))

    assert output_file.exists()
    with open(output_file, "r") as fh:
        payload = json.load(fh)

    assert "tools" in payload
    assert len(payload["tools"]) == 1
    saved_tool = payload["tools"][0]
    assert saved_tool["humanized_name"] == "Test Tool"
    assert saved_tool["run_params_schema"][0]["name"] == "param1"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/test_generate_tool_specs.py",
"license": "MIT License",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/test_optional_dependencies.py | from pathlib import Path
import subprocess
import tempfile
import pytest
@pytest.fixture
def temp_project():
    """Yield a throwaway uv project with this repo installed as an editable dep.

    NOTE(review): the TemporaryDirectory is never explicitly cleaned up after
    the yield; removal relies on the object's finalizer — confirm this is
    intentional.
    """
    temp_dir = tempfile.TemporaryDirectory()
    project_dir = Path(temp_dir.name) / "test_project"
    project_dir.mkdir()
    # Minimal pyproject so `uv` treats the directory as a project.
    pyproject_content = """
[project]
name = "test-project"
version = "0.1.0"
description = "Test project"
requires-python = ">=3.10"
"""
    (project_dir / "pyproject.toml").write_text(pyproject_content)
    # Install the repository under test (the current working directory).
    run_command(
        ["uv", "add", "--editable", f"file://{Path.cwd().absolute()}"], project_dir
    )
    run_command(["uv", "sync"], project_dir)
    yield project_dir
def run_command(cmd, cwd):
    """Run *cmd* in *cwd*, capturing stdout/stderr as text.

    Returns the subprocess.CompletedProcess (never raises on non-zero exit).
    """
    completed = subprocess.run(cmd, cwd=cwd, capture_output=True, text=True)
    return completed
@pytest.mark.skip(reason="Test takes too long in GitHub Actions (>30s timeout) due to dependency installation")
def test_no_optional_dependencies_in_init(temp_project):
    """crewai-tools must be importable without any optional extras installed.

    The package declares optional dependencies in pyproject.toml, but a bare
    install should still support `import crewai_tools`.
    """
    completed = run_command(
        ["uv", "run", "python", "-c", "import crewai_tools"], temp_project
    )
    assert completed.returncode == 0, f"Import failed with error: {completed.stderr}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/test_optional_dependencies.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/arxiv_paper_tool_test.py | from pathlib import Path
from unittest.mock import MagicMock, patch
import urllib.error
import xml.etree.ElementTree as ET
from crewai_tools import ArxivPaperTool
import pytest
@pytest.fixture
def tool():
    """An ArxivPaperTool configured to skip PDF downloads."""
    arxiv_tool = ArxivPaperTool(download_pdfs=False)
    return arxiv_tool
def mock_arxiv_response():
    """Canned Atom feed containing a single arXiv entry, returned as a string."""
    feed = """<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
    <entry>
        <id>http://arxiv.org/abs/1234.5678</id>
        <title>Sample Paper</title>
        <summary>This is a summary of the sample paper.</summary>
        <published>2022-01-01T00:00:00Z</published>
        <author><name>John Doe</name></author>
        <link title="pdf" href="http://arxiv.org/pdf/1234.5678.pdf"/>
    </entry>
</feed>"""
    return feed
@patch("urllib.request.urlopen")
def test_fetch_arxiv_data(mock_urlopen, tool):
    """fetch_arxiv_data parses the Atom feed into a list of paper dicts."""
    response = MagicMock()
    response.status = 200
    response.read.return_value = mock_arxiv_response().encode("utf-8")
    mock_urlopen.return_value.__enter__.return_value = response

    papers = tool.fetch_arxiv_data("transformer", 1)

    assert isinstance(papers, list)
    assert papers[0]["title"] == "Sample Paper"
@patch("urllib.request.urlopen", side_effect=urllib.error.URLError("Timeout"))
def test_fetch_arxiv_data_network_error(_mock_urlopen, tool):
    """Network failures from urlopen propagate to the caller as URLError."""
    with pytest.raises(urllib.error.URLError):
        tool.fetch_arxiv_data("transformer", 1)
@patch("urllib.request.urlretrieve")
def test_download_pdf_success(mock_urlretrieve):
    """download_pdf delegates the actual fetch to urlretrieve exactly once."""
    arxiv_tool = ArxivPaperTool()
    arxiv_tool.download_pdf("http://arxiv.org/pdf/1234.5678.pdf", Path("test.pdf"))
    mock_urlretrieve.assert_called_once()
@patch("urllib.request.urlretrieve", side_effect=OSError("Permission denied"))
def test_download_pdf_oserror(_mock_urlretrieve):
    """Filesystem errors raised by urlretrieve are not swallowed."""
    arxiv_tool = ArxivPaperTool()
    target = Path("/restricted/test.pdf")
    with pytest.raises(OSError):
        arxiv_tool.download_pdf("http://arxiv.org/pdf/1234.5678.pdf", target)
@patch("urllib.request.urlopen")
@patch("urllib.request.urlretrieve")
def test_run_with_download(mock_urlretrieve, mock_urlopen):
    """With download_pdfs=True, _run reports the paper and fetches its PDF."""
    response = MagicMock()
    response.status = 200
    response.read.return_value = mock_arxiv_response().encode("utf-8")
    mock_urlopen.return_value.__enter__.return_value = response

    arxiv_tool = ArxivPaperTool(download_pdfs=True)
    output = arxiv_tool._run("transformer", 1)

    assert "Title: Sample Paper" in output
    mock_urlretrieve.assert_called_once()
@patch("urllib.request.urlopen")
def test_run_no_download(mock_urlopen):
    """With download_pdfs=False, _run still reports the paper metadata."""
    response = MagicMock()
    response.status = 200
    response.read.return_value = mock_arxiv_response().encode("utf-8")
    mock_urlopen.return_value.__enter__.return_value = response

    arxiv_tool = ArxivPaperTool(download_pdfs=False)
    summary = arxiv_tool._run("transformer", 1)
    assert "Title: Sample Paper" in summary
@patch("pathlib.Path.mkdir")
def test_validate_save_path_creates_directory(mock_mkdir):
    """_validate_save_path creates missing directories and yields a Path."""
    validated = ArxivPaperTool._validate_save_path("new_folder")
    mock_mkdir.assert_called_once_with(parents=True, exist_ok=True)
    assert isinstance(validated, Path)
@patch("urllib.request.urlopen")
def test_run_handles_exception(mock_urlopen):
    """_run converts unexpected fetch errors into a friendly message."""
    mock_urlopen.side_effect = Exception("API failure")
    arxiv_tool = ArxivPaperTool()
    message = arxiv_tool._run("transformer", 1)
    assert "Failed to fetch or download Arxiv papers" in message
@patch("urllib.request.urlopen")
def test_invalid_xml_response(mock_urlopen, tool):
    """Malformed XML from the API surfaces as an ElementTree ParseError."""
    response = MagicMock()
    response.status = 200
    response.read.return_value = b"<invalid><xml>"
    mock_urlopen.return_value.__enter__.return_value = response

    with pytest.raises(ET.ParseError):
        tool.fetch_arxiv_data("quantum", 1)
@patch.object(ArxivPaperTool, "fetch_arxiv_data")
def test_run_with_max_results(mock_fetch, tool):
    """_run renders one entry per paper returned by fetch_arxiv_data."""

    def make_paper(index):
        # One synthetic paper record per index; no PDF link.
        return {
            "arxiv_id": f"test_{index}",
            "title": f"Title {index}",
            "summary": "Summary",
            "authors": ["Author"],
            "published_date": "2023-01-01",
            "pdf_url": None,
        }

    mock_fetch.return_value = [make_paper(i) for i in range(100)]
    rendered = tool._run(search_query="test", max_results=100)
    assert rendered.count("Title:") == 100
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/arxiv_paper_tool_test.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/brave_search_tool_test.py | import json
from unittest.mock import patch
import pytest
from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool
@pytest.fixture
def brave_tool():
    """A BraveSearchTool limited to two results."""
    search_tool = BraveSearchTool(n_results=2)
    return search_tool
def test_brave_tool_initialization():
    """Default construction: ten results, file saving disabled."""
    default_tool = BraveSearchTool()
    assert default_tool.n_results == 10
    assert default_tool.save_file is False
@patch("requests.get")
def test_brave_tool_search(mock_get, brave_tool):
    """run() flattens the Brave web results into a JSON list of dicts."""
    payload = {
        "web": {
            "results": [
                {
                    "title": "Test Title",
                    "url": "http://test.com",
                    "description": "Test Description",
                }
            ]
        }
    }
    mock_get.return_value.json.return_value = payload

    parsed = json.loads(brave_tool.run(query="test"))

    assert isinstance(parsed, list)
    assert len(parsed) >= 1
    assert parsed[0]["title"] == "Test Title"
    assert parsed[0]["url"] == "http://test.com"
@patch("requests.get")
def test_brave_tool(mock_get):
    """End-to-end run(): the result is JSON whose first item mirrors the API hit."""
    payload = {
        "web": {
            "results": [
                {
                    "title": "Brave Browser",
                    "url": "https://brave.com",
                    "description": "Brave Browser description",
                }
            ]
        }
    }
    mock_get.return_value.json.return_value = payload

    search_tool = BraveSearchTool(n_results=2)
    raw = search_tool.run(query="Brave Browser")
    assert raw is not None

    # Parse the JSON payload so the structure can be inspected.
    parsed = json.loads(raw)
    assert isinstance(parsed, list)
    assert len(parsed) >= 1

    # The first hit must expose title, url and description verbatim.
    first_hit = parsed[0]
    for field, expected in (
        ("title", "Brave Browser"),
        ("url", "https://brave.com"),
        ("description", "Brave Browser description"),
    ):
        assert field in first_hit
        assert first_hit[field] == expected
if __name__ == "__main__":
    # Ad-hoc manual runner; normally these tests are executed via pytest.
    # test_brave_tool_search is omitted here because it needs the brave_tool fixture.
    test_brave_tool()
    test_brave_tool_initialization()
    # test_brave_tool_search(brave_tool)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/brave_search_tool_test.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/brightdata_serp_tool_test.py | import unittest
from unittest.mock import MagicMock, patch
from crewai_tools.tools.brightdata_tool.brightdata_serp import BrightDataSearchTool
class TestBrightDataSearchTool(unittest.TestCase):
    """Unit tests for BrightDataSearchTool with the HTTP layer mocked out."""

    # NOTE(review): patch.dict on setUp is only active while setUp itself runs,
    # so the env vars are visible during tool construction but not during the
    # test methods — confirm the tool reads its credentials at __init__ time.
    @patch.dict(
        "os.environ",
        {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
    )
    def setUp(self):
        # Built once per test method, using the patched environment above.
        self.tool = BrightDataSearchTool()

    @patch("requests.post")
    def test_run_successful_search(self, mock_post):
        # Sample mock JSON response
        mock_response = MagicMock()
        mock_response.status_code = 200
        mock_response.text = "mock response text"
        mock_post.return_value = mock_response
        # Define search input
        input_data = {
            "query": "latest AI news",
            "search_engine": "google",
            "country": "us",
            "language": "en",
            "search_type": "nws",
            "device_type": "desktop",
            "parse_results": True,
            "save_file": False,
        }
        result = self.tool._run(**input_data)
        # Assertions
        self.assertIsInstance(result, str)  # Your tool returns response.text (string)
        mock_post.assert_called_once()

    @patch("requests.post")
    def test_run_with_request_exception(self, mock_post):
        # Transport-level failures should be reported in the result, not raised.
        mock_post.side_effect = Exception("Timeout")
        result = self.tool._run(query="AI", search_engine="google")
        self.assertIn("Error", result)

    def tearDown(self):
        # Clean up env vars
        pass
if __name__ == "__main__":
    # Allow running this module directly outside of pytest.
    unittest.main()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/brightdata_serp_tool_test.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/brightdata_webunlocker_tool_test.py | from unittest.mock import Mock, patch
from crewai_tools.tools.brightdata_tool.brightdata_unlocker import (
BrightDataWebUnlockerTool,
)
import requests
@patch.dict(
    "os.environ",
    {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
)
@patch("crewai_tools.tools.brightdata_tool.brightdata_unlocker.requests.post")
def test_run_success_html(mock_post):
    """A successful HTML unlock issues exactly one POST and returns a string.

    The previous version discarded the result and made no assertions at all,
    so it passed even if the request was never made.
    """
    mock_response = Mock()
    mock_response.status_code = 200
    mock_response.text = "<html><body>Test</body></html>"
    mock_response.raise_for_status = Mock()
    mock_post.return_value = mock_response

    tool = BrightDataWebUnlockerTool()
    result = tool._run(url="https://example.com", format="html", save_file=False)

    mock_post.assert_called_once()
    # Mirrors the json-format test below, which asserts _run returns a string.
    assert isinstance(result, str)
@patch.dict(
    "os.environ",
    {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
)
@patch("crewai_tools.tools.brightdata_tool.brightdata_unlocker.requests.post")
def test_run_success_json(mock_post):
    """A successful JSON unlock returns the raw response body as a string."""
    fake_response = Mock()
    fake_response.status_code = 200
    fake_response.text = "mock response text"
    fake_response.raise_for_status = Mock()
    mock_post.return_value = fake_response

    unlocker = BrightDataWebUnlockerTool()
    output = unlocker._run(url="https://example.com", format="json")

    assert isinstance(output, str)
@patch.dict(
    "os.environ",
    {"BRIGHT_DATA_API_KEY": "test_api_key", "BRIGHT_DATA_ZONE": "test_zone"},
)
@patch("crewai_tools.tools.brightdata_tool.brightdata_unlocker.requests.post")
def test_run_http_error(mock_post):
    """HTTP failures are reported in the returned message, not raised."""
    forbidden = Mock()
    forbidden.status_code = 403
    forbidden.text = "Forbidden"
    forbidden.raise_for_status.side_effect = requests.HTTPError(response=forbidden)
    mock_post.return_value = forbidden

    unlocker = BrightDataWebUnlockerTool()
    message = unlocker._run(url="https://example.com")

    assert "HTTP Error" in message
    assert "Forbidden" in message
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/brightdata_webunlocker_tool_test.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/couchbase_tool_test.py | from unittest.mock import MagicMock, patch
import pytest
# Mock the couchbase library before importing the tool
# This prevents ImportErrors if couchbase isn't installed in the test environment
# Build a fake `couchbase` package so the tool module can be imported even
# when the real SDK is absent from the test environment.
mock_couchbase = MagicMock()
mock_couchbase.search = MagicMock()
mock_couchbase.cluster = MagicMock()
mock_couchbase.options = MagicMock()
mock_couchbase.vector_search = MagicMock()
# Simulate the structure needed for checks
mock_couchbase.cluster.Cluster = MagicMock()
mock_couchbase.options.SearchOptions = MagicMock()
mock_couchbase.vector_search.VectorQuery = MagicMock()
mock_couchbase.vector_search.VectorSearch = MagicMock()
mock_couchbase.search.SearchRequest = MagicMock()  # Mock the class itself
mock_couchbase.search.SearchRequest.create = MagicMock()  # Mock the class method


# Add necessary exception types if needed for testing error handling
class MockCouchbaseException(Exception):
    # Single stand-in for every couchbase exception type the tool catches.
    pass


mock_couchbase.exceptions = MagicMock()
mock_couchbase.exceptions.BucketNotFoundException = MockCouchbaseException
mock_couchbase.exceptions.ScopeNotFoundException = MockCouchbaseException
mock_couchbase.exceptions.CollectionNotFoundException = MockCouchbaseException
mock_couchbase.exceptions.IndexNotFoundException = MockCouchbaseException

import sys

# Register the fakes before the tool module is imported further down.
sys.modules["couchbase"] = mock_couchbase
sys.modules["couchbase.search"] = mock_couchbase.search
sys.modules["couchbase.cluster"] = mock_couchbase.cluster
sys.modules["couchbase.options"] = mock_couchbase.options
sys.modules["couchbase.vector_search"] = mock_couchbase.vector_search
sys.modules["couchbase.exceptions"] = mock_couchbase.exceptions
# Now import the tool
from crewai_tools.tools.couchbase_tool.couchbase_tool import (
CouchbaseFTSVectorSearchTool,
)
# --- Test Fixtures ---
@pytest.fixture(autouse=True)
def reset_global_mocks():
    """Clear call history on the module-level couchbase mocks before each test."""
    for shared_mock in (
        mock_couchbase.vector_search.VectorQuery,
        mock_couchbase.vector_search.VectorSearch.from_vector_query,
        mock_couchbase.search.SearchRequest.create,
    ):
        shared_mock.reset_mock()
# Keeps the fake couchbase modules in place even when other tests imported first.
@pytest.fixture(autouse=True)
def ensure_couchbase_mocks():
    """Force the fake couchbase modules into sys.modules around every test."""
    module_names = (
        "couchbase",
        "couchbase.search",
        "couchbase.cluster",
        "couchbase.options",
        "couchbase.vector_search",
        "couchbase.exceptions",
    )
    # Remember whatever was registered before we stomp on it.
    previously_registered = {
        name: sys.modules[name] for name in module_names if name in sys.modules
    }

    # Install our fakes regardless of what other tests imported first.
    sys.modules["couchbase"] = mock_couchbase
    sys.modules["couchbase.search"] = mock_couchbase.search
    sys.modules["couchbase.cluster"] = mock_couchbase.cluster
    sys.modules["couchbase.options"] = mock_couchbase.options
    sys.modules["couchbase.vector_search"] = mock_couchbase.vector_search
    sys.modules["couchbase.exceptions"] = mock_couchbase.exceptions

    yield

    # Put back anything we displaced.
    for name, module in previously_registered.items():
        if module is not None:
            sys.modules[name] = module
@pytest.fixture
def mock_cluster():
    """A fully wired MagicMock standing in for a couchbase Cluster.

    Every existence check performed by the tool's constructor (bucket,
    scope/collection, search index) is set up to succeed.
    """
    cluster = MagicMock()
    bucket_manager = MagicMock()
    search_index_manager = MagicMock()
    bucket = MagicMock()
    scope = MagicMock()
    collection = MagicMock()
    scope_search_index_manager = MagicMock()
    # Setup mock return values for checks
    cluster.buckets.return_value = bucket_manager
    cluster.search_indexes.return_value = search_index_manager
    cluster.bucket.return_value = bucket
    bucket.scope.return_value = scope
    scope.collection.return_value = collection
    scope.search_indexes.return_value = scope_search_index_manager
    # Mock bucket existence check
    bucket_manager.get_bucket.return_value = True
    # Mock scope/collection existence check
    mock_scope_spec = MagicMock()
    mock_scope_spec.name = "test_scope"
    mock_collection_spec = MagicMock()
    mock_collection_spec.name = "test_collection"
    mock_scope_spec.collections = [mock_collection_spec]
    bucket.collections.return_value.get_all_scopes.return_value = [mock_scope_spec]
    # Mock index existence check (both scoped and global index managers)
    mock_index_def = MagicMock()
    mock_index_def.name = "test_index"
    scope_search_index_manager.get_all_indexes.return_value = [mock_index_def]
    search_index_manager.get_all_indexes.return_value = [mock_index_def]
    return cluster
@pytest.fixture
def mock_embedding_function():
    """A stand-in embedder: every query maps to the same 10-dim vector."""
    return MagicMock(return_value=[0.1] * 10)
@pytest.fixture
def tool_config(mock_cluster, mock_embedding_function):
    """Keyword arguments for constructing a CouchbaseFTSVectorSearchTool."""
    config = dict(
        cluster=mock_cluster,
        bucket_name="test_bucket",
        scope_name="test_scope",
        collection_name="test_collection",
        index_name="test_index",
        embedding_function=mock_embedding_function,
        limit=5,
        embedding_key="test_embedding",
        scoped_index=True,
    )
    return config
@pytest.fixture
def couchbase_tool(tool_config):
    """A tool instance built with COUCHBASE_AVAILABLE patched to True."""
    availability_flag = (
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE"
    )
    with patch(availability_flag, True):
        return CouchbaseFTSVectorSearchTool(**tool_config)
@pytest.fixture
def mock_search_iter():
    """A fake search result iterator exposing two hit rows via .rows()."""

    def make_row(doc_id, text, fill):
        row = MagicMock()
        row.fields = {"id": doc_id, "text": text, "test_embedding": [fill] * 10}
        return row

    result_iter = MagicMock()
    result_iter.rows.return_value = [
        make_row("doc1", "content 1", 0.1),
        make_row("doc2", "content 2", 0.2),
    ]
    return result_iter
# --- Test Cases ---
def test_initialization_success(couchbase_tool, tool_config):
    """All constructor arguments land on the tool, and existence checks ran."""
    expected_attrs = {
        "bucket_name": "test_bucket",
        "scope_name": "test_scope",
        "collection_name": "test_collection",
        "index_name": "test_index",
        "limit": 5,
        "embedding_key": "test_embedding",
    }
    for attr, expected in expected_attrs.items():
        assert getattr(couchbase_tool, attr) == expected

    assert couchbase_tool.cluster == tool_config["cluster"]
    assert couchbase_tool.embedding_function is not None
    assert couchbase_tool.scoped_index

    # The constructor must have verified bucket, scope/collection and index
    # (via the mocks wired up in the mock_cluster fixture).
    couchbase_tool.cluster.buckets().get_bucket.assert_called_once_with("test_bucket")
    couchbase_tool.cluster.bucket().collections().get_all_scopes.assert_called_once()
    couchbase_tool.cluster.bucket().scope().search_indexes().get_all_indexes.assert_called_once()
def test_initialization_missing_required_args(mock_cluster, mock_embedding_function):
    """Dropping any one required argument raises ValueError."""
    full_config = {
        "cluster": mock_cluster,
        "bucket_name": "b",
        "scope_name": "s",
        "collection_name": "c",
        "index_name": "i",
        "embedding_function": mock_embedding_function,
    }
    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        for missing_key in list(full_config):
            partial_config = {
                key: value for key, value in full_config.items() if key != missing_key
            }
            with pytest.raises(ValueError):
                CouchbaseFTSVectorSearchTool(**partial_config)
def test_initialization_couchbase_unavailable():
    """Without the couchbase package (and the user declining install), init fails."""
    availability_patch = patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", False
    )
    with availability_patch, patch("click.confirm", return_value=False) as mock_confirm:
        with pytest.raises(ImportError, match="The 'couchbase' package is required"):
            CouchbaseFTSVectorSearchTool(
                cluster=MagicMock(),
                bucket_name="b",
                scope_name="s",
                collection_name="c",
                index_name="i",
                embedding_function=MagicMock(),
            )
        mock_confirm.assert_called_once()  # Ensure user was prompted
def test_run_success_scoped_index(
    couchbase_tool, mock_search_iter, tool_config, mock_embedding_function
):
    """Test successful _run execution with a scoped index.

    Patches VectorQuery/VectorSearch/SearchRequest/SearchOptions inside the
    tool module and verifies the whole pipeline: embed the query, build the
    vector search request, call scope.search (not cluster.search), and render
    the rows as JSON.
    """
    query = "find relevant documents"
    # expected_embedding = mock_embedding_function(query)

    # Mock the scope search method
    couchbase_tool._scope.search = MagicMock(return_value=mock_search_iter)

    # Mock the VectorQuery/VectorSearch/SearchRequest creation using runtime patching
    with (
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.VectorQuery"
        ) as mock_vq,
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.VectorSearch"
        ) as mock_vs,
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.search.SearchRequest"
        ) as mock_sr,
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.SearchOptions"
        ) as mock_so,
    ):
        # Set up the mock objects and their return values
        mock_vector_query = MagicMock()
        mock_vector_search = MagicMock()
        mock_search_req = MagicMock()
        mock_search_options = MagicMock()
        mock_vq.return_value = mock_vector_query
        mock_vs.from_vector_query.return_value = mock_vector_search
        mock_sr.create.return_value = mock_search_req
        mock_so.return_value = mock_search_options

        result = couchbase_tool._run(query=query)

        # Check embedding function call
        tool_config["embedding_function"].assert_called_once_with(query)
        # Check VectorQuery call
        mock_vq.assert_called_once_with(
            tool_config["embedding_key"],
            mock_embedding_function.return_value,
            tool_config["limit"],
        )
        # Check VectorSearch call
        mock_vs.from_vector_query.assert_called_once_with(mock_vector_query)
        # Check SearchRequest creation
        mock_sr.create.assert_called_once_with(mock_vector_search)
        # Check SearchOptions creation
        mock_so.assert_called_once_with(limit=tool_config["limit"], fields=["*"])
        # Check that scope search was called correctly
        couchbase_tool._scope.search.assert_called_once_with(
            tool_config["index_name"], mock_search_req, mock_search_options
        )
        # Check cluster search was NOT called
        couchbase_tool.cluster.search.assert_not_called()
        # Check result format (simple check for JSON structure)
        assert '"id": "doc1"' in result
        assert '"id": "doc2"' in result
        assert result.startswith("[")  # Should be valid JSON after concatenation
def test_run_success_global_index(
    tool_config, mock_search_iter, mock_embedding_function
):
    """Test successful _run execution with a global (non-scoped) index.

    Mirrors the scoped-index test, but the tool is built with
    scoped_index=False so the search must go through cluster.search instead
    of the scope-level search.
    """
    tool_config["scoped_index"] = False
    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        couchbase_tool = CouchbaseFTSVectorSearchTool(**tool_config)
    query = "find global documents"
    # expected_embedding = mock_embedding_function(query)
    # Mock the cluster search method
    couchbase_tool.cluster.search = MagicMock(return_value=mock_search_iter)
    # Mock the VectorQuery/VectorSearch/SearchRequest creation using runtime patching
    with (
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.VectorQuery"
        ) as mock_vq,
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.VectorSearch"
        ) as mock_vs,
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.search.SearchRequest"
        ) as mock_sr,
        patch(
            "crewai_tools.tools.couchbase_tool.couchbase_tool.SearchOptions"
        ) as mock_so,
    ):
        # Set up the mock objects and their return values
        mock_vector_query = MagicMock()
        mock_vector_search = MagicMock()
        mock_search_req = MagicMock()
        mock_search_options = MagicMock()
        mock_vq.return_value = mock_vector_query
        mock_vs.from_vector_query.return_value = mock_vector_search
        mock_sr.create.return_value = mock_search_req
        mock_so.return_value = mock_search_options

        result = couchbase_tool._run(query=query)

        # Check embedding function call
        tool_config["embedding_function"].assert_called_once_with(query)
        # Check VectorQuery/Search call
        mock_vq.assert_called_once_with(
            tool_config["embedding_key"],
            mock_embedding_function.return_value,
            tool_config["limit"],
        )
        mock_sr.create.assert_called_once_with(mock_vector_search)
        # Check SearchOptions creation
        mock_so.assert_called_once_with(limit=tool_config["limit"], fields=["*"])
        # Check that cluster search was called correctly
        couchbase_tool.cluster.search.assert_called_once_with(
            tool_config["index_name"], mock_search_req, mock_search_options
        )
        # Check scope search was NOT called
        couchbase_tool._scope.search.assert_not_called()
        # Check result format
        assert '"id": "doc1"' in result
        assert '"id": "doc2"' in result
def test_check_bucket_exists_fail(tool_config):
    """A missing bucket is reported as ValueError during construction."""
    tool_config["cluster"].buckets().get_bucket.side_effect = (
        mock_couchbase.exceptions.BucketNotFoundException("Bucket not found")
    )
    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        with pytest.raises(ValueError, match="Bucket test_bucket does not exist."):
            CouchbaseFTSVectorSearchTool(**tool_config)
def test_check_scope_exists_fail(tool_config):
    """Construction fails when the configured scope is absent from the bucket."""
    wrong_scope = MagicMock()
    wrong_scope.name = "wrong_scope"
    cluster = tool_config["cluster"]
    cluster.bucket().collections().get_all_scopes.return_value = [wrong_scope]

    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        with pytest.raises(ValueError, match="Scope test_scope not found"):
            CouchbaseFTSVectorSearchTool(**tool_config)
def test_check_collection_exists_fail(tool_config):
    """Construction fails when the scope exists but lacks the collection."""
    scope_spec = MagicMock()
    scope_spec.name = "test_scope"
    other_collection = MagicMock()
    other_collection.name = "wrong_collection"
    scope_spec.collections = [other_collection]  # target collection absent

    cluster = tool_config["cluster"]
    cluster.bucket().collections().get_all_scopes.return_value = [scope_spec]

    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        with pytest.raises(ValueError, match="Collection test_collection not found"):
            CouchbaseFTSVectorSearchTool(**tool_config)
def test_check_index_exists_fail_scoped(tool_config):
    """A scoped search index that cannot be found raises ValueError."""
    cluster = tool_config["cluster"]
    cluster.bucket().scope().search_indexes().get_all_indexes.return_value = []

    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        with pytest.raises(ValueError, match="Index test_index does not exist"):
            CouchbaseFTSVectorSearchTool(**tool_config)
def test_check_index_exists_fail_global(tool_config):
    """A global (cluster-level) index that cannot be found raises ValueError."""
    tool_config["scoped_index"] = False
    cluster = tool_config["cluster"]
    cluster.search_indexes().get_all_indexes.return_value = []

    with patch(
        "crewai_tools.tools.couchbase_tool.couchbase_tool.COUCHBASE_AVAILABLE", True
    ):
        with pytest.raises(ValueError, match="Index test_index does not exist"):
            CouchbaseFTSVectorSearchTool(**tool_config)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/couchbase_tool_test.py",
"license": "MIT License",
"lines": 368,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/crewai_platform_tools/test_crewai_platform_action_tool.py | from unittest.mock import patch, Mock
import os
from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import (
CrewAIPlatformActionTool,
)
class TestCrewAIPlatformActionToolVerify:
    """Test suite for SSL verification behavior based on CREWAI_FACTORY environment variable"""

    def setup_method(self):
        # Minimal action schema with a single optional string parameter.
        self.action_schema = {
            "function": {
                "name": "test_action",
                "parameters": {
                    "properties": {
                        "test_param": {
                            "type": "string",
                            "description": "Test parameter"
                        }
                    },
                    "required": []
                }
            }
        }

    def create_test_tool(self):
        """Build a tool instance against the schema prepared in setup_method."""
        return CrewAIPlatformActionTool(
            description="Test action tool",
            action_name="test_action",
            action_schema=self.action_schema
        )

    def _verify_flag_used(self, mock_post):
        """Run the tool once against a mocked POST and return the `verify` kwarg.

        Shared by every test below; only the patched environment differs.
        Previously this body was copy-pasted five times.
        """
        mock_response = Mock()
        mock_response.ok = True
        mock_response.json.return_value = {"result": "success"}
        mock_post.return_value = mock_response

        tool = self.create_test_tool()
        tool._run(test_param="test_value")

        mock_post.assert_called_once()
        return mock_post.call_args.kwargs["verify"]

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}, clear=True)
    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
    def test_run_with_ssl_verification_default(self, mock_post):
        """Test that _run uses SSL verification by default when CREWAI_FACTORY is not set"""
        assert self._verify_flag_used(mock_post) is True

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "false"}, clear=True)
    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
    def test_run_with_ssl_verification_factory_false(self, mock_post):
        """Test that _run uses SSL verification when CREWAI_FACTORY is 'false'"""
        assert self._verify_flag_used(mock_post) is True

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "FALSE"}, clear=True)
    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
    def test_run_with_ssl_verification_factory_false_uppercase(self, mock_post):
        """Test that _run uses SSL verification when CREWAI_FACTORY is 'FALSE' (case-insensitive)"""
        assert self._verify_flag_used(mock_post) is True

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "true"}, clear=True)
    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
    def test_run_without_ssl_verification_factory_true(self, mock_post):
        """Test that _run disables SSL verification when CREWAI_FACTORY is 'true'"""
        assert self._verify_flag_used(mock_post) is False

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "TRUE"}, clear=True)
    @patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
    def test_run_without_ssl_verification_factory_true_uppercase(self, mock_post):
        """Test that _run disables SSL verification when CREWAI_FACTORY is 'TRUE' (case-insensitive)"""
        assert self._verify_flag_used(mock_post) is False
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/crewai_platform_tools/test_crewai_platform_action_tool.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/crewai_platform_tools/test_crewai_platform_tool_builder.py | import unittest
from unittest.mock import Mock, patch
from crewai_tools.tools.crewai_platform_tools import (
CrewAIPlatformActionTool,
CrewaiPlatformToolBuilder,
)
import pytest
class TestCrewaiPlatformToolBuilder(unittest.TestCase):
    """Tests for CrewaiPlatformToolBuilder: action fetching, tool creation, caching.

    NOTE(review): the original class declared two ``@pytest.fixture`` methods
    (``platform_tool_builder`` and ``mock_api_response``). pytest does not
    inject fixtures into ``unittest.TestCase`` test methods, and no test here
    requested them, so they were dead code and have been removed.
    """

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_fetch_actions_success(self, mock_get):
        """_fetch_actions calls the API correctly and fills the actions schema."""
        mock_api_response = {
            "actions": {
                "github": [
                    {
                        "name": "create_issue",
                        "description": "Create a GitHub issue",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "title": {
                                    "type": "string",
                                    "description": "Issue title",
                                }
                            },
                            "required": ["title"],
                        },
                    }
                ]
            }
        }
        builder = CrewaiPlatformToolBuilder(apps=["github", "slack/send_message"])
        mock_response = Mock()
        mock_response.raise_for_status.return_value = None
        mock_response.json.return_value = mock_api_response
        mock_get.return_value = mock_response

        builder._fetch_actions()

        mock_get.assert_called_once()
        args, kwargs = mock_get.call_args
        # Request shape: actions endpoint, bearer token, comma-joined app list.
        assert "/actions" in args[0]
        assert kwargs["headers"]["Authorization"] == "Bearer test_token"
        assert kwargs["params"]["apps"] == "github,slack/send_message"
        # Parsed schema is keyed by action name.
        assert "create_issue" in builder._actions_schema
        assert (
            builder._actions_schema["create_issue"]["function"]["name"]
            == "create_issue"
        )

    def test_fetch_actions_no_token(self):
        """Without the integration token env var, _fetch_actions raises ValueError."""
        builder = CrewaiPlatformToolBuilder(apps=["github"])
        with patch.dict("os.environ", {}, clear=True):
            with self.assertRaises(ValueError) as context:
                builder._fetch_actions()
            assert "No platform integration token found" in str(context.exception)

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_create_tools(self, mock_get):
        """tools() builds one CrewAIPlatformActionTool per fetched action."""
        mock_api_response = {
            "actions": {
                "github": [
                    {
                        "name": "create_issue",
                        "description": "Create a GitHub issue",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "title": {
                                    "type": "string",
                                    "description": "Issue title",
                                }
                            },
                            "required": ["title"],
                        },
                    }
                ],
                "slack": [
                    {
                        "name": "send_message",
                        "description": "Send a Slack message",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "channel": {
                                    "type": "string",
                                    "description": "Channel name",
                                }
                            },
                            "required": ["channel"],
                        },
                    }
                ],
            }
        }
        builder = CrewaiPlatformToolBuilder(apps=["github", "slack"])
        mock_response = Mock()
        mock_response.raise_for_status.return_value = None
        mock_response.json.return_value = mock_api_response
        mock_get.return_value = mock_response

        tools = builder.tools()

        assert len(tools) == 2
        assert all(isinstance(tool, CrewAIPlatformActionTool) for tool in tools)
        tool_names = [tool.action_name for tool in tools]
        assert "create_issue" in tool_names
        assert "send_message" in tool_names
        github_tool = next((t for t in tools if t.action_name == "create_issue"), None)
        slack_tool = next((t for t in tools if t.action_name == "send_message"), None)
        assert github_tool is not None
        assert slack_tool is not None
        # Action descriptions are carried through to the tool descriptions.
        assert "Create a GitHub issue" in github_tool.description
        assert "Send a Slack message" in slack_tool.description

    def test_tools_caching(self):
        """tools() fetches and builds only once; later calls reuse the cache."""
        builder = CrewaiPlatformToolBuilder(apps=["github"])
        cached_tools = []

        def mock_create_tools():
            # Simulate _create_tools populating the builder's cache.
            builder._tools = cached_tools

        with (
            patch.object(builder, "_fetch_actions") as mock_fetch,
            patch.object(
                builder, "_create_tools", side_effect=mock_create_tools
            ) as mock_create,
        ):
            tools1 = builder.tools()
            assert mock_fetch.call_count == 1
            assert mock_create.call_count == 1
            tools2 = builder.tools()
            # Second call must not refetch or rebuild.
            assert mock_fetch.call_count == 1
            assert mock_create.call_count == 1
            assert tools1 is tools2

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    def test_empty_apps_list(self):
        """An empty apps list still queries the API (empty param) and yields no tools."""
        builder = CrewaiPlatformToolBuilder(apps=[])
        with patch(
            "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
        ) as mock_get:
            mock_response = Mock()
            mock_response.raise_for_status.return_value = None
            mock_response.json.return_value = {"actions": {}}
            mock_get.return_value = mock_response
            tools = builder.tools()
            assert isinstance(tools, list)
            assert len(tools) == 0
            _, kwargs = mock_get.call_args
            assert kwargs["params"]["apps"] == ""
class TestCrewaiPlatformToolBuilderVerify(unittest.TestCase):
    """SSL-verification behavior of CrewaiPlatformToolBuilder._fetch_actions."""

    @staticmethod
    def _stub_get(mock_get):
        """Point the patched requests.get at a response with no actions."""
        response = Mock()
        response.raise_for_status.return_value = None
        response.json.return_value = {"actions": {}}
        mock_get.return_value = response

    def _check_verify(self, mock_get, expected):
        """Fetch actions for a single app and assert the verify kwarg value."""
        self._stub_get(mock_get)
        CrewaiPlatformToolBuilder(apps=["github"])._fetch_actions()
        mock_get.assert_called_once()
        assert mock_get.call_args.kwargs["verify"] is expected

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}, clear=True)
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_fetch_actions_with_ssl_verification_default(self, mock_get):
        """verify=True by default when CREWAI_FACTORY is unset."""
        self._check_verify(mock_get, True)

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "false"}, clear=True)
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_fetch_actions_with_ssl_verification_factory_false(self, mock_get):
        """verify=True when CREWAI_FACTORY is 'false'."""
        self._check_verify(mock_get, True)

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "FALSE"}, clear=True)
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_fetch_actions_with_ssl_verification_factory_false_uppercase(self, mock_get):
        """verify=True when CREWAI_FACTORY is 'FALSE' (case-insensitive)."""
        self._check_verify(mock_get, True)

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "true"}, clear=True)
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_fetch_actions_without_ssl_verification_factory_true(self, mock_get):
        """verify=False when CREWAI_FACTORY is 'true'."""
        self._check_verify(mock_get, False)

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "TRUE"}, clear=True)
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_fetch_actions_without_ssl_verification_factory_true_uppercase(self, mock_get):
        """verify=False when CREWAI_FACTORY is 'TRUE' (case-insensitive)."""
        self._check_verify(mock_get, False)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/crewai_platform_tools/test_crewai_platform_tool_builder.py",
"license": "MIT License",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/crewai_platform_tools/test_crewai_platform_tools.py | import unittest
from unittest.mock import Mock, patch
from crewai_tools.tools.crewai_platform_tools import CrewaiPlatformTools
class TestCrewaiPlatformTools(unittest.TestCase):
    """Behavioral tests for the CrewaiPlatformTools factory."""

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_crewai_platform_tools_basic(self, mock_get):
        """A single app with no actions yields a (possibly empty) list."""
        response = Mock()
        response.raise_for_status.return_value = None
        response.json.return_value = {"actions": {"github": []}}
        mock_get.return_value = response

        tools = CrewaiPlatformTools(apps=["github"])

        assert tools is not None
        assert isinstance(tools, list)

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_crewai_platform_tools_multiple_apps(self, mock_get):
        """Two apps with one action each produce two tools from one API call."""
        payload = {
            "actions": {
                "github": [
                    {
                        "name": "create_issue",
                        "description": "Create a GitHub issue",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "title": {
                                    "type": "string",
                                    "description": "Issue title",
                                },
                                "body": {"type": "string", "description": "Issue body"},
                            },
                            "required": ["title"],
                        },
                    }
                ],
                "slack": [
                    {
                        "name": "send_message",
                        "description": "Send a Slack message",
                        "parameters": {
                            "type": "object",
                            "properties": {
                                "channel": {
                                    "type": "string",
                                    "description": "Channel to send to",
                                },
                                "text": {
                                    "type": "string",
                                    "description": "Message text",
                                },
                            },
                            "required": ["channel", "text"],
                        },
                    }
                ],
            }
        }
        response = Mock()
        response.raise_for_status.return_value = None
        response.json.return_value = payload
        mock_get.return_value = response

        tools = CrewaiPlatformTools(apps=["github", "slack"])

        assert tools is not None
        assert isinstance(tools, list)
        assert len(tools) == 2
        mock_get.assert_called_once()
        # The app list may travel either in the URL or in the params dict.
        args, kwargs = mock_get.call_args
        requested = kwargs.get("params", {}).get("apps")
        assert "apps=github,slack" in args[0] or requested == "github,slack"

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    def test_crewai_platform_tools_empty_apps(self):
        """An empty apps list produces an empty tool list."""
        with patch(
            "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
        ) as mock_get:
            response = Mock()
            response.raise_for_status.return_value = None
            response.json.return_value = {"actions": {}}
            mock_get.return_value = response

            tools = CrewaiPlatformTools(apps=[])

            assert tools is not None
            assert isinstance(tools, list)
            assert len(tools) == 0

    @patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
    @patch(
        "crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
    )
    def test_crewai_platform_tools_api_error_handling(self, mock_get):
        """API failures degrade to an empty tool list instead of raising."""
        mock_get.side_effect = Exception("API Error")

        tools = CrewaiPlatformTools(apps=["github"])

        assert tools is not None
        assert isinstance(tools, list)
        assert len(tools) == 0

    def test_crewai_platform_tools_no_token(self):
        """A missing integration token raises ValueError at construction."""
        with patch.dict("os.environ", {}, clear=True):
            with self.assertRaises(ValueError) as context:
                CrewaiPlatformTools(apps=["github"])
            assert "No platform integration token found" in str(context.exception)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/crewai_platform_tools/test_crewai_platform_tools.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/exa_search_tool_test.py | import os
from unittest.mock import patch
from crewai_tools import EXASearchTool
import pytest
@pytest.fixture
def exa_search_tool():
    """An EXASearchTool built with an explicit API key."""
    return EXASearchTool(api_key="test_api_key")


@pytest.fixture(autouse=True)
def mock_exa_api_key():
    """Expose EXA_API_KEY in the environment for every test."""
    with patch.dict(os.environ, {"EXA_API_KEY": "test_key_from_env"}):
        yield


def test_exa_search_tool_initialization():
    """Explicit api_key is stored and forwarded to the Exa client."""
    with patch.dict(os.environ, {}, clear=True), patch(
        "crewai_tools.tools.exa_tools.exa_search_tool.Exa"
    ) as exa_cls:
        tool = EXASearchTool(api_key="test_api_key")
        assert tool.api_key == "test_api_key"
        assert tool.content is False
        assert tool.summary is False
        assert tool.type == "auto"
        exa_cls.assert_called_once_with(api_key="test_api_key")


def test_exa_search_tool_initialization_with_env(mock_exa_api_key):
    """With no explicit key, the key is read from EXA_API_KEY."""
    with patch.dict(os.environ, {"EXA_API_KEY": "test_key_from_env"}, clear=True), patch(
        "crewai_tools.tools.exa_tools.exa_search_tool.Exa"
    ) as exa_cls:
        EXASearchTool()
        exa_cls.assert_called_once_with(api_key="test_key_from_env")


def test_exa_search_tool_initialization_with_base_url():
    """An explicit base_url is stored and forwarded to the Exa client."""
    with patch.dict(os.environ, {}, clear=True), patch(
        "crewai_tools.tools.exa_tools.exa_search_tool.Exa"
    ) as exa_cls:
        tool = EXASearchTool(
            api_key="test_api_key", base_url="https://custom.exa.api.com"
        )
        assert tool.api_key == "test_api_key"
        assert tool.base_url == "https://custom.exa.api.com"
        assert tool.content is False
        assert tool.summary is False
        assert tool.type == "auto"
        exa_cls.assert_called_once_with(
            api_key="test_api_key", base_url="https://custom.exa.api.com"
        )


@pytest.fixture
def mock_exa_base_url():
    """Expose EXA_BASE_URL in the environment."""
    with patch.dict(os.environ, {"EXA_BASE_URL": "https://env.exa.api.com"}):
        yield


def test_exa_search_tool_initialization_with_env_base_url(
    mock_exa_api_key, mock_exa_base_url
):
    """Both api_key and base_url fall back to their env vars."""
    with patch("crewai_tools.tools.exa_tools.exa_search_tool.Exa") as exa_cls:
        EXASearchTool()
        exa_cls.assert_called_once_with(
            api_key="test_key_from_env", base_url="https://env.exa.api.com"
        )


def test_exa_search_tool_initialization_without_base_url():
    """Without a base_url anywhere, the client is built with the key alone."""
    with patch.dict(os.environ, {}, clear=True), patch(
        "crewai_tools.tools.exa_tools.exa_search_tool.Exa"
    ) as exa_cls:
        tool = EXASearchTool(api_key="test_api_key")
        assert tool.api_key == "test_api_key"
        assert tool.base_url is None
        exa_cls.assert_called_once_with(api_key="test_api_key")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/exa_search_tool_test.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/files_compressor_tool_test.py | from unittest.mock import patch
from crewai_tools.tools.files_compressor_tool import FileCompressorTool
import pytest
@pytest.fixture
def tool():
    """A fresh FileCompressorTool per test."""
    return FileCompressorTool()


@patch("os.path.exists", return_value=False)
def test_input_path_does_not_exist(mock_exists, tool):
    """Missing input paths are reported in the result, not raised."""
    assert "does not exist" in tool._run("nonexistent_path")


@patch("os.path.exists", return_value=True)
@patch("os.getcwd", return_value="/mocked/cwd")
@patch.object(FileCompressorTool, "_compress_zip")  # Mock actual compression
@patch.object(FileCompressorTool, "_prepare_output", return_value=True)
def test_generate_output_path_default(
    mock_prepare, mock_compress, mock_cwd, mock_exists, tool
):
    """With no output_path, a default is generated and compression runs."""
    outcome = tool._run(input_path="mydir", format="zip")
    mock_compress.assert_called_once()
    assert "Successfully compressed" in outcome


@patch("os.path.exists", return_value=True)
@patch.object(FileCompressorTool, "_compress_zip")
@patch.object(FileCompressorTool, "_prepare_output", return_value=True)
def test_zip_compression(mock_prepare, mock_compress, mock_exists, tool):
    """zip format routes through _compress_zip."""
    outcome = tool._run(
        input_path="some/path", output_path="archive.zip", format="zip", overwrite=True
    )
    mock_compress.assert_called_once()
    assert "Successfully compressed" in outcome


@patch("os.path.exists", return_value=True)
@patch.object(FileCompressorTool, "_compress_tar")
@patch.object(FileCompressorTool, "_prepare_output", return_value=True)
def test_tar_gz_compression(mock_prepare, mock_compress, mock_exists, tool):
    """tar.gz format routes through _compress_tar."""
    outcome = tool._run(
        input_path="some/path",
        output_path="archive.tar.gz",
        format="tar.gz",
        overwrite=True,
    )
    mock_compress.assert_called_once()
    assert "Successfully compressed" in outcome


@pytest.mark.parametrize("format", ["tar", "tar.bz2", "tar.xz"])
@patch("os.path.exists", return_value=True)
@patch.object(FileCompressorTool, "_compress_tar")
@patch.object(FileCompressorTool, "_prepare_output", return_value=True)
def test_other_tar_formats(mock_prepare, mock_compress, mock_exists, format, tool):
    """Every tar variant routes through _compress_tar."""
    outcome = tool._run(
        input_path="path/to/input",
        output_path=f"archive.{format}",
        format=format,
        overwrite=True,
    )
    mock_compress.assert_called_once()
    assert "Successfully compressed" in outcome


@pytest.mark.parametrize("format", ["rar", "7z"])
@patch("os.path.exists", return_value=True)  # Ensure input_path exists
def test_unsupported_format(_, tool, format):
    """Formats outside the zip/tar family are rejected."""
    outcome = tool._run(
        input_path="some/path", output_path=f"archive.{format}", format=format
    )
    assert "not supported" in outcome


@patch("os.path.exists", return_value=True)
def test_extension_mismatch(_, tool):
    """An output extension that contradicts the format is rejected."""
    outcome = tool._run(
        input_path="some/path", output_path="archive.zip", format="tar.gz"
    )
    assert "must have a '.tar.gz' extension" in outcome
@patch("os.path.isfile", return_value=True)
@patch("os.path.exists", return_value=True)
def test_existing_output_no_overwrite(mock_exists, mock_isfile, tool):
    """An existing output file with overwrite=False is refused.

    Fix: the original stacked ``@patch("os.path.exists", return_value=True)``
    twice (plus ``os.path.isfile``); the duplicate patch of the same target was
    redundant, so it was removed and the mock parameters given real names.
    """
    result = tool._run(
        input_path="some/path", output_path="archive.zip", format="zip", overwrite=False
    )
    assert "overwrite is set to False" in result
@patch("os.path.exists", return_value=True)
@patch("zipfile.ZipFile", side_effect=PermissionError)
def test_permission_error(mock_zip, _, tool):
    """A PermissionError while zipping is surfaced in the result message."""
    outcome = tool._run(
        input_path="file.txt", output_path="file.zip", format="zip", overwrite=True
    )
    assert "Permission denied" in outcome


@patch("os.path.exists", return_value=True)
@patch("zipfile.ZipFile", side_effect=FileNotFoundError)
def test_file_not_found_during_zip(mock_zip, _, tool):
    """A FileNotFoundError while zipping is surfaced in the result message."""
    outcome = tool._run(
        input_path="file.txt", output_path="file.zip", format="zip", overwrite=True
    )
    assert "File not found" in outcome


@patch("os.path.exists", return_value=True)
@patch("zipfile.ZipFile", side_effect=Exception("Unexpected"))
def test_general_exception_during_zip(mock_zip, _, tool):
    """Any other exception while zipping is reported as unexpected."""
    outcome = tool._run(
        input_path="file.txt", output_path="file.zip", format="zip", overwrite=True
    )
    assert "unexpected error" in outcome


@patch("os.makedirs")
@patch("os.path.exists", return_value=False)
def test_prepare_output_makes_dir(mock_exists, mock_makedirs):
    """_prepare_output creates the missing output directory."""
    compressor = FileCompressorTool()
    outcome = compressor._prepare_output("some/missing/path/file.zip", overwrite=True)
    assert outcome is True
    mock_makedirs.assert_called_once()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/files_compressor_tool_test.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/generate_crewai_automation_tool_test.py | import os
from unittest.mock import MagicMock, patch
from crewai_tools.tools.generate_crewai_automation_tool.generate_crewai_automation_tool import (
GenerateCrewaiAutomationTool,
GenerateCrewaiAutomationToolSchema,
)
import pytest
import requests
@pytest.fixture(autouse=True)
def mock_env():
    """Provide a personal access token and drop any custom CREWAI_PLUS_URL."""
    with patch.dict(os.environ, {"CREWAI_PERSONAL_ACCESS_TOKEN": "test_token"}):
        os.environ.pop("CREWAI_PLUS_URL", None)
        yield


@pytest.fixture
def tool():
    """Tool built against the default enterprise URL."""
    return GenerateCrewaiAutomationTool()


@pytest.fixture
def custom_url_tool():
    """Tool built while CREWAI_PLUS_URL points at a custom host."""
    with patch.dict(os.environ, {"CREWAI_PLUS_URL": "https://custom.crewai.com"}):
        return GenerateCrewaiAutomationTool()


def test_default_initialization(tool):
    """Defaults: app.crewai.com base URL, env token, canonical tool name."""
    assert tool.name == "Generate CrewAI Automation"
    assert tool.personal_access_token == "test_token"
    assert tool.crewai_enterprise_url == "https://app.crewai.com"


def test_custom_base_url_from_environment(custom_url_tool):
    """CREWAI_PLUS_URL overrides the enterprise URL."""
    assert custom_url_tool.crewai_enterprise_url == "https://custom.crewai.com"


def test_personal_access_token_from_environment(tool):
    """The access token is read from CREWAI_PERSONAL_ACCESS_TOKEN."""
    assert tool.personal_access_token == "test_token"


def test_valid_prompt_only():
    """Schema accepts a prompt with no organization id."""
    parsed = GenerateCrewaiAutomationToolSchema(
        prompt="Create a web scraping automation"
    )
    assert parsed.organization_id is None
    assert parsed.prompt == "Create a web scraping automation"


def test_valid_prompt_with_organization_id():
    """Schema accepts both prompt and organization_id."""
    parsed = GenerateCrewaiAutomationToolSchema(
        prompt="Create automation", organization_id="org-123"
    )
    assert parsed.organization_id == "org-123"
    assert parsed.prompt == "Create automation"


def test_empty_prompt_validation():
    """An empty prompt is accepted as-is by the schema."""
    parsed = GenerateCrewaiAutomationToolSchema(prompt="")
    assert parsed.organization_id is None
    assert parsed.prompt == ""
def _make_studio_response(url):
    """A stubbed HTTP response whose JSON payload carries *url*."""
    response = MagicMock()
    response.json.return_value = {"url": url}
    return response


@patch("requests.post")
def test_successful_generation_without_org_id(mock_post, tool):
    """Without an org id, no organization header is sent."""
    mock_post.return_value = _make_studio_response(
        "https://app.crewai.com/studio/project-123"
    )

    result = tool.run(prompt="Create automation")

    assert (
        result
        == "Generated CrewAI Studio project URL: https://app.crewai.com/studio/project-123"
    )
    mock_post.assert_called_once_with(
        "https://app.crewai.com/crewai_plus/api/v1/studio",
        headers={
            "Authorization": "Bearer test_token",
            "Content-Type": "application/json",
            "Accept": "application/json",
        },
        json={"prompt": "Create automation"},
    )


@patch("requests.post")
def test_successful_generation_with_org_id(mock_post, tool):
    """With an org id, the X-Crewai-Organization-Id header is added."""
    mock_post.return_value = _make_studio_response(
        "https://app.crewai.com/studio/project-456"
    )

    result = tool.run(prompt="Create automation", organization_id="org-456")

    assert (
        result
        == "Generated CrewAI Studio project URL: https://app.crewai.com/studio/project-456"
    )
    mock_post.assert_called_once_with(
        "https://app.crewai.com/crewai_plus/api/v1/studio",
        headers={
            "Authorization": "Bearer test_token",
            "Content-Type": "application/json",
            "Accept": "application/json",
            "X-Crewai-Organization-Id": "org-456",
        },
        json={"prompt": "Create automation"},
    )


@patch("requests.post")
def test_custom_base_url_usage(mock_post, custom_url_tool):
    """A custom enterprise URL is used for the studio endpoint."""
    mock_post.return_value = _make_studio_response(
        "https://custom.crewai.com/studio/project-789"
    )

    custom_url_tool.run(prompt="Create automation")

    mock_post.assert_called_once_with(
        "https://custom.crewai.com/crewai_plus/api/v1/studio",
        headers={
            "Authorization": "Bearer test_token",
            "Content-Type": "application/json",
            "Accept": "application/json",
        },
        json={"prompt": "Create automation"},
    )
@patch("requests.post")
def test_api_error_response_handling(mock_post, tool):
    """HTTP errors raised by raise_for_status propagate to the caller."""
    mock_post.return_value.raise_for_status.side_effect = requests.HTTPError(
        "400 Bad Request"
    )
    with pytest.raises(requests.HTTPError):
        tool.run(prompt="Create automation")


@patch("requests.post")
def test_network_error_handling(mock_post, tool):
    """Connection errors propagate to the caller."""
    mock_post.side_effect = requests.ConnectionError("Network unreachable")
    with pytest.raises(requests.ConnectionError):
        tool.run(prompt="Create automation")


@patch("requests.post")
def test_api_response_missing_url(mock_post, tool):
    """A payload without a url key is reported as URL None."""
    response = MagicMock()
    response.json.return_value = {"status": "success"}
    mock_post.return_value = response

    assert tool.run(prompt="Create automation") == (
        "Generated CrewAI Studio project URL: None"
    )


def test_authorization_header_construction(tool):
    """Base headers carry auth + JSON content negotiation, no org header."""
    headers = tool._get_headers()
    assert "X-Crewai-Organization-Id" not in headers
    assert headers["Authorization"] == "Bearer test_token"
    assert headers["Content-Type"] == "application/json"
    assert headers["Accept"] == "application/json"


def test_authorization_header_with_org_id(tool):
    """Passing an organization id adds the org header."""
    headers = tool._get_headers(organization_id="org-123")
    assert headers["X-Crewai-Organization-Id"] == "org-123"
    assert headers["Authorization"] == "Bearer test_token"


def test_missing_personal_access_token():
    """With no env vars set, the token attribute is None."""
    with patch.dict(os.environ, {}, clear=True):
        bare_tool = GenerateCrewaiAutomationTool()
        assert bare_tool.personal_access_token is None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/generate_crewai_automation_tool_test.py",
"license": "MIT License",
"lines": 137,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/parallel_search_tool_test.py | import json
from unittest.mock import patch
from urllib.parse import urlparse
from crewai_tools.tools.parallel_tools.parallel_search_tool import (
ParallelSearchTool,
)
def test_requires_env_var(monkeypatch):
    """Without PARALLEL_API_KEY the tool returns an explanatory message."""
    monkeypatch.delenv("PARALLEL_API_KEY", raising=False)
    outcome = ParallelSearchTool().run(objective="test")
    assert "PARALLEL_API_KEY" in outcome


@patch("crewai_tools.tools.parallel_tools.parallel_search_tool.requests.post")
def test_happy_path(mock_post, monkeypatch):
    """A successful search returns JSON with a search_id and allowed-host URLs."""
    monkeypatch.setenv("PARALLEL_API_KEY", "test")
    payload = {
        "search_id": "search_123",
        "results": [
            {
                "url": "https://www.un.org/en/about-us/history-of-the-un",
                "title": "History of the United Nations",
                "excerpts": [
                    "Four months after the San Francisco Conference ended, the United Nations officially began, on 24 October 1945..."
                ],
            }
        ],
    }
    mock_post.return_value.status_code = 200
    mock_post.return_value.json.return_value = payload

    raw = ParallelSearchTool().run(
        objective="When was the UN established?",
        search_queries=["Founding year UN"],
    )
    data = json.loads(raw)

    assert "search_id" in data
    # Validate host against allowed set instead of substring matching
    allowed_hosts = {"www.un.org", "un.org"}
    assert any(
        urlparse(item.get("url", "")).netloc in allowed_hosts
        for item in data.get("results", [])
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/parallel_search_tool_test.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/rag/rag_tool_test.py | from pathlib import Path
from tempfile import TemporaryDirectory
from typing import cast
from unittest.mock import MagicMock, Mock, patch
from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter
from crewai_tools.tools.rag.rag_tool import RagTool
@patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client")
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_initialization(
    mock_create_client: Mock, mock_get_rag_client: Mock
) -> None:
    """A bare RagTool subclass wires itself to the CrewAI adapter by default."""
    client_stub = MagicMock()
    client_stub.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = client_stub
    mock_get_rag_client.return_value = client_stub

    class MyTool(RagTool):
        pass

    tool = MyTool()

    assert tool.adapter is not None
    assert isinstance(tool.adapter, CrewAIRagAdapter)
    adapter = cast(CrewAIRagAdapter, tool.adapter)
    assert adapter.collection_name == "rag_tool_collection"
    assert adapter._client is not None


@patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client")
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_add_and_query(
    mock_create_client: Mock, mock_get_rag_client: Mock
) -> None:
    """add() stores documents and _run() surfaces search hits in the output."""
    client_stub = MagicMock()
    client_stub.get_or_create_collection = MagicMock(return_value=None)
    client_stub.add_documents = MagicMock(return_value=None)
    client_stub.search = MagicMock(
        return_value=[
            {"content": "The sky is blue on a clear day.", "metadata": {}, "score": 0.9}
        ]
    )
    mock_create_client.return_value = client_stub
    mock_get_rag_client.return_value = client_stub

    class MyTool(RagTool):
        pass

    tool = MyTool()
    tool.add("The sky is blue on a clear day.")
    tool.add("Machine learning is a subset of artificial intelligence.")

    # Verify documents were added
    assert client_stub.add_documents.call_count == 2

    first_answer = tool._run(query="What color is the sky?")
    assert "Relevant Content:" in first_answer
    assert "The sky is blue" in first_answer

    client_stub.search.return_value = [
        {
            "content": "Machine learning is a subset of artificial intelligence.",
            "metadata": {},
            "score": 0.85,
        }
    ]
    second_answer = tool._run(query="Tell me about machine learning")
    assert "Relevant Content:" in second_answer
    assert "Machine learning" in second_answer
@patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client")
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_with_file(
    mock_create_client: Mock, mock_get_rag_client: Mock
) -> None:
    """Adding a file path ingests its content and makes it queryable."""
    client_stub = MagicMock()
    client_stub.get_or_create_collection = MagicMock(return_value=None)
    client_stub.add_documents = MagicMock(return_value=None)
    client_stub.search = MagicMock(
        return_value=[
            {
                "content": "Python is a programming language known for its simplicity.",
                "metadata": {"file_path": "test.txt"},
                "score": 0.95,
            }
        ]
    )
    mock_create_client.return_value = client_stub
    mock_get_rag_client.return_value = client_stub

    with TemporaryDirectory() as tmpdir:
        sample_file = Path(tmpdir) / "test.txt"
        sample_file.write_text(
            "Python is a programming language known for its simplicity."
        )

        class MyTool(RagTool):
            pass

        tool = MyTool()
        tool.add(str(sample_file))
        assert client_stub.add_documents.called

        answer = tool._run(query="What is Python?")
        assert "Relevant Content:" in answer
        assert "Python is a programming language" in answer
@patch("crewai_tools.tools.rag.rag_tool.build_embedder")
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_with_custom_embeddings(
    mock_create_client: Mock, mock_build_embedder: Mock
) -> None:
    """A custom embedding config must route through build_embedder, never a live API."""
    embedder = MagicMock()
    embedder.return_value = [[0.2] * 1536]
    mock_build_embedder.return_value = embedder

    fake_client = MagicMock()
    fake_client.get_or_create_collection = MagicMock(return_value=None)
    fake_client.add_documents = MagicMock(return_value=None)
    fake_client.search = MagicMock(
        return_value=[{"content": "Test content", "metadata": {}, "score": 0.8}]
    )
    mock_create_client.return_value = fake_client

    class MyTool(RagTool):
        pass

    tool = MyTool(
        config={
            "vectordb": {"provider": "chromadb", "config": {}},
            "embedding_model": {
                "provider": "openai",
                "config": {"model": "text-embedding-3-small"},
            },
        }
    )
    tool.add("Test content")
    output = tool._run(query="Test query")

    assert "Relevant Content:" in output
    assert "Test content" in output
    mock_build_embedder.assert_called()
@patch("crewai_tools.adapters.crewai_rag_adapter.get_rag_client")
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_no_results(
    mock_create_client: Mock, mock_get_rag_client: Mock
) -> None:
    """An empty search result should yield the 'no relevant content' message."""
    empty_client = MagicMock()
    empty_client.get_or_create_collection = MagicMock(return_value=None)
    empty_client.search = MagicMock(return_value=[])
    mock_get_rag_client.return_value = empty_client
    mock_create_client.return_value = empty_client

    class MyTool(RagTool):
        pass

    response = MyTool()._run(query="Non-existent content")

    assert "Relevant Content:" in response
    assert "No relevant content found" in response
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_with_azure_config_without_env_vars(
    mock_create_client: Mock,
) -> None:
    """Azure credentials given via `config` must be honored without env vars.

    Regression test: RAG tools used to ignore the embedding configuration
    passed through the config parameter and instead demanded environment
    variables such as EMBEDDINGS_OPENAI_API_KEY.
    """
    embedder = MagicMock()
    embedder.return_value = [[0.1] * 1536]

    backend = MagicMock()
    backend.get_or_create_collection = MagicMock(return_value=None)
    backend.add_documents = MagicMock(return_value=None)
    mock_create_client.return_value = backend

    azure_config = {
        "embedding_model": {
            "provider": "azure",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "test-api-key",
                "api_base": "https://test.openai.azure.com/",
                "api_version": "2024-02-01",
                "api_type": "azure",
                "deployment_id": "test-deployment",
            },
        }
    }

    # Stub the embedder factory so no real API call can happen.
    with patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=embedder,
    ):

        class MyTool(RagTool):
            pass

        # Must not raise a validation error about missing env vars.
        tool = MyTool(config=azure_config)
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_with_openai_config_without_env_vars(
    mock_create_client: Mock,
) -> None:
    """OpenAI credentials supplied via `config` must work without env vars."""
    embedder = MagicMock()
    embedder.return_value = [[0.1] * 1536]

    backend = MagicMock()
    backend.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = backend

    patched_builder = patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=embedder,
    )
    with patched_builder:

        class MyTool(RagTool):
            pass

        tool = MyTool(
            config={
                "embedding_model": {
                    "provider": "openai",
                    "config": {
                        "model": "text-embedding-3-small",
                        "api_key": "sk-test123",
                    },
                }
            }
        )
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_rag_tool_config_with_qdrant_and_azure_embeddings(
    mock_create_client: Mock,
) -> None:
    """RagTool should accept a Qdrant vector store paired with Azure embeddings."""
    embedder = MagicMock()
    embedder.return_value = [[0.1] * 1536]

    backend = MagicMock()
    backend.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = backend

    combined_config = {
        "vectordb": {"provider": "qdrant", "config": {}},
        "embedding_model": {
            "provider": "azure",
            "config": {
                "model": "text-embedding-3-large",
                "api_key": "test-key",
                "api_base": "https://test.openai.azure.com/",
                "api_version": "2024-02-01",
                "deployment_id": "test-deployment",
            },
        },
    }

    with patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=embedder,
    ):

        class MyTool(RagTool):
            pass

        tool = MyTool(config=combined_config)
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/rag/rag_tool_test.py",
"license": "MIT License",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/selenium_scraping_tool_test.py | import os
import tempfile
from unittest.mock import MagicMock, patch
from bs4 import BeautifulSoup
from crewai_tools.tools.selenium_scraping_tool.selenium_scraping_tool import (
SeleniumScrapingTool,
)
from selenium.webdriver.chrome.options import Options
def mock_driver_with_html(html_content):
    """Build a fake Selenium driver whose elements expose *html_content*.

    The single mocked element returns the raw markup from get_attribute()
    and BeautifulSoup-extracted text from .text, and is returned by both
    find_element and find_elements.
    """
    element = MagicMock()
    element.get_attribute.return_value = html_content
    element.text = BeautifulSoup(html_content, "html.parser").get_text()

    fake_driver = MagicMock()
    fake_driver.find_element.return_value = element
    fake_driver.find_elements.return_value = [element]
    return fake_driver
def initialize_tool_with(mock_driver):
    """Create a SeleniumScrapingTool wired to the given (mock) driver."""
    return SeleniumScrapingTool(driver=mock_driver)
@patch("selenium.webdriver.Chrome")
def test_tool_initialization(mocked_chrome):
    """A bare SeleniumScrapingTool carries the documented defaults.

    Defaults: no URL/CSS selector/cookie, a 3-second wait, and text (not
    raw HTML) output. Chrome is patched so no real browser is launched.

    Fixes: the original created a temp directory it never used and removed
    it under a bare `except:` (E722) that swallowed everything including
    KeyboardInterrupt; the dead scaffolding is removed.
    """
    mocked_chrome.return_value = MagicMock()

    tool = SeleniumScrapingTool()

    assert tool.website_url is None
    assert tool.css_element is None
    assert tool.cookie is None
    assert tool.wait_time == 3
    assert tool.return_html is False
@patch("selenium.webdriver.Chrome")
def test_tool_initialization_with_options(mocked_chrome):
    """Custom Chrome Options must be forwarded to the webdriver constructor."""
    mocked_chrome.return_value = MagicMock()
    chrome_options = Options()
    chrome_options.add_argument("--disable-gpu")

    SeleniumScrapingTool(options=chrome_options)

    mocked_chrome.assert_called_once_with(options=chrome_options)
@patch("selenium.webdriver.Chrome")
def test_scrape_without_css_selector(_mocked_chrome_driver):
    """Without a CSS selector the tool scrapes the page <body> as text."""
    page = "<html><body><div>test content</div></body></html>"
    driver = mock_driver_with_html(page)

    scraped = initialize_tool_with(driver)._run(website_url="https://example.com")

    assert "test content" in scraped
    driver.get.assert_called_once_with("https://example.com")
    driver.find_element.assert_called_with("tag name", "body")
    driver.close.assert_called_once()
@patch("selenium.webdriver.Chrome")
def test_scrape_with_css_selector(_mocked_chrome_driver):
    """A CSS selector restricts scraping to the matching element(s)."""
    page = "<html><body><div>test content</div><div class='test'>test content in a specific div</div></body></html>"
    driver = mock_driver_with_html(page)

    scraped = initialize_tool_with(driver)._run(
        website_url="https://example.com", css_element="div.test"
    )

    assert "test content in a specific div" in scraped
    driver.get.assert_called_once_with("https://example.com")
    driver.find_elements.assert_called_with("css selector", "div.test")
    driver.close.assert_called_once()
@patch("selenium.webdriver.Chrome")
def test_scrape_with_return_html_true(_mocked_chrome_driver):
    """return_html=True yields the raw markup instead of extracted text."""
    page = "<html><body><div>HTML content</div></body></html>"
    driver = mock_driver_with_html(page)

    scraped = initialize_tool_with(driver)._run(
        website_url="https://example.com", return_html=True
    )

    assert page in scraped
    driver.get.assert_called_once_with("https://example.com")
    driver.find_element.assert_called_with("tag name", "body")
    driver.close.assert_called_once()
@patch("selenium.webdriver.Chrome")
def test_scrape_with_return_html_false(_mocked_chrome_driver):
    """return_html=False yields extracted text rather than raw markup."""
    page = "<html><body><div>HTML content</div></body></html>"
    driver = mock_driver_with_html(page)

    scraped = initialize_tool_with(driver)._run(
        website_url="https://example.com", return_html=False
    )

    assert "HTML content" in scraped
    driver.get.assert_called_once_with("https://example.com")
    driver.find_element.assert_called_with("tag name", "body")
    driver.close.assert_called_once()
@patch("selenium.webdriver.Chrome")
def test_scrape_with_driver_error(_mocked_chrome_driver):
    """Driver failures are reported as an error string and the driver closed."""
    failing_driver = MagicMock()
    failing_driver.find_element.side_effect = Exception("WebDriver error occurred")

    outcome = initialize_tool_with(failing_driver)._run(
        website_url="https://example.com"
    )

    assert outcome == "Error scraping website: WebDriver error occurred"
    failing_driver.close.assert_called_once()
@patch("selenium.webdriver.Chrome")
def test_initialization_with_driver(_mocked_chrome_driver):
    """A driver passed at construction time is kept as-is on the tool."""
    injected = MagicMock()
    assert initialize_tool_with(injected).driver == injected
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/selenium_scraping_tool_test.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/serper_dev_tool_test.py | import os
from unittest.mock import patch
from crewai_tools.tools.serper_dev_tool.serper_dev_tool import SerperDevTool
import pytest
@pytest.fixture(autouse=True)
def mock_serper_api_key():
    """Every test in this module runs with a dummy SERPER_API_KEY set."""
    patcher = patch.dict(os.environ, {"SERPER_API_KEY": "test_key"})
    patcher.start()
    try:
        yield
    finally:
        # Restore the original environment even if the test errored.
        patcher.stop()
@pytest.fixture
def serper_tool():
    """A SerperDevTool limited to two results."""
    return SerperDevTool(n_results=2)
def test_serper_tool_initialization():
    """A bare SerperDevTool carries the documented defaults."""
    tool = SerperDevTool()
    expected_defaults = {
        "n_results": 10,
        "save_file": False,
        "search_type": "search",
        "country": "",
        "location": "",
        "locale": "",
    }
    for attr, expected in expected_defaults.items():
        assert getattr(tool, attr) == expected
def test_serper_tool_custom_initialization():
    """Constructor arguments must land on the matching attributes."""
    settings = {
        "n_results": 5,
        "save_file": True,
        "search_type": "news",
        "country": "US",
        "location": "New York",
        "locale": "en",
    }
    tool = SerperDevTool(**settings)
    for attr, expected in settings.items():
        assert getattr(tool, attr) == expected
@patch("requests.post")
def test_serper_tool_search(mock_post):
    """A plain web search returns parsed organic results and search params."""
    tool = SerperDevTool(n_results=2)

    organic_hits = [
        {
            "title": "Test Title 1",
            "link": "http://test1.com",
            "snippet": "Test Description 1",
            "position": 1,
        },
        {
            "title": "Test Title 2",
            "link": "http://test2.com",
            "snippet": "Test Description 2",
            "position": 2,
        },
    ]
    canned_response = {
        "searchParameters": {"q": "test query", "type": "search"},
        "organic": organic_hits,
        "peopleAlsoAsk": [
            {
                "question": "Test Question",
                "snippet": "Test Answer",
                "title": "Test Source",
                "link": "http://test.com",
            }
        ],
    }
    mock_post.return_value.json.return_value = canned_response
    mock_post.return_value.status_code = 200

    result = tool.run(search_query="test query")

    assert "searchParameters" in result
    assert result["searchParameters"]["q"] == "test query"
    assert len(result["organic"]) == 2
    assert result["organic"][0]["title"] == "Test Title 1"
@patch("requests.post")
def test_serper_tool_news_search(mock_post):
    """search_type='news' surfaces the parsed `news` section."""
    tool = SerperDevTool(n_results=2, search_type="news")

    article = {
        "title": "News Title 1",
        "link": "http://news1.com",
        "snippet": "News Description 1",
        "date": "2024-01-01",
        "source": "News Source 1",
        "imageUrl": "http://image1.com",
    }
    mock_post.return_value.json.return_value = {
        "searchParameters": {"q": "test news", "type": "news"},
        "news": [article],
    }
    mock_post.return_value.status_code = 200

    result = tool.run(search_query="test news")

    assert "news" in result
    assert len(result["news"]) == 1
    assert result["news"][0]["title"] == "News Title 1"
@patch("requests.post")
def test_serper_tool_with_location_params(mock_post):
    """country/location/locale map onto the gl/location/hl payload fields."""
    SerperDevTool(
        n_results=2, country="US", location="New York", locale="en"
    ).run(search_query="test")

    payload = mock_post.call_args.kwargs["json"]
    assert payload["gl"] == "US"
    assert payload["location"] == "New York"
    assert payload["hl"] == "en"
def test_invalid_search_type():
    """An unknown search_type raises a descriptive ValueError."""
    with pytest.raises(ValueError, match="Invalid search type"):
        SerperDevTool().run(search_query="test", search_type="invalid")
@patch("requests.post")
def test_api_error_handling(mock_post):
    """Transport-level failures propagate to the caller."""
    mock_post.side_effect = Exception("API Error")
    with pytest.raises(Exception, match="API Error"):
        SerperDevTool().run(search_query="test")
# Allow running this module directly (python <file>) outside of a pytest run.
if __name__ == "__main__":
    pytest.main([__file__])
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/serper_dev_tool_test.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/singlestore_search_tool_test.py | from collections.abc import Generator
import os
from crewai_tools import SingleStoreSearchTool
from crewai_tools.tools.singlestore_search_tool import SingleStoreSearchToolSchema
import pytest
from singlestoredb import connect
from singlestoredb.server import docker
@pytest.fixture(scope="session")
def docker_server_url() -> Generator[str, None, None]:
    """Start a SingleStore Docker server for tests.

    Yields the connection URL of a throwaway container with an empty
    `test_crewai` database; the container is stopped after the session.
    Skips the whole session if Docker is unavailable.
    """
    try:
        # Empty license string selects the free tier.
        sdb = docker.start(license="")
        conn = sdb.connect()
        curr = conn.cursor()
        curr.execute("CREATE DATABASE test_crewai")
        curr.close()
        conn.close()
        yield sdb.connection_url
        sdb.stop()
    except Exception as e:
        # Docker may be missing in CI/sandboxes - skip rather than fail.
        pytest.skip(f"Could not start SingleStore Docker container: {e}")
@pytest.fixture(scope="function")
def clean_db_url(docker_server_url) -> Generator[str, None, None]:
    """Provide a clean database URL and clean up tables after test."""
    yield docker_server_url
    # Best-effort teardown: drop every table the test may have created.
    try:
        conn = connect(host=docker_server_url, database="test_crewai")
        cursor = conn.cursor()
        cursor.execute("SHOW TABLES")
        tables = [row[0] for row in cursor.fetchall()]
        for table in tables:
            cursor.execute(f"DROP TABLE {table}")
        cursor.close()
        conn.close()
    except Exception:
        # Cleanup failures must never fail the test itself.
        pass
@pytest.fixture
def sample_table_setup(clean_db_url):
    """Set up sample tables for testing.

    Creates `employees` (3 rows) and `departments` (2 rows) in the
    test_crewai database, then returns the connection URL so tests can
    build tools against the populated schema.
    """
    conn = connect(host=clean_db_url, database="test_crewai")
    curr = conn.cursor()
    # Create sample tables
    curr.execute(
        """
        CREATE TABLE employees (
            id INT PRIMARY KEY,
            name VARCHAR(100),
            department VARCHAR(50),
            salary DECIMAL(10,2)
        )
    """
    )
    curr.execute(
        """
        CREATE TABLE departments (
            id INT PRIMARY KEY,
            name VARCHAR(100),
            budget DECIMAL(12,2)
        )
    """
    )
    # Insert sample data
    curr.execute(
        """
        INSERT INTO employees VALUES
        (1, 'Alice Smith', 'Engineering', 75000.00),
        (2, 'Bob Johnson', 'Marketing', 65000.00),
        (3, 'Carol Davis', 'Engineering', 80000.00)
    """
    )
    curr.execute(
        """
        INSERT INTO departments VALUES
        (1, 'Engineering', 500000.00),
        (2, 'Marketing', 300000.00)
    """
    )
    curr.close()
    conn.close()
    # Return (not yield): table cleanup is handled by the clean_db_url fixture.
    return clean_db_url
class TestSingleStoreSearchTool:
    """Test suite for SingleStoreSearchTool.

    All tests run against a real SingleStore instance provided by the
    Docker-backed fixtures above; they are skipped when Docker is absent.
    """
    def test_tool_creation_with_connection_params(self, sample_table_setup):
        """Test tool creation with individual connection parameters."""
        # Parse URL components for individual parameters
        # Assumes the fixture URL looks like scheme://user:password@host:port/...
        # - TODO confirm against singlestoredb.server.docker.
        url_parts = sample_table_setup.split("@")[1].split(":")
        host = url_parts[0]
        port = int(url_parts[1].split("/")[0])
        user = "root"
        password = sample_table_setup.split("@")[0].split(":")[2]
        tool = SingleStoreSearchTool(
            tables=[],
            host=host,
            port=port,
            user=user,
            password=password,
            database="test_crewai",
        )
        assert tool.name == "Search a database's table(s) content"
        assert "SingleStore" in tool.description
        # The live schema's column definitions must appear in the description.
        assert (
            "employees(id int(11), name varchar(100), department varchar(50), salary decimal(10,2))"
            in tool.description.lower()
        )
        assert (
            "departments(id int(11), name varchar(100), budget decimal(12,2))"
            in tool.description.lower()
        )
        assert tool.args_schema == SingleStoreSearchToolSchema
        assert tool.connection_pool is not None
    def test_tool_creation_with_connection_url(self, sample_table_setup):
        """Test tool creation with connection URL."""
        tool = SingleStoreSearchTool(host=f"{sample_table_setup}/test_crewai")
        assert tool.name == "Search a database's table(s) content"
        assert tool.connection_pool is not None
    def test_tool_creation_with_specific_tables(self, sample_table_setup):
        """Test tool creation with specific table list."""
        tool = SingleStoreSearchTool(
            tables=["employees"],
            host=sample_table_setup,
            database="test_crewai",
        )
        # Check that description includes specific tables
        assert "employees" in tool.description
        assert "departments" not in tool.description
    def test_tool_creation_with_nonexistent_table(self, sample_table_setup):
        """Test tool creation fails with non-existent table."""
        with pytest.raises(ValueError, match="Table nonexistent does not exist"):
            SingleStoreSearchTool(
                tables=["employees", "nonexistent"],
                host=sample_table_setup,
                database="test_crewai",
            )
    def test_tool_creation_with_empty_database(self, clean_db_url):
        """Test tool creation fails with empty database."""
        # clean_db_url (not sample_table_setup) - no tables were created.
        with pytest.raises(ValueError, match="No tables found in the database"):
            SingleStoreSearchTool(host=clean_db_url, database="test_crewai")
    def test_description_generation(self, sample_table_setup):
        """Test that tool description is properly generated with table info."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        # Check description contains table definitions
        assert "employees(" in tool.description
        assert "departments(" in tool.description
        assert "id int" in tool.description.lower()
        assert "name varchar" in tool.description.lower()
    def test_query_validation_select_allowed(self, sample_table_setup):
        """Test that SELECT queries are allowed."""
        # Exercises the SINGLESTOREDB_URL env-var connection path (no host arg).
        # NOTE(review): the env var is not removed afterwards and may leak
        # into later tests - confirm this is intentional.
        os.environ["SINGLESTOREDB_URL"] = sample_table_setup
        tool = SingleStoreSearchTool(database="test_crewai")
        valid, message = tool._validate_query("SELECT * FROM employees")
        assert valid is True
        assert message == "Valid query"
    def test_query_validation_show_allowed(self, sample_table_setup):
        """Test that SHOW queries are allowed."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        valid, message = tool._validate_query("SHOW TABLES")
        assert valid is True
        assert message == "Valid query"
    def test_query_validation_case_insensitive(self, sample_table_setup):
        """Test that query validation is case insensitive."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        valid, _ = tool._validate_query("select * from employees")
        assert valid is True
        valid, _ = tool._validate_query("SHOW tables")
        assert valid is True
    def test_query_validation_insert_denied(self, sample_table_setup):
        """Test that INSERT queries are denied."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        valid, message = tool._validate_query(
            "INSERT INTO employees VALUES (4, 'Test', 'Test', 1000)"
        )
        assert valid is False
        assert "Only SELECT and SHOW queries are supported" in message
    def test_query_validation_update_denied(self, sample_table_setup):
        """Test that UPDATE queries are denied."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        valid, message = tool._validate_query("UPDATE employees SET salary = 90000")
        assert valid is False
        assert "Only SELECT and SHOW queries are supported" in message
    def test_query_validation_delete_denied(self, sample_table_setup):
        """Test that DELETE queries are denied."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        valid, message = tool._validate_query("DELETE FROM employees WHERE id = 1")
        assert valid is False
        assert "Only SELECT and SHOW queries are supported" in message
    def test_query_validation_non_string(self, sample_table_setup):
        """Test that non-string queries are rejected."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        # Deliberately pass an int to hit the type guard.
        valid, message = tool._validate_query(123)
        assert valid is False
        assert "Search query must be a string" in message
    def test_run_select_query(self, sample_table_setup):
        """Test executing a SELECT query."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        result = tool._run("SELECT * FROM employees ORDER BY id")
        assert "Search Results:" in result
        assert "Alice Smith" in result
        assert "Bob Johnson" in result
        assert "Carol Davis" in result
    def test_run_filtered_query(self, sample_table_setup):
        """Test executing a filtered SELECT query."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        result = tool._run(
            "SELECT name FROM employees WHERE department = 'Engineering'"
        )
        assert "Search Results:" in result
        assert "Alice Smith" in result
        assert "Carol Davis" in result
        assert "Bob Johnson" not in result
    def test_run_show_query(self, sample_table_setup):
        """Test executing a SHOW query."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        result = tool._run("SHOW TABLES")
        assert "Search Results:" in result
        assert "employees" in result
        assert "departments" in result
    def test_run_empty_result(self, sample_table_setup):
        """Test executing a query that returns no results."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        result = tool._run("SELECT * FROM employees WHERE department = 'NonExistent'")
        assert result == "No results found."
    def test_run_invalid_query_syntax(self, sample_table_setup):
        """Test executing a query with invalid syntax."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        result = tool._run("SELECT * FORM employees")  # Intentional typo
        assert "Error executing search query:" in result
    def test_run_denied_query(self, sample_table_setup):
        """Test that denied queries return appropriate error message."""
        tool = SingleStoreSearchTool(host=sample_table_setup, database="test_crewai")
        result = tool._run("DELETE FROM employees")
        assert "Invalid search query:" in result
        assert "Only SELECT and SHOW queries are supported" in result
    def test_connection_pool_usage(self, sample_table_setup):
        """Test that connection pooling works correctly."""
        tool = SingleStoreSearchTool(
            host=sample_table_setup,
            database="test_crewai",
            pool_size=2,
        )
        # Execute multiple queries to test pool usage
        results = []
        for _ in range(5):
            result = tool._run("SELECT COUNT(*) FROM employees")
            results.append(result)
        # All queries should succeed
        for result in results:
            assert "Search Results:" in result
            assert "3" in result  # Count of employees
    def test_tool_schema_validation(self):
        """Test that the tool schema validation works correctly."""
        # Valid input
        valid_input = SingleStoreSearchToolSchema(search_query="SELECT * FROM test")
        assert valid_input.search_query == "SELECT * FROM test"
        # Test that description is present
        schema_dict = SingleStoreSearchToolSchema.model_json_schema()
        assert "search_query" in schema_dict["properties"]
        assert "description" in schema_dict["properties"]["search_query"]
    def test_connection_error_handling(self):
        """Test handling of connection errors."""
        with pytest.raises(Exception):
            # This should fail due to invalid connection parameters
            SingleStoreSearchTool(
                host="invalid_host",
                port=9999,
                user="invalid_user",
                password="invalid_password",
                database="invalid_db",
            )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/singlestore_search_tool_test.py",
"license": "MIT License",
"lines": 269,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/snowflake_search_tool_test.py | import asyncio
from unittest.mock import MagicMock, patch
from crewai_tools import SnowflakeConfig, SnowflakeSearchTool
import pytest
# Unit Test Fixtures
@pytest.fixture
def mock_snowflake_connection():
    """A fake Snowflake connection whose cursor yields two fixed rows."""
    cursor = MagicMock()
    cursor.description = [("col1",), ("col2",)]
    cursor.fetchall.return_value = [(1, "value1"), (2, "value2")]
    cursor.execute.return_value = None

    connection = MagicMock()
    connection.cursor.return_value = cursor
    return connection
@pytest.fixture
def mock_config():
    """A fully-populated SnowflakeConfig pointing at dummy credentials."""
    params = {
        "account": "test_account",
        "user": "test_user",
        "password": "test_password",
        "warehouse": "test_warehouse",
        "database": "test_db",
        "snowflake_schema": "test_schema",
    }
    return SnowflakeConfig(**params)
@pytest.fixture
def snowflake_tool(mock_config):
    """A SnowflakeSearchTool built without touching the real connector."""
    with patch("snowflake.connector.connect"):
        yield SnowflakeSearchTool(config=mock_config)
# Unit Tests
@pytest.mark.asyncio
async def test_successful_query_execution(snowflake_tool, mock_snowflake_connection):
    """A query should come back as dicts keyed by the cursor's column names."""
    with patch.object(snowflake_tool, "_create_connection") as factory:
        factory.return_value = mock_snowflake_connection

        rows = await snowflake_tool._run(
            query="SELECT * FROM test_table", timeout=300
        )

        assert len(rows) == 2
        first = rows[0]
        assert first["col1"] == 1
        assert first["col2"] == "value1"
        mock_snowflake_connection.cursor.assert_called_once()
@pytest.mark.asyncio
async def test_connection_pooling(snowflake_tool, mock_snowflake_connection):
    """Concurrent queries should reuse pooled connections, not open one each."""
    with patch.object(snowflake_tool, "_create_connection") as factory:
        factory.return_value = mock_snowflake_connection

        queries = ["SELECT 1", "SELECT 2", "SELECT 3"]
        await asyncio.gather(*(snowflake_tool._run(q) for q in queries))

        # The pool caps how many raw connections may ever be created.
        assert factory.call_count <= snowflake_tool.pool_size
@pytest.mark.asyncio
async def test_cleanup_on_deletion(snowflake_tool, mock_snowflake_connection):
    """__del__ must close every connection still sitting in the pool."""
    with patch.object(snowflake_tool, "_create_connection") as factory:
        factory.return_value = mock_snowflake_connection

        # Check a connection out, then put it back in the pool by hand.
        await snowflake_tool._get_connection()
        async with snowflake_tool._pool_lock:
            snowflake_tool._connection_pool.append(mock_snowflake_connection)

        # Trigger cleanup explicitly rather than waiting on the GC.
        snowflake_tool.__del__()

        mock_snowflake_connection.close.assert_called_once()
def test_config_validation():
    """SnowflakeConfig rejects missing fields, bad accounts, and no auth."""
    # All required fields absent.
    with pytest.raises(ValueError):
        SnowflakeConfig()
    # Account names may not contain '//'.
    with pytest.raises(ValueError):
        SnowflakeConfig(
            account="invalid//account", user="test_user", password="test_pass"
        )
    # A user without any credential is not a valid login.
    with pytest.raises(ValueError):
        SnowflakeConfig(account="test_account", user="test_user")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/snowflake_search_tool_test.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/stagehand_tool_test.py | import sys
from unittest.mock import MagicMock, patch
import pytest
# Create mock classes that will be used by our fixture
class MockStagehandModule:
    """Stand-in for the top-level `stagehand` module."""

    def __init__(self):
        # Expose the three names the tool module imports from `stagehand`.
        for attr in ("Stagehand", "StagehandConfig", "StagehandPage"):
            setattr(self, attr, MagicMock())
class MockStagehandSchemas:
    """Stand-in for the `stagehand.schemas` module."""

    def __init__(self):
        # Expose the schema names the tool module imports.
        for attr in ("ActOptions", "ExtractOptions", "ObserveOptions", "AvailableModel"):
            setattr(self, attr, MagicMock())
class MockStagehandUtils:
    """Stand-in for the `stagehand.utils` module."""

    def __init__(self):
        # Only configure_logging is consumed by the tool module.
        self.configure_logging = MagicMock()
@pytest.fixture(scope="module", autouse=True)
def mock_stagehand_modules():
    """Mock stagehand modules at the start of this test module.

    Injects fake `stagehand*` modules into sys.modules BEFORE importing the
    tool under test, so the import succeeds without the real dependency.
    The imported names are then published on this test module's namespace.
    """
    # Store original modules if they exist
    original_modules = {}
    for module_name in ["stagehand", "stagehand.schemas", "stagehand.utils"]:
        if module_name in sys.modules:
            original_modules[module_name] = sys.modules[module_name]
    # Create and inject mock modules
    mock_stagehand = MockStagehandModule()
    mock_stagehand_schemas = MockStagehandSchemas()
    mock_stagehand_utils = MockStagehandUtils()
    sys.modules["stagehand"] = mock_stagehand
    sys.modules["stagehand.schemas"] = mock_stagehand_schemas
    sys.modules["stagehand.utils"] = mock_stagehand_utils
    # Import after mocking
    from crewai_tools.tools.stagehand_tool.stagehand_tool import (
        StagehandResult,
        StagehandTool,
    )
    # Make these available to tests in this module
    sys.modules[__name__].StagehandResult = StagehandResult
    sys.modules[__name__].StagehandTool = StagehandTool
    yield
    # Restore original modules
    # NOTE(review): mocks injected for modules that did NOT previously exist
    # are left behind in sys.modules after teardown - confirm intended.
    for module_name, module in original_modules.items():
        sys.modules[module_name] = module
class MockStagehandPage(MagicMock):
    """Fake StagehandPage returning canned results for act/goto/extract/observe."""

    def act(self, options):
        outcome = MagicMock()
        outcome.model_dump.return_value = {
            "message": "Action completed successfully"
        }
        return outcome

    def goto(self, url):
        return MagicMock()

    def extract(self, options):
        outcome = MagicMock()
        outcome.model_dump.return_value = {
            "data": "Extracted content",
            "metadata": {"source": "test"},
        }
        return outcome

    def observe(self, options):
        button = MagicMock()
        button.description = "Button element"
        button.method = "click"
        field = MagicMock()
        field.description = "Input field"
        field.method = "type"
        return [button, field]
class MockStagehand(MagicMock):
    """Fake Stagehand client: init() provisions a session id and a fake page."""

    def init(self):
        self.session_id = "test-session-id"
        self.page = MockStagehandPage()

    def close(self):
        # Nothing to release in the fake.
        pass
@pytest.fixture
def mock_stagehand_instance():
    """Patch the Stagehand class used by the tool to hand back our fake client."""
    patcher = patch(
        "crewai_tools.tools.stagehand_tool.stagehand_tool.Stagehand",
        return_value=MockStagehand(),
    )
    mock = patcher.start()
    try:
        yield mock
    finally:
        patcher.stop()
@pytest.fixture
def stagehand_tool():
    """A StagehandTool in testing mode (dependency checks bypassed)."""
    return StagehandTool(
        api_key="test_api_key",
        project_id="test_project_id",
        model_api_key="test_model_api_key",
        _testing=True,  # skip the stagehand dependency check
    )
def test_stagehand_tool_initialization():
    """Default construction stores credentials and sane browser defaults."""
    tool = StagehandTool(
        api_key="test_api_key",
        project_id="test_project_id",
        model_api_key="test_model_api_key",
        _testing=True,
    )

    expected = {
        "api_key": "test_api_key",
        "project_id": "test_project_id",
        "model_api_key": "test_model_api_key",
        "headless": False,
        "dom_settle_timeout_ms": 3000,
        "self_heal": True,
        "wait_for_captcha_solves": True,
    }
    for attr, value in expected.items():
        assert getattr(tool, attr) == value
@patch(
    "crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True
)
def test_act_command(mock_run, stagehand_tool):
    """'act' reports the outcome of the browser action."""
    mock_run.return_value = "Action result: Action completed successfully"

    result = stagehand_tool._run(
        instruction="Click the submit button", command_type="act"
    )

    for fragment in ("Action result", "Action completed successfully"):
        assert fragment in result
@patch(
    "crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True
)
def test_navigate_command(mock_run, stagehand_tool):
    """'navigate' reports the URL the browser ended up on."""
    mock_run.return_value = "Successfully navigated to https://example.com"

    result = stagehand_tool._run(
        instruction="Go to example.com",
        url="https://example.com",
        command_type="navigate",
    )

    assert "https://example.com" in result
@patch(
    "crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True
)
def test_extract_command(mock_run, stagehand_tool):
    """'extract' returns the data pulled out of the page."""
    mock_run.return_value = (
        'Extracted data: {"data": "Extracted content", "metadata": {"source": "test"}}'
    )

    result = stagehand_tool._run(
        instruction="Extract all product names and prices", command_type="extract"
    )

    for fragment in ("Extracted data", "Extracted content"):
        assert fragment in result
@patch(
    "crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True
)
def test_observe_command(mock_run, stagehand_tool):
    """'observe' lists the elements found along with suggested actions."""
    mock_run.return_value = "Element 1: Button element\nSuggested action: click\nElement 2: Input field\nSuggested action: type"

    result = stagehand_tool._run(
        instruction="Find all interactive elements", command_type="observe"
    )

    expected_fragments = (
        "Element 1: Button element",
        "Element 2: Input field",
        "Suggested action: click",
        "Suggested action: type",
    )
    for fragment in expected_fragments:
        assert fragment in result
@patch(
    "crewai_tools.tools.stagehand_tool.stagehand_tool.StagehandTool._run", autospec=True
)
def test_error_handling(mock_run, stagehand_tool):
    """Automation failures surface as an error-prefixed result string."""
    mock_run.return_value = "Error: Browser automation error"

    result = stagehand_tool._run(
        instruction="Click a non-existent button", command_type="act"
    )

    for fragment in ("Error:", "Browser automation error"):
        assert fragment in result
def test_initialization_parameters():
    """StagehandTool must store every custom constructor parameter verbatim."""
    expected = {
        "api_key": "custom_api_key",
        "project_id": "custom_project_id",
        "model_api_key": "custom_model_api_key",
        "headless": True,
        "dom_settle_timeout_ms": 5000,
        "self_heal": False,
        "wait_for_captcha_solves": False,
        "verbose": 3,
    }
    # _testing=True enables testing mode so init does no real setup work.
    tool = StagehandTool(_testing=True, **expected)
    for attribute, value in expected.items():
        actual = getattr(tool, attribute)
        if isinstance(value, bool):
            # Preserve identity checks for boolean flags.
            assert actual is value
        else:
            assert actual == value
def test_close_method():
    """close() must drop both the stagehand client and the page reference."""
    tool = StagehandTool(
        api_key="test_api_key",
        project_id="test_project_id",
        model_api_key="test_model_api_key",
        _testing=True,
    )
    # Attach fake browser resources; close is a plain (non-async) MagicMock.
    fake_stagehand = MagicMock()
    fake_stagehand.close = MagicMock()
    tool._stagehand = fake_stagehand
    tool._page = MagicMock()
    tool.close()
    # Cleanup must null out both references.
    assert tool._stagehand is None
    assert tool._page is None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/stagehand_tool_test.py",
"license": "MIT License",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_code_interpreter_tool.py | from unittest.mock import patch
from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import (
CodeInterpreterTool,
SandboxPython,
)
import pytest
@pytest.fixture
def printer_mock():
    """Patch the crewai_tools Printer so print calls can be asserted."""
    with patch("crewai_tools.printer.Printer.print") as print_mock:
        yield print_mock
@pytest.fixture
def docker_unavailable_mock():
    """Force the tool to report Docker as unavailable (sandbox fallback path)."""
    with patch(
        "crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.CodeInterpreterTool._check_docker_available",
        return_value=False,
    ) as check_mock:
        yield check_mock
@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env")
def test_run_code_in_docker(docker_mock, printer_mock):
    """A successful container run returns the program's stdout verbatim."""
    expected_output = "Hello, World!\n"
    # docker_mock() returns the same child mock each call, so configuring the
    # chained exec_run() result here is what run_code_in_docker observes.
    exec_result = docker_mock().containers.run().exec_run()
    exec_result.exit_code = 0
    exec_result.output = expected_output.encode()
    tool = CodeInterpreterTool()
    result = tool.run_code_in_docker("print('Hello, World!')", ["numpy", "pandas"])
    assert result == expected_output
    printer_mock.assert_called_with(
        "Running code in Docker environment", color="bold_blue"
    )
@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env")
def test_run_code_in_docker_with_error(docker_mock, printer_mock):
    """A non-zero exit code is wrapped in an explanatory error message."""
    exec_result = docker_mock().containers.run().exec_run()
    exec_result.exit_code = 1
    exec_result.output = b"ZeroDivisionError: division by zero\n"
    tool = CodeInterpreterTool()
    result = tool.run_code_in_docker("print(1/0)", ["numpy", "pandas"])
    assert result == (
        "Something went wrong while running the code: \n"
        "ZeroDivisionError: division by zero\n"
    )
    printer_mock.assert_called_with(
        "Running code in Docker environment", color="bold_blue"
    )
@patch("crewai_tools.tools.code_interpreter_tool.code_interpreter_tool.docker_from_env")
def test_run_code_in_docker_with_script(docker_mock, printer_mock):
    """A multi-line script runs as a unit and yields combined stdout."""
    script = 'print("This is line 1")\nprint("This is line 2")'
    expected_output = "This is line 1\nThis is line 2\n"
    exec_result = docker_mock().containers.run().exec_run()
    exec_result.exit_code = 0
    exec_result.output = expected_output.encode()
    tool = CodeInterpreterTool()
    result = tool.run_code_in_docker(script, [])
    assert result == expected_output
    printer_mock.assert_called_with(
        "Running code in Docker environment", color="bold_blue"
    )
def test_restricted_sandbox_basic_code_execution(printer_mock, docker_unavailable_mock):
    """With Docker unavailable, code executes in the restricted sandbox."""
    tool = CodeInterpreterTool()
    snippet = "\nresult = 2 + 2\nprint(result)\n"
    outcome = tool.run(code=snippet, libraries_used=[])
    printer_mock.assert_called_with(
        "Running code in restricted sandbox", color="yellow"
    )
    # The sandbox returns the value bound to `result`, not captured stdout.
    assert outcome == 4
def test_restricted_sandbox_running_with_blocked_modules(
    printer_mock, docker_unavailable_mock
):
    """Every module on the sandbox blocklist must be rejected at import time."""
    tool = CodeInterpreterTool()
    for module in SandboxPython.BLOCKED_MODULES:
        snippet = f"\nimport {module}\nresult = \"Import succeeded\"\n"
        outcome = tool.run(code=snippet, libraries_used=[])
        printer_mock.assert_called_with(
            "Running code in restricted sandbox", color="yellow"
        )
        assert f"An error occurred: Importing '{module}' is not allowed" in outcome
def test_restricted_sandbox_running_with_blocked_builtins(
    printer_mock, docker_unavailable_mock
):
    """Unsafe builtins must be absent from the sandbox namespace."""
    tool = CodeInterpreterTool()
    for builtin in SandboxPython.UNSAFE_BUILTINS:
        snippet = f"\n{builtin}(\"test\")\nresult = \"Builtin available\"\n"
        outcome = tool.run(code=snippet, libraries_used=[])
        printer_mock.assert_called_with(
            "Running code in restricted sandbox", color="yellow"
        )
        assert f"An error occurred: name '{builtin}' is not defined" in outcome
def test_restricted_sandbox_running_with_no_result_variable(
    printer_mock, docker_unavailable_mock
):
    """Sandbox runs that never bind `result` report that explicitly."""
    tool = CodeInterpreterTool()
    outcome = tool.run(code="\nx = 10\n", libraries_used=[])
    printer_mock.assert_called_with(
        "Running code in restricted sandbox", color="yellow"
    )
    assert outcome == "No result variable found."
def test_unsafe_mode_running_with_no_result_variable(
    printer_mock, docker_unavailable_mock
):
    """Unsafe mode also reports when no `result` variable is set."""
    tool = CodeInterpreterTool(unsafe_mode=True)
    outcome = tool.run(code="\nx = 10\n", libraries_used=[])
    printer_mock.assert_called_with(
        "WARNING: Running code in unsafe mode", color="bold_magenta"
    )
    assert outcome == "No result variable found."
def test_unsafe_mode_running_unsafe_code(printer_mock, docker_unavailable_mock):
    """Unsafe mode permits otherwise-blocked operations (os import, eval).

    Fixes a copy-pasted docstring that wrongly described the
    missing-result-variable case.
    """
    tool = CodeInterpreterTool(unsafe_mode=True)
    code = """
import os
os.system("ls -la")
result = eval("5/1")
"""
    result = tool.run(code=code, libraries_used=[])
    printer_mock.assert_called_with(
        "WARNING: Running code in unsafe mode", color="bold_magenta"
    )
    # eval("5/1") is true division, so the result is the float 5.0.
    assert result == 5.0
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_code_interpreter_tool.py",
"license": "MIT License",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_file_writer_tool.py | import os
import shutil
import tempfile
from crewai_tools.tools.file_writer_tool.file_writer_tool import FileWriterTool
import pytest
@pytest.fixture
def tool():
    """Provide a fresh FileWriterTool instance for each test."""
    return FileWriterTool()
@pytest.fixture
def temp_env():
    """Yield a scratch dir plus default filename/content; remove dir afterwards."""
    temp_dir = tempfile.mkdtemp()
    yield {
        "temp_dir": temp_dir,
        "test_file": "test.txt",
        "test_content": "Hello, World!",
    }
    # Best-effort cleanup: ignore errors if a test already removed files.
    shutil.rmtree(temp_dir, ignore_errors=True)
def get_test_path(filename, directory):
    """Return the full path of *filename* inside *directory*."""
    return os.path.join(directory, filename)
def read_file(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        return handle.read()
def test_basic_file_write(tool, temp_env):
    """Writing a new file creates it with exactly the given content."""
    outcome = tool._run(
        filename=temp_env["test_file"],
        directory=temp_env["temp_dir"],
        content=temp_env["test_content"],
        overwrite=True,
    )
    target = get_test_path(temp_env["test_file"], temp_env["temp_dir"])
    assert os.path.exists(target)
    assert read_file(target) == temp_env["test_content"]
    assert "successfully written" in outcome
def test_directory_creation(tool, temp_env):
    """Missing target directories are created before the file is written."""
    nested = os.path.join(temp_env["temp_dir"], "nested_dir")
    outcome = tool._run(
        filename=temp_env["test_file"],
        directory=nested,
        content=temp_env["test_content"],
        overwrite=True,
    )
    written = get_test_path(temp_env["test_file"], nested)
    assert os.path.exists(nested)
    assert os.path.exists(written)
    assert "successfully written" in outcome
@pytest.mark.parametrize(
    "overwrite",
    ["y", "yes", "t", "true", "on", "1", True],
)
def test_overwrite_true(tool, temp_env, overwrite):
    """Every truthy overwrite spelling replaces existing file content."""
    target = get_test_path(temp_env["test_file"], temp_env["temp_dir"])
    with open(target, "w") as handle:
        handle.write("Original content")
    outcome = tool._run(
        filename=temp_env["test_file"],
        directory=temp_env["temp_dir"],
        content="New content",
        overwrite=overwrite,
    )
    assert read_file(target) == "New content"
    assert "successfully written" in outcome
def test_invalid_overwrite_value(tool, temp_env):
    """A non-boolean overwrite string is reported as an invalid value."""
    outcome = tool._run(
        filename=temp_env["test_file"],
        directory=temp_env["temp_dir"],
        content=temp_env["test_content"],
        overwrite="invalid",
    )
    assert "invalid value" in outcome
def test_missing_required_fields(tool, temp_env):
    """Omitting the filename surfaces as a key-access error message."""
    outcome = tool._run(
        directory=temp_env["temp_dir"],
        content=temp_env["test_content"],
        overwrite=True,
    )
    assert "An error occurred while accessing key: 'filename'" in outcome
def test_empty_content(tool, temp_env):
    """Writing empty content still creates an (empty) file."""
    outcome = tool._run(
        filename=temp_env["test_file"],
        directory=temp_env["temp_dir"],
        content="",
        overwrite=True,
    )
    target = get_test_path(temp_env["test_file"], temp_env["temp_dir"])
    assert os.path.exists(target)
    assert read_file(target) == ""
    assert "successfully written" in outcome
@pytest.mark.parametrize(
    "overwrite",
    ["n", "no", "f", "false", "off", "0", False],
)
def test_file_exists_error_handling(tool, temp_env, overwrite):
    """Every falsy overwrite spelling leaves an existing file untouched."""
    target = get_test_path(temp_env["test_file"], temp_env["temp_dir"])
    with open(target, "w") as handle:
        handle.write("Pre-existing content")
    outcome = tool._run(
        filename=temp_env["test_file"],
        directory=temp_env["temp_dir"],
        content="Should not be written",
        overwrite=overwrite,
    )
    assert "already exists and overwrite option was not passed" in outcome
    assert read_file(target) == "Pre-existing content"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_file_writer_tool.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_import_without_warnings.py | from pydantic.warnings import PydanticDeprecatedSince20
import pytest
@pytest.mark.filterwarnings("error", category=PydanticDeprecatedSince20)
def test_import_tools_without_pydantic_deprecation_warnings():
    # This test is to ensure that the import of crewai_tools does not raise any Pydantic deprecation warnings.
    # NOTE(review): pytest's filterwarnings mark takes string filter specs as
    # positional args; the `category` kwarg is presumably ignored, so "error"
    # escalates ALL warnings to errors here (stricter than intended). Consider
    # "error::pydantic.warnings.PydanticDeprecatedSince20" — TODO confirm.
    import crewai_tools
    assert crewai_tools
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_import_without_warnings.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_mongodb_vector_search_tool.py | import json
from unittest.mock import patch
from crewai_tools import MongoDBVectorSearchConfig, MongoDBVectorSearchTool
import pytest
# Unit Test Fixtures
@pytest.fixture
def mongodb_vector_search_tool():
    """Tool wired to dummy connection details with embedding stubbed out."""
    tool = MongoDBVectorSearchTool(
        connection_string="foo", database_name="bar", collection_name="test"
    )
    # Avoid real embedding calls; every input maps to a fixed 1-d vector.
    tool._embed_texts = lambda texts: [[0.1]]
    yield tool
# Unit Tests
def test_successful_query_execution(mongodb_vector_search_tool):
    """_run returns the aggregation results serialized as JSON."""
    tool = mongodb_vector_search_tool
    with patch.object(tool._coll, "aggregate") as mock_aggregate:
        mock_aggregate.return_value = [{"text": "foo", "score": 0.1, "_id": 1}]
        results = json.loads(tool._run(query="sandwiches"))
    assert len(results) == 1
    first = results[0]
    assert first["text"] == "foo"
    assert first["_id"] == 1
def test_provide_config():
    """A custom MongoDBVectorSearchConfig limit reaches the $vectorSearch stage.

    Also removes a stray duplicate ``mock_aggregate.return_value`` assignment
    that ran after the assertion and had no effect (dead code in the original).
    """
    query_config = MongoDBVectorSearchConfig(limit=10)
    tool = MongoDBVectorSearchTool(
        connection_string="foo",
        database_name="bar",
        collection_name="test",
        query_config=query_config,
        vector_index_name="foo",
        embedding_model="bar",
    )
    # Stub the embedder so no model is contacted.
    tool._embed_texts = lambda texts: [[0.1]]
    with patch.object(tool._coll, "aggregate") as mock_aggregate:
        mock_aggregate.return_value = [dict(text="foo", score=0.1, _id=1)]
        tool._run(query="sandwiches")
        # The configured limit must appear in the submitted pipeline.
        pipeline = mock_aggregate.mock_calls[-1].args[0]
        assert pipeline[0]["$vectorSearch"]["limit"] == 10
def test_cleanup_on_deletion(mongodb_vector_search_tool):
    """__del__ must close the underlying MongoDB client."""
    with patch.object(mongodb_vector_search_tool, "_client") as client_mock:
        # Invoke finalization directly to trigger the cleanup path.
        mongodb_vector_search_tool.__del__()
        client_mock.close.assert_called_once()
def test_create_search_index(mongodb_vector_search_tool):
    """create_vector_search_index forwards dimensions and defaults to cosine."""
    with patch(
        "crewai_tools.tools.mongodb_vector_search_tool.vector_search.create_vector_search_index"
    ) as create_index_mock:
        mongodb_vector_search_tool.create_vector_search_index(dimensions=10)
    call_kwargs = create_index_mock.mock_calls[0].kwargs
    assert call_kwargs["dimensions"] == 10
    assert call_kwargs["similarity"] == "cosine"
def test_add_texts(mongodb_vector_search_tool):
    """add_texts issues a bulk_write of ReplaceOne operations."""
    with patch.object(mongodb_vector_search_tool._coll, "bulk_write") as bulk_write:
        mongodb_vector_search_tool.add_texts(["foo"])
    first_op = bulk_write.mock_calls[0].args[0][0]
    assert "ReplaceOne" in str(first_op)
    assert "foo" in str(first_op)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_mongodb_vector_search_tool.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_oxylabs_tools.py | import json
import os
from unittest.mock import MagicMock
from crewai.tools.base_tool import BaseTool
from crewai_tools import (
OxylabsAmazonProductScraperTool,
OxylabsAmazonSearchScraperTool,
OxylabsGoogleSearchScraperTool,
OxylabsUniversalScraperTool,
)
from crewai_tools.tools.oxylabs_amazon_product_scraper_tool.oxylabs_amazon_product_scraper_tool import (
OxylabsAmazonProductScraperConfig,
)
from crewai_tools.tools.oxylabs_google_search_scraper_tool.oxylabs_google_search_scraper_tool import (
OxylabsGoogleSearchScraperConfig,
)
from oxylabs import RealtimeClient
from oxylabs.sources.response import Response as OxylabsResponse
from pydantic import BaseModel
import pytest
@pytest.fixture
def oxylabs_api() -> RealtimeClient:
    """Mock RealtimeClient for every Oxylabs source.

    Each scrape method is primed (via ``side_effect``) to return a parsed
    JSON response on the first call and a raw HTML response on the second —
    the exact order test_tool_invocation consumes them in.
    """
    oxylabs_api_mock = MagicMock()
    # Raw page markup used as the "unparsed" second response.
    html_content = """
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <title>Scraping Sandbox</title>
    </head>
    <body>
        <div id="main">
            <div id="product-list">
                <div>
                    <p>Amazing product</p>
                    <p>Price $14.99</p>
                </div>
                <div>
                    <p>Good product</p>
                    <p>Price $9.99</p>
                </div>
            </div>
        </div>
    </body>
    </html>
    """
    # Structured payload used as the "parsed" first response.
    json_content = {
        "results": {
            "products": [
                {"title": "Amazing product", "price": 14.99, "currency": "USD"},
                {"title": "Good product", "price": 9.99, "currency": "USD"},
            ],
        },
    }
    html_response = OxylabsResponse({"results": [{"content": html_content}]})
    json_response = OxylabsResponse({"results": [{"content": json_content}]})
    # Order matters: JSON first, then HTML, for every supported source.
    oxylabs_api_mock.universal.scrape_url.side_effect = [json_response, html_response]
    oxylabs_api_mock.amazon.scrape_search.side_effect = [json_response, html_response]
    oxylabs_api_mock.amazon.scrape_product.side_effect = [json_response, html_response]
    oxylabs_api_mock.google.scrape_search.side_effect = [json_response, html_response]
    return oxylabs_api_mock
@pytest.mark.parametrize(
    ("tool_class",),
    [
        (OxylabsUniversalScraperTool,),
        (OxylabsAmazonSearchScraperTool,),
        (OxylabsGoogleSearchScraperTool,),
        (OxylabsAmazonProductScraperTool,),
    ],
)
def test_tool_initialization(tool_class: type[BaseTool]):
    """Explicit credentials are enough to construct every scraper tool."""
    instance = tool_class(username="username", password="password")
    assert isinstance(instance, tool_class)
@pytest.mark.parametrize(
    ("tool_class",),
    [
        (OxylabsUniversalScraperTool,),
        (OxylabsAmazonSearchScraperTool,),
        (OxylabsGoogleSearchScraperTool,),
        (OxylabsAmazonProductScraperTool,),
    ],
)
def test_tool_initialization_with_env_vars(tool_class: type[BaseTool]):
    """Credentials may come from OXYLABS_USERNAME / OXYLABS_PASSWORD env vars.

    Cleanup now runs in a ``finally`` block: the original deleted the vars
    after the assertion, so a failure leaked them into later tests (notably
    test_tool_initialization_failure, which requires them absent).
    """
    os.environ["OXYLABS_USERNAME"] = "username"
    os.environ["OXYLABS_PASSWORD"] = "password"
    try:
        tool = tool_class()
        assert isinstance(tool, tool_class)
    finally:
        os.environ.pop("OXYLABS_USERNAME", None)
        os.environ.pop("OXYLABS_PASSWORD", None)
@pytest.mark.parametrize(
    ("tool_class",),
    [
        (OxylabsUniversalScraperTool,),
        (OxylabsAmazonSearchScraperTool,),
        (OxylabsGoogleSearchScraperTool,),
        (OxylabsAmazonProductScraperTool,),
    ],
)
def test_tool_initialization_failure(tool_class: type[BaseTool]):
    """Without credentials (args or env), construction must fail."""
    # Make sure no leftover credentials linger in the environment.
    for key in ("OXYLABS_USERNAME", "OXYLABS_PASSWORD"):
        os.environ.pop(key, None)
    with pytest.raises(ValueError):
        tool_class()
@pytest.mark.parametrize(
    ("tool_class", "tool_config"),
    [
        (OxylabsUniversalScraperTool, {"geo_location": "Paris, France"}),
        (
            OxylabsAmazonSearchScraperTool,
            {"domain": "co.uk"},
        ),
        (
            OxylabsGoogleSearchScraperTool,
            OxylabsGoogleSearchScraperConfig(render="html"),
        ),
        (
            OxylabsAmazonProductScraperTool,
            OxylabsAmazonProductScraperConfig(parse=True),
        ),
    ],
)
def test_tool_invocation(
    tool_class: type[BaseTool],
    tool_config: BaseModel,
    oxylabs_api: RealtimeClient,
):
    """Parsed responses come back as JSON text, raw responses as HTML text."""
    tool = tool_class(username="username", password="password", config=tool_config)
    # Assigning via __dict__ bypasses pydantic validation of the mock client.
    tool.__dict__["oxylabs_api"] = oxylabs_api
    # First call consumes the JSON side_effect → JSON-serializable dict.
    parsed = tool.run("Scraping Query 1")
    assert isinstance(parsed, str)
    assert isinstance(json.loads(parsed), dict)
    # Second call consumes the HTML side_effect → raw markup string.
    raw = tool.run("Scraping Query 2")
    assert isinstance(raw, str)
    assert "<!DOCTYPE html>" in raw
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_oxylabs_tools.py",
"license": "MIT License",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_search_tools.py | import os
from pathlib import Path
import tempfile
from unittest.mock import MagicMock
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools import (
CSVSearchTool,
CodeDocsSearchTool,
DOCXSearchTool,
DirectorySearchTool,
GithubSearchTool,
JSONSearchTool,
MDXSearchTool,
PDFSearchTool,
TXTSearchTool,
WebsiteSearchTool,
XMLSearchTool,
YoutubeChannelSearchTool,
YoutubeVideoSearchTool,
)
from crewai_tools.tools.rag.rag_tool import Adapter
import pytest
@pytest.fixture
def mock_adapter():
    """Provide a MagicMock constrained to the Adapter interface."""
    return MagicMock(spec=Adapter)
@pytest.mark.vcr()
def test_directory_search_tool():
    """Indexes a directory and finds content from a file inside it."""
    with tempfile.TemporaryDirectory() as temp_dir:
        sample = Path(temp_dir) / "test.txt"
        sample.write_text("This is a test file for directory search")
        searcher = DirectorySearchTool(directory=temp_dir)
        outcome = searcher._run(search_query="test file")
        assert "test file" in outcome.lower()
def test_pdf_search_tool(mock_adapter):
    """The PDF may be given at construction time or at query time."""
    mock_adapter.query.return_value = "this is a test"
    # Same expectations hold for both ways of supplying the document.
    for init_kwargs, run_kwargs in [
        ({"pdf": "test.pdf"}, {}),
        ({}, {"pdf": "test.pdf"}),
    ]:
        tool = PDFSearchTool(adapter=mock_adapter, **init_kwargs)
        result = tool._run(query="test content", **run_kwargs)
        assert "this is a test" in result.lower()
        mock_adapter.add.assert_called_once_with("test.pdf", data_type=DataType.PDF_FILE)
        mock_adapter.query.assert_called_once_with(
            "test content", similarity_threshold=0.6, limit=5
        )
        mock_adapter.add.reset_mock()
        mock_adapter.query.reset_mock()
@pytest.mark.vcr()
def test_txt_search_tool():
    """Adds a temp .txt file to the index and searches it."""
    with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as temp_file:
        temp_file.write(b"This is a test file for txt search")
        temp_file_path = temp_file.name
    try:
        searcher = TXTSearchTool()
        searcher.add(temp_file_path)
        outcome = searcher._run(search_query="test file")
        assert "test file" in outcome.lower()
    finally:
        os.unlink(temp_file_path)
def test_docx_search_tool(mock_adapter):
    """The DOCX may be fixed at construction or supplied per query."""
    mock_adapter.query.return_value = "this is a test"
    for init_kwargs, run_kwargs in [
        ({"docx": "test.docx"}, {}),
        ({}, {"docx": "test.docx"}),
    ]:
        tool = DOCXSearchTool(adapter=mock_adapter, **init_kwargs)
        result = tool._run(search_query="test content", **run_kwargs)
        assert "this is a test" in result.lower()
        mock_adapter.add.assert_called_once_with("test.docx", data_type=DataType.DOCX)
        mock_adapter.query.assert_called_once_with(
            "test content", similarity_threshold=0.6, limit=5
        )
        mock_adapter.add.reset_mock()
        mock_adapter.query.reset_mock()
@pytest.mark.vcr()
def test_json_search_tool():
    """Searches the content of a temp JSON file passed via json_path."""
    with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as temp_file:
        temp_file.write(b'{"test": "This is a test JSON file"}')
        temp_file_path = temp_file.name
    try:
        searcher = JSONSearchTool()
        outcome = searcher._run(search_query="test JSON", json_path=temp_file_path)
        assert "test json" in outcome.lower()
    finally:
        os.unlink(temp_file_path)
def test_xml_search_tool(mock_adapter):
    """An XML path supplied at query time is added and then searched."""
    mock_adapter.query.return_value = "this is a test"
    searcher = XMLSearchTool(adapter=mock_adapter)
    outcome = searcher._run(search_query="test XML", xml="test.xml")
    assert "this is a test" in outcome.lower()
    # Note: unlike the other tools, add() is expected without a data_type here.
    mock_adapter.add.assert_called_once_with("test.xml")
    mock_adapter.query.assert_called_once_with(
        "test XML", similarity_threshold=0.6, limit=5
    )
@pytest.mark.vcr()
def test_csv_search_tool():
    """Indexes a temp CSV file and searches its rows."""
    with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as temp_file:
        temp_file.write(b"name,description\ntest,This is a test CSV file")
        temp_file_path = temp_file.name
    try:
        searcher = CSVSearchTool()
        searcher.add(temp_file_path)
        outcome = searcher._run(search_query="test CSV")
        assert "test csv" in outcome.lower()
    finally:
        os.unlink(temp_file_path)
@pytest.mark.vcr()
def test_mdx_search_tool():
    """Indexes a temp MDX file and searches its content."""
    with tempfile.NamedTemporaryFile(suffix=".mdx", delete=False) as temp_file:
        temp_file.write(b"# Test MDX\nThis is a test MDX file")
        temp_file_path = temp_file.name
    try:
        searcher = MDXSearchTool()
        searcher.add(temp_file_path)
        outcome = searcher._run(search_query="test MDX")
        assert "test mdx" in outcome.lower()
    finally:
        os.unlink(temp_file_path)
def test_website_search_tool(mock_adapter):
    """The website may be fixed at construction or supplied per query."""
    mock_adapter.query.return_value = "this is a test"
    website = "https://crewai.com"
    question = "what is crewai?"
    for init_kwargs, run_kwargs in [
        ({"website": website}, {}),
        ({}, {"website": website}),
    ]:
        tool = WebsiteSearchTool(adapter=mock_adapter, **init_kwargs)
        result = tool._run(search_query=question, **run_kwargs)
        mock_adapter.query.assert_called_once_with(
            question, similarity_threshold=0.6, limit=5
        )
        mock_adapter.add.assert_called_once_with(website, data_type=DataType.WEBSITE)
        assert "this is a test" in result.lower()
        mock_adapter.query.reset_mock()
        mock_adapter.add.reset_mock()
def test_youtube_video_search_tool(mock_adapter):
    """The video URL may be fixed at construction or passed per query."""
    mock_adapter.query.return_value = "some video description"
    video_url = "https://www.youtube.com/watch?v=sample-video-id"
    question = "what is the video about?"
    for init_kwargs, run_kwargs in [
        ({"youtube_video_url": video_url}, {}),
        ({}, {"youtube_video_url": video_url}),
    ]:
        tool = YoutubeVideoSearchTool(adapter=mock_adapter, **init_kwargs)
        result = tool._run(search_query=question, **run_kwargs)
        assert "some video description" in result
        mock_adapter.add.assert_called_once_with(
            video_url, data_type=DataType.YOUTUBE_VIDEO
        )
        mock_adapter.query.assert_called_once_with(
            question, similarity_threshold=0.6, limit=5
        )
        mock_adapter.add.reset_mock()
        mock_adapter.query.reset_mock()
def test_youtube_channel_search_tool(mock_adapter):
    """The channel handle may be fixed at construction or passed per query."""
    mock_adapter.query.return_value = "channel description"
    channel_handle = "@crewai"
    question = "what is the channel about?"
    for init_kwargs, run_kwargs in [
        ({"youtube_channel_handle": channel_handle}, {}),
        ({}, {"youtube_channel_handle": channel_handle}),
    ]:
        tool = YoutubeChannelSearchTool(adapter=mock_adapter, **init_kwargs)
        result = tool._run(search_query=question, **run_kwargs)
        assert "channel description" in result
        mock_adapter.add.assert_called_once_with(
            channel_handle, data_type=DataType.YOUTUBE_CHANNEL
        )
        mock_adapter.query.assert_called_once_with(
            question, similarity_threshold=0.6, limit=5
        )
        mock_adapter.add.reset_mock()
        mock_adapter.query.reset_mock()
def test_code_docs_search_tool(mock_adapter):
    """The docs URL may be fixed at construction or passed per query."""
    mock_adapter.query.return_value = "test documentation"
    docs_url = "https://crewai.com/any-docs-url"
    question = "test documentation"
    for init_kwargs, run_kwargs in [
        ({"docs_url": docs_url}, {}),
        ({}, {"docs_url": docs_url}),
    ]:
        tool = CodeDocsSearchTool(adapter=mock_adapter, **init_kwargs)
        result = tool._run(search_query=question, **run_kwargs)
        assert "test documentation" in result
        mock_adapter.add.assert_called_once_with(docs_url, data_type=DataType.DOCS_SITE)
        mock_adapter.query.assert_called_once_with(
            question, similarity_threshold=0.6, limit=5
        )
        mock_adapter.add.reset_mock()
        mock_adapter.query.reset_mock()
def test_github_search_tool(mock_adapter):
    """GithubSearchTool: repo/content_types may come from init or run,
    with content_types defaulting to code/repo/pr/issue, and no add()
    at all when no repo is given."""
    mock_adapter.query.return_value = "repo description"
    # ensure the provided repo and content types are used after initialization
    tool = GithubSearchTool(
        gh_token="test_token",
        github_repo="crewai/crewai",
        content_types=["code"],
        adapter=mock_adapter,
    )
    result = tool._run(search_query="tell me about crewai repo")
    assert "repo description" in result
    mock_adapter.add.assert_called_once_with(
        "https://github.com/crewai/crewai",
        data_type=DataType.GITHUB,
        metadata={"content_types": ["code"], "gh_token": "test_token"},
    )
    mock_adapter.query.assert_called_once_with(
        "tell me about crewai repo", similarity_threshold=0.6, limit=5
    )
    # ensure content types provided by run call is used
    mock_adapter.query.reset_mock()
    mock_adapter.add.reset_mock()
    tool = GithubSearchTool(gh_token="test_token", adapter=mock_adapter)
    result = tool._run(
        github_repo="crewai/crewai",
        content_types=["code", "issue"],
        search_query="tell me about crewai repo",
    )
    assert "repo description" in result
    mock_adapter.add.assert_called_once_with(
        "https://github.com/crewai/crewai",
        data_type=DataType.GITHUB,
        metadata={"content_types": ["code", "issue"], "gh_token": "test_token"},
    )
    mock_adapter.query.assert_called_once_with(
        "tell me about crewai repo", similarity_threshold=0.6, limit=5
    )
    # ensure default content types are used if not provided
    mock_adapter.query.reset_mock()
    mock_adapter.add.reset_mock()
    tool = GithubSearchTool(gh_token="test_token", adapter=mock_adapter)
    result = tool._run(
        github_repo="crewai/crewai",
        search_query="tell me about crewai repo",
    )
    assert "repo description" in result
    mock_adapter.add.assert_called_once_with(
        "https://github.com/crewai/crewai",
        data_type=DataType.GITHUB,
        metadata={
            "content_types": ["code", "repo", "pr", "issue"],
            "gh_token": "test_token",
        },
    )
    mock_adapter.query.assert_called_once_with(
        "tell me about crewai repo", similarity_threshold=0.6, limit=5
    )
    # ensure nothing is added if no repo is provided
    mock_adapter.query.reset_mock()
    mock_adapter.add.reset_mock()
    tool = GithubSearchTool(gh_token="test_token", adapter=mock_adapter)
    result = tool._run(search_query="tell me about crewai repo")
    mock_adapter.add.assert_not_called()
    mock_adapter.query.assert_called_once_with(
        "tell me about crewai repo", similarity_threshold=0.6, limit=5
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_search_tools.py",
"license": "MIT License",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/tool_collection_test.py | import unittest
from unittest.mock import MagicMock
from crewai.tools import BaseTool
from crewai_tools.adapters.tool_collection import ToolCollection
class TestToolCollection(unittest.TestCase):
def setUp(self):
self.search_tool = self._create_mock_tool(
"SearcH", "Search Tool"
) # Tool name is case sensitive
self.calculator_tool = self._create_mock_tool("calculator", "Calculator Tool")
self.translator_tool = self._create_mock_tool("translator", "Translator Tool")
self.tools = ToolCollection(
[self.search_tool, self.calculator_tool, self.translator_tool]
)
def _create_mock_tool(self, name, description):
mock_tool = MagicMock(spec=BaseTool)
mock_tool.name = name
mock_tool.description = description
return mock_tool
def test_initialization(self):
self.assertEqual(len(self.tools), 3)
self.assertEqual(self.tools[0].name, "SearcH")
self.assertEqual(self.tools[1].name, "calculator")
self.assertEqual(self.tools[2].name, "translator")
def test_empty_initialization(self):
empty_collection = ToolCollection()
self.assertEqual(len(empty_collection), 0)
self.assertEqual(empty_collection._name_cache, {})
def test_initialization_with_none(self):
collection = ToolCollection(None)
self.assertEqual(len(collection), 0)
self.assertEqual(collection._name_cache, {})
def test_access_by_index(self):
self.assertEqual(self.tools[0], self.search_tool)
self.assertEqual(self.tools[1], self.calculator_tool)
self.assertEqual(self.tools[2], self.translator_tool)
def test_access_by_name(self):
self.assertEqual(self.tools["search"], self.search_tool)
self.assertEqual(self.tools["calculator"], self.calculator_tool)
self.assertEqual(self.tools["translator"], self.translator_tool)
def test_key_error_for_invalid_name(self):
with self.assertRaises(KeyError):
_ = self.tools["nonexistent"]
def test_index_error_for_invalid_index(self):
with self.assertRaises(IndexError):
_ = self.tools[10]
def test_negative_index(self):
self.assertEqual(self.tools[-1], self.translator_tool)
self.assertEqual(self.tools[-2], self.calculator_tool)
self.assertEqual(self.tools[-3], self.search_tool)
def test_append(self):
    # append() adds at the end and registers the tool in the name cache.
    new_tool = self._create_mock_tool("new", "New Tool")
    self.tools.append(new_tool)
    self.assertEqual(len(self.tools), 4)
    self.assertEqual(self.tools[3], new_tool)
    self.assertEqual(self.tools["new"], new_tool)
    self.assertIn("new", self.tools._name_cache)

def test_append_duplicate_name(self):
    # A duplicate name keeps both tools positionally, but the cache entry
    # now resolves to the most recently appended tool.
    duplicate_tool = self._create_mock_tool("search", "Duplicate Search Tool")
    self.tools.append(duplicate_tool)
    self.assertEqual(len(self.tools), 4)
    self.assertEqual(self.tools["search"], duplicate_tool)

def test_extend(self):
    # extend() behaves like repeated append, including cache updates.
    new_tools = [
        self._create_mock_tool("tool4", "Tool 4"),
        self._create_mock_tool("tool5", "Tool 5"),
    ]
    self.tools.extend(new_tools)
    self.assertEqual(len(self.tools), 5)
    self.assertEqual(self.tools["tool4"], new_tools[0])
    self.assertEqual(self.tools["tool5"], new_tools[1])
    self.assertIn("tool4", self.tools._name_cache)
    self.assertIn("tool5", self.tools._name_cache)

def test_insert(self):
    # insert() places the tool at the given index and caches its name.
    new_tool = self._create_mock_tool("inserted", "Inserted Tool")
    self.tools.insert(1, new_tool)
    self.assertEqual(len(self.tools), 4)
    self.assertEqual(self.tools[1], new_tool)
    self.assertEqual(self.tools["inserted"], new_tool)
    self.assertIn("inserted", self.tools._name_cache)

def test_remove(self):
    # remove() drops the tool and evicts its cache entry.
    self.tools.remove(self.calculator_tool)
    self.assertEqual(len(self.tools), 2)
    with self.assertRaises(KeyError):
        _ = self.tools["calculator"]
    self.assertNotIn("calculator", self.tools._name_cache)

def test_remove_nonexistent_tool(self):
    # Removing a tool that was never added raises ValueError (list semantics).
    nonexistent_tool = self._create_mock_tool("nonexistent", "Nonexistent Tool")
    with self.assertRaises(ValueError):
        self.tools.remove(nonexistent_tool)

def test_pop(self):
    # pop(i) returns the removed tool and evicts its cache entry.
    popped = self.tools.pop(1)
    self.assertEqual(popped, self.calculator_tool)
    self.assertEqual(len(self.tools), 2)
    with self.assertRaises(KeyError):
        _ = self.tools["calculator"]
    self.assertNotIn("calculator", self.tools._name_cache)

def test_pop_last(self):
    # pop() with no index removes the final tool, as with list.pop().
    popped = self.tools.pop()
    self.assertEqual(popped, self.translator_tool)
    self.assertEqual(len(self.tools), 2)
    with self.assertRaises(KeyError):
        _ = self.tools["translator"]
    self.assertNotIn("translator", self.tools._name_cache)

def test_clear(self):
    # clear() empties both the sequence and the name cache.
    self.tools.clear()
    self.assertEqual(len(self.tools), 0)
    self.assertEqual(self.tools._name_cache, {})
    with self.assertRaises(KeyError):
        _ = self.tools["search"]
def test_iteration(self):
    # Iteration yields the tools in insertion order.
    tools_list = list(self.tools)
    self.assertEqual(
        tools_list, [self.search_tool, self.calculator_tool, self.translator_tool]
    )

def test_contains(self):
    # Membership is checked by tool object, not by name.
    self.assertIn(self.search_tool, self.tools)
    self.assertIn(self.calculator_tool, self.tools)
    self.assertIn(self.translator_tool, self.tools)
    nonexistent_tool = self._create_mock_tool("nonexistent", "Nonexistent Tool")
    self.assertNotIn(nonexistent_tool, self.tools)

def test_slicing(self):
    # Slicing returns a plain list, not a ToolCollection.
    slice_result = self.tools[1:3]
    self.assertEqual(len(slice_result), 2)
    self.assertEqual(slice_result[0], self.calculator_tool)
    self.assertEqual(slice_result[1], self.translator_tool)
    self.assertIsInstance(slice_result, list)
    self.assertNotIsInstance(slice_result, ToolCollection)

def test_getitem_with_tool_name_as_int(self):
    # A numeric *string* is a name lookup; a bare int remains an index.
    numeric_name_tool = self._create_mock_tool("123", "Numeric Name Tool")
    self.tools.append(numeric_name_tool)
    self.assertEqual(self.tools["123"], numeric_name_tool)
    with self.assertRaises(IndexError):
        _ = self.tools[123]

def test_filter_by_names(self):
    # None means "no filter": everything comes back.
    filtered = self.tools.filter_by_names(None)
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 3)
    # Selecting known names keeps order and name-based access.
    filtered = self.tools.filter_by_names(["search", "translator"])
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 2)
    self.assertEqual(filtered[0], self.search_tool)
    self.assertEqual(filtered[1], self.translator_tool)
    self.assertEqual(filtered["search"], self.search_tool)
    self.assertEqual(filtered["translator"], self.translator_tool)
    # Unknown names are silently ignored rather than raising.
    filtered = self.tools.filter_by_names(["search", "nonexistent"])
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 1)
    self.assertEqual(filtered[0], self.search_tool)
    filtered = self.tools.filter_by_names(["nonexistent1", "nonexistent2"])
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 0)
    # An empty list selects nothing (distinct from None).
    filtered = self.tools.filter_by_names([])
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 0)

def test_filter_where(self):
    # filter_where() keeps tools for which the predicate is truthy.
    filtered = self.tools.filter_where(lambda tool: tool.name.startswith("S"))
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 1)
    self.assertEqual(filtered[0], self.search_tool)
    self.assertEqual(filtered["search"], self.search_tool)
    # Always-true predicate preserves everything in order.
    filtered = self.tools.filter_where(lambda tool: True)
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 3)
    self.assertEqual(filtered[0], self.search_tool)
    self.assertEqual(filtered[1], self.calculator_tool)
    self.assertEqual(filtered[2], self.translator_tool)
    # Always-false predicate yields an empty collection.
    filtered = self.tools.filter_where(lambda tool: False)
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 0)
    # Predicates may use any tool attribute, e.g. name length.
    filtered = self.tools.filter_where(lambda tool: len(tool.name) > 8)
    self.assertIsInstance(filtered, ToolCollection)
    self.assertEqual(len(filtered), 2)
    self.assertEqual(filtered[0], self.calculator_tool)
    self.assertEqual(filtered[1], self.translator_tool)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/tool_collection_test.py",
"license": "MIT License",
"lines": 175,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/agents/agent_adapters/base_converter_adapter.py | """Base converter adapter for structured output conversion."""
from __future__ import annotations
from abc import ABC, abstractmethod
import json
import re
from typing import TYPE_CHECKING, Any, Final, Literal
from crewai.utilities.pydantic_schema_utils import generate_model_description
if TYPE_CHECKING:
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.task import Task
# Pre-compiled patterns used to dig JSON out of markdown-formatted LLM output.
_CODE_BLOCK_PATTERN: Final[re.Pattern[str]] = re.compile(
    r"```(?:json)?\s*([\s\S]*?)```"
)
_JSON_OBJECT_PATTERN: Final[re.Pattern[str]] = re.compile(r"\{[\s\S]*}")


class BaseConverterAdapter(ABC):
    """Abstract base for adapters that coerce agent output into structured formats.

    Concrete adapters implement the two abstract hooks below; the shared
    JSON-extraction helpers live here so every adapter post-processes
    results the same way.

    Attributes:
        agent_adapter: The agent adapter instance being configured.
        _output_format: Expected output format ("json", "pydantic", or None).
        _schema: Schema description for the expected output.
    """

    def __init__(self, agent_adapter: BaseAgentAdapter) -> None:
        """Store the adapter and start with no structured-output requirement.

        Args:
            agent_adapter: The agent adapter to configure for structured output.
        """
        self.agent_adapter = agent_adapter
        self._output_format: Literal["json", "pydantic"] | None = None
        self._schema: dict[str, Any] | None = None

    @abstractmethod
    def configure_structured_output(self, task: Task) -> None:
        """Configure agents to return structured output.

        Implementations must support both JSON and Pydantic formats.

        Args:
            task: The task requiring structured output.
        """

    @abstractmethod
    def enhance_system_prompt(self, base_prompt: str) -> str:
        """Augment a system prompt with structured-output instructions.

        Args:
            base_prompt: The original system prompt.

        Returns:
            The prompt extended with structured output guidance.
        """

    def post_process_result(self, result: str) -> str:
        """Normalize a raw agent result into a JSON string when required.

        Args:
            result: The raw result from agent execution.

        Returns:
            The extracted JSON string, or the input untouched when no
            structured output format was configured.
        """
        if not self._output_format:
            return result
        return self._extract_json_from_text(result)

    @staticmethod
    def _validate_json(text: str) -> str | None:
        """Return *text* unchanged when it parses as JSON, otherwise None.

        Args:
            text: The text to validate as JSON.

        Returns:
            The text if it's valid JSON, None otherwise.
        """
        try:
            json.loads(text)
        except json.JSONDecodeError:
            return None
        return text

    @staticmethod
    def _extract_json_from_text(result: str) -> str:
        """Pull the first valid JSON payload out of possibly-markdown text.

        Tries, in order: the whole string, each fenced code block, then any
        brace-delimited span; falls back to the original text.

        Args:
            result: The text potentially containing JSON.

        Returns:
            Extracted JSON string if found and valid, otherwise the original result.
        """
        if not isinstance(result, str):
            return str(result)

        check = BaseConverterAdapter._validate_json
        whole = check(result)
        if whole:
            return whole
        for block in _CODE_BLOCK_PATTERN.finditer(result):
            candidate = check(block.group(1).strip())
            if candidate:
                return candidate
        for span in _JSON_OBJECT_PATTERN.finditer(result):
            candidate = check(span.group())
            if candidate:
                return candidate
        return result

    @staticmethod
    def _configure_format_from_task(
        task: Task,
    ) -> tuple[Literal["json", "pydantic"] | None, dict[str, Any] | None]:
        """Derive (output_format, schema) from a task's output requirements.

        Args:
            task: The task containing output format requirements.

        Returns:
            A (format, schema) pair; both are None when the task does not
            request structured output. JSON takes precedence over Pydantic.
        """
        if task.output_json:
            return "json", generate_model_description(task.output_json)
        if task.output_pydantic:
            return "pydantic", generate_model_description(task.output_pydantic)
        return None, None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/agents/agent_adapters/base_converter_adapter.py",
"license": "MIT License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/agents/agent_adapters/openai_agents/structured_output_converter.py | """OpenAI structured output converter for CrewAI task integration.
This module contains the OpenAIConverterAdapter class that handles structured
output conversion for OpenAI agents, supporting JSON and Pydantic model formats.
"""
import json
from typing import Any
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
from crewai.utilities.i18n import get_i18n
class OpenAIConverterAdapter(BaseConverterAdapter):
    """Structured-output converter for OpenAI agents.

    Configures the wrapped OpenAI agent's ``output_type`` from task
    requirements and injects schema instructions into the system prompt.

    Attributes:
        _output_model: The Pydantic model class for the output (OpenAI-specific).
    """

    def __init__(self, agent_adapter: Any) -> None:
        """Bind the converter to its OpenAI agent adapter.

        Args:
            agent_adapter: The OpenAI agent adapter instance.
        """
        super().__init__(agent_adapter=agent_adapter)
        self.agent_adapter: Any = agent_adapter
        self._output_model: Any = None

    def configure_structured_output(self, task: Any) -> None:
        """Point the OpenAI agent at the task's structured output model.

        Args:
            task: The task containing output format requirements.
        """
        self._output_format, self._schema = self._configure_format_from_task(task)
        # JSON output takes precedence over Pydantic, matching the base adapter.
        model = task.output_json or task.output_pydantic
        self._output_model = model if model else None
        if model:
            self.agent_adapter._openai_agent.output_type = model

    def enhance_system_prompt(self, base_prompt: str) -> str:
        """Append structured-output instructions to the prompt when needed.

        Args:
            base_prompt: The original system prompt.

        Returns:
            The prompt, extended with format instructions when a structured
            output format is configured.
        """
        if not self._output_format:
            return base_prompt
        instructions = get_i18n().slice("formatted_task_instructions")
        output_schema: str = instructions.format(
            output_format=json.dumps(self._schema, indent=2)
        )
        return f"{base_prompt}\n\n{output_schema}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/agents/agent_adapters/openai_agents/structured_output_converter.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/cli/organization/main.py | from httpx import HTTPStatusError
from rich.console import Console
from rich.table import Table
from crewai.cli.command import BaseCommand, PlusAPIMixin
from crewai.cli.config import Settings
console = Console()
class OrganizationCommand(BaseCommand, PlusAPIMixin):
    """CLI command handler for listing, switching, and showing organizations."""

    def __init__(self) -> None:
        """Wire up base-command state and the Plus API client mixin."""
        BaseCommand.__init__(self)
        PlusAPIMixin.__init__(self, telemetry=self._telemetry)

    # Defined before `list` so the builtin `list` is still resolvable in the
    # return annotation while the class body executes.
    def _fetch_organizations(self) -> list | None:
        """Fetch the user's organizations from the Plus API.

        Shared by `list` and `switch` so the HTTP error handling (401 vs
        other statuses) lives in exactly one place.

        Returns:
            The parsed organization list, or None when the user is not
            logged in (a message has already been printed).

        Raises:
            SystemExit: On any non-401 HTTP error response.
        """
        try:
            response = self.plus_api_client.get_organizations()
            response.raise_for_status()
            return response.json()
        except HTTPStatusError as e:
            if e.response.status_code == 401:
                console.print(
                    "You are not logged in to any organization. Use 'crewai login' to login.",
                    style="bold red",
                )
                return None
            console.print(
                f"Failed to retrieve organization list: {e!s}", style="bold red"
            )
            raise SystemExit(1) from e

    def list(self) -> None:
        """Print a table of the organizations the user belongs to.

        Raises:
            SystemExit: If the organization list cannot be retrieved.
        """
        try:
            orgs = self._fetch_organizations()
            if orgs is None:
                return
            if not orgs:
                console.print(
                    "You don't belong to any organizations yet.", style="yellow"
                )
                return
            table = Table(title="Your Organizations")
            table.add_column("Name", style="cyan")
            table.add_column("ID", style="green")
            for org in orgs:
                table.add_row(org["name"], org["uuid"])
            console.print(table)
        except Exception as e:
            # SystemExit from the helper is a BaseException and passes through.
            console.print(
                f"Failed to retrieve organization list: {e!s}", style="bold red"
            )
            raise SystemExit(1) from e

    def switch(self, org_id: str) -> None:
        """Switch the saved local settings to the organization with `org_id`.

        Args:
            org_id: UUID of the organization to activate.

        Raises:
            SystemExit: If fetching organizations or saving settings fails.
        """
        try:
            orgs = self._fetch_organizations()
            if orgs is None:
                return
            org = next((o for o in orgs if o["uuid"] == org_id), None)
            if not org:
                console.print(
                    f"Organization with id '{org_id}' not found.", style="bold red"
                )
                return
            settings = Settings()
            settings.org_name = org["name"]
            settings.org_uuid = org["uuid"]
            settings.dump()
            console.print(
                f"Successfully switched to {org['name']} ({org['uuid']})",
                style="bold green",
            )
        except Exception as e:
            console.print(f"Failed to switch organization: {e!s}", style="bold red")
            raise SystemExit(1) from e

    def current(self) -> None:
        """Show which organization is currently active, with next-step hints."""
        settings = Settings()
        if settings.org_uuid:
            console.print(
                f"Currently logged in to organization {settings.org_name} ({settings.org_uuid})",
                style="bold green",
            )
        else:
            console.print(
                "You're not currently logged in to any organization.", style="yellow"
            )
            console.print(
                "Use 'crewai org list' to see available organizations.", style="yellow"
            )
            console.print(
                "Use 'crewai org switch <id>' to switch to an organization.",
                style="yellow",
            )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/cli/organization/main.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/cli/templates/flow/main.py | #!/usr/bin/env python
from random import randint
from pydantic import BaseModel
from crewai.flow import Flow, listen, start
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew
class PoemState(BaseModel):
    """Flow state: target sentence count and the generated poem text."""

    # Number of sentences the poem should contain (defaults to a single sentence).
    sentence_count: int = 1
    # The generated poem; empty until generate_poem runs.
    poem: str = ""
class PoemFlow(Flow[PoemState]):
    """Flow that picks a sentence count, generates a poem, and saves it to disk."""

    @start()
    def generate_sentence_count(self, crewai_trigger_payload: dict | None = None):
        """Choose the poem length, honouring a trigger payload when provided."""
        print("Generating sentence count")
        # Use trigger payload if available
        if crewai_trigger_payload:
            # Example: use trigger data to influence sentence count
            self.state.sentence_count = crewai_trigger_payload.get('sentence_count', randint(1, 5))
            print(f"Using trigger payload: {crewai_trigger_payload}")
        else:
            self.state.sentence_count = randint(1, 5)

    @listen(generate_sentence_count)
    def generate_poem(self):
        """Run the PoemCrew and stash its raw output in the flow state."""
        print("Generating poem")
        result = (
            PoemCrew()
            .crew()
            .kickoff(inputs={"sentence_count": self.state.sentence_count})
        )
        print("Poem generated", result.raw)
        self.state.poem = result.raw

    @listen(generate_poem)
    def save_poem(self):
        """Persist the generated poem to poem.txt in the working directory."""
        print("Saving poem")
        with open("poem.txt", "w") as f:
            f.write(self.state.poem)
def kickoff():
    """Run the poem flow end to end."""
    PoemFlow().kickoff()


def plot():
    """Render a diagram of the poem flow."""
    PoemFlow().plot()
def run_with_trigger():
    """Run the flow with a trigger payload taken from the command line.

    The first CLI argument must be a JSON document; it is passed to the flow
    as ``crewai_trigger_payload`` so ``@start()`` methods can consume it.

    Returns:
        The flow result.

    Raises:
        Exception: If no payload is given, the payload is not valid JSON,
            or the flow itself fails.
    """
    import json
    import sys

    # Get trigger payload from the first command line argument
    if len(sys.argv) < 2:
        raise Exception("No trigger payload provided. Please provide JSON payload as argument.")

    try:
        trigger_payload = json.loads(sys.argv[1])
    except json.JSONDecodeError as e:
        # Chain the decode error so position/context info is preserved.
        raise Exception("Invalid JSON payload provided as argument") from e

    # Create flow and kickoff with trigger payload.
    # The @start() methods automatically receive crewai_trigger_payload.
    poem_flow = PoemFlow()
    try:
        return poem_flow.kickoff({"crewai_trigger_payload": trigger_payload})
    except Exception as e:
        raise Exception(f"An error occurred while running the flow with trigger: {e}") from e


if __name__ == "__main__":
    kickoff()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/cli/templates/flow/main.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/cli/triggers/main.py | import json
import subprocess
from typing import Any
from rich.console import Console
from rich.table import Table
from crewai.cli.command import BaseCommand, PlusAPIMixin
console = Console()
class TriggersCommand(BaseCommand, PlusAPIMixin):
"""
A class to handle trigger-related operations for CrewAI projects.
"""
def __init__(self):
BaseCommand.__init__(self)
PlusAPIMixin.__init__(self, telemetry=self._telemetry)
def list_triggers(self) -> None:
"""List all available triggers from integrations."""
try:
console.print("[bold blue]Fetching available triggers...[/bold blue]")
response = self.plus_api_client.get_triggers()
self._validate_response(response)
triggers_data = response.json()
self._display_triggers(triggers_data)
except Exception as e:
console.print(f"[bold red]Error fetching triggers: {e}[/bold red]")
raise SystemExit(1) from e
def execute_with_trigger(self, trigger_path: str) -> None:
"""Execute crew with trigger payload."""
try:
# Parse app_slug/trigger_slug
if "/" not in trigger_path:
console.print(
"[bold red]Error: Trigger must be in format 'app_slug/trigger_slug'[/bold red]"
)
raise SystemExit(1)
app_slug, trigger_slug = trigger_path.split("/", 1)
console.print(
f"[bold blue]Fetching trigger payload for {app_slug}/{trigger_slug}...[/bold blue]"
)
response = self.plus_api_client.get_trigger_payload(app_slug, trigger_slug)
if response.status_code == 404:
error_data = response.json()
console.print(
f"[bold red]Error: {error_data.get('error', 'Trigger not found')}[/bold red]"
)
raise SystemExit(1)
self._validate_response(response)
trigger_data = response.json()
self._display_trigger_info(trigger_data)
# Run crew with trigger payload
self._run_crew_with_payload(trigger_data.get("sample_payload", {}))
except Exception as e:
console.print(
f"[bold red]Error executing crew with trigger: {e}[/bold red]"
)
raise SystemExit(1) from e
def _display_triggers(self, triggers_data: dict[str, Any]) -> None:
"""Display triggers in a formatted table."""
apps = triggers_data.get("apps", [])
if not apps:
console.print("[yellow]No triggers found.[/yellow]")
return
for app in apps:
app_name = app.get("name", "Unknown App")
app_slug = app.get("slug", "unknown")
is_connected = app.get("is_connected", False)
connection_status = (
"[green]✓ Connected[/green]"
if is_connected
else "[red]✗ Not Connected[/red]"
)
console.print(
f"\n[bold cyan]{app_name}[/bold cyan] ({app_slug}) - {connection_status}"
)
console.print(
f"[dim]{app.get('description', 'No description available')}[/dim]"
)
triggers = app.get("triggers", [])
if triggers:
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Trigger", style="cyan")
table.add_column("Name", style="green")
table.add_column("Description", style="dim")
for trigger in triggers:
trigger_path = f"{app_slug}/{trigger.get('slug', 'unknown')}"
table.add_row(
trigger_path,
trigger.get("name", "Unknown"),
trigger.get("description", "No description"),
)
console.print(table)
else:
console.print("[dim] No triggers available[/dim]")
def _display_trigger_info(self, trigger_data: dict[str, Any]) -> None:
"""Display trigger information before execution."""
sample_payload = trigger_data.get("sample_payload", {})
if sample_payload:
console.print("\n[bold yellow]Sample Payload:[/bold yellow]")
console.print(json.dumps(sample_payload, indent=2))
def _run_crew_with_payload(self, payload: dict[str, Any]) -> None:
"""Run the crew with the trigger payload using the run_with_trigger method."""
try:
subprocess.run( # noqa: S603
["uv", "run", "run_with_trigger", json.dumps(payload)], # noqa: S607
capture_output=False,
text=True,
check=True,
)
except Exception as e:
raise SystemExit(1) from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/cli/triggers/main.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/context.py | from collections.abc import Generator
from contextlib import contextmanager
import contextvars
import os
from typing import Any
_platform_integration_token: contextvars.ContextVar[str | None] = (
contextvars.ContextVar("platform_integration_token", default=None)
)
def set_platform_integration_token(integration_token: str) -> None:
"""Set the platform integration token in the current context.
Args:
integration_token: The integration token to set.
"""
_platform_integration_token.set(integration_token)
def get_platform_integration_token() -> str | None:
"""Get the platform integration token from the current context or environment.
Returns:
The integration token if set, otherwise None.
"""
token = _platform_integration_token.get()
if token is None:
token = os.getenv("CREWAI_PLATFORM_INTEGRATION_TOKEN")
return token
@contextmanager
def platform_context(integration_token: str) -> Generator[None, Any, None]:
"""Context manager to temporarily set the platform integration token.
Args:
integration_token: The integration token to set within the context.
"""
token = _platform_integration_token.set(integration_token)
try:
yield
finally:
_platform_integration_token.reset(token)
_current_task_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
"current_task_id", default=None
)
def set_current_task_id(task_id: str | None) -> contextvars.Token[str | None]:
"""Set the current task ID in the context. Returns a token for reset."""
return _current_task_id.set(task_id)
def reset_current_task_id(token: contextvars.Token[str | None]) -> None:
"""Reset the current task ID to its previous value."""
_current_task_id.reset(token)
def get_current_task_id() -> str | None:
"""Get the current task ID from the context."""
return _current_task_id.get()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/context.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/events/depends.py | """Dependency injection system for event handlers.
This module provides a FastAPI-style dependency system that allows event handlers
to declare dependencies on other handlers, ensuring proper execution order while
maintaining parallelism where possible.
"""
from collections.abc import Coroutine
from typing import Any, Generic, Protocol, TypeVar
from crewai.events.base_events import BaseEvent
# Contravariant so a handler declared for BaseEvent can serve any event subtype.
EventT_co = TypeVar("EventT_co", bound=BaseEvent, contravariant=True)


class EventHandler(Protocol[EventT_co]):
    """Protocol for event handler functions.

    Generic protocol that accepts any subclass of BaseEvent.
    Handlers can be either synchronous (returning None) or asynchronous
    (returning a coroutine).
    """

    def __call__(
        self, source: Any, event: EventT_co, /
    ) -> None | Coroutine[Any, Any, None]:
        """Event handler signature.

        Args:
            source: The object that emitted the event
            event: The event instance (any BaseEvent subclass)

        Returns:
            None for sync handlers, Coroutine for async handlers
        """
        ...


# Bound used by Depends so declared dependencies are themselves event handlers.
T = TypeVar("T", bound=EventHandler[Any])
class Depends(Generic[T]):
    """Marks an event handler as dependent on another handler.

    Mirrors FastAPI's ``Depends``: a handler registered with this marker runs
    only after the referenced handler has completed, while unrelated handlers
    remain free to run in parallel.

    Args:
        handler: The handler function that this handler depends on

    Example:
        >>> from crewai.events import Depends, crewai_event_bus
        >>> from crewai.events import LLMCallStartedEvent
        >>> @crewai_event_bus.on(LLMCallStartedEvent)
        >>> def setup_context(source, event):
        ...     return {"initialized": True}
        >>>
        >>> @crewai_event_bus.on(LLMCallStartedEvent, depends_on=Depends(setup_context))
        >>> def process(source, event):
        ...     # Runs after setup_context completes
        ...     pass
    """

    def __init__(self, handler: T) -> None:
        """Capture the prerequisite handler.

        Args:
            handler: The handler function this depends on
        """
        self.handler = handler

    def __repr__(self) -> str:
        """Render as ``Depends(<handler name>)`` for debugging."""
        label = getattr(self.handler, "__name__", repr(self.handler))
        return f"Depends({label})"

    def __eq__(self, other: object) -> bool:
        """Two Depends compare equal iff they wrap the very same handler object."""
        return isinstance(other, Depends) and self.handler is other.handler

    def __hash__(self) -> int:
        """Hash by handler identity, consistent with identity-based equality."""
        return id(self.handler)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/depends.py",
"license": "MIT License",
"lines": 77,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/events/event_bus.py | """Event bus for managing and dispatching events in CrewAI.
This module provides a singleton event bus that allows registration and handling
of events throughout the CrewAI system, supporting both synchronous and asynchronous
event handlers with optional dependency management.
"""
import asyncio
import atexit
from collections.abc import Callable, Generator
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
import contextvars
import threading
from typing import Any, Final, ParamSpec, TypeVar
from typing_extensions import Self
from crewai.events.base_events import BaseEvent, get_next_emission_sequence
from crewai.events.depends import Depends
from crewai.events.event_context import (
SCOPE_ENDING_EVENTS,
SCOPE_STARTING_EVENTS,
VALID_EVENT_PAIRS,
get_current_parent_id,
get_enclosing_parent_id,
get_last_event_id,
get_triggering_event_id,
handle_empty_pop,
handle_mismatch,
pop_event_scope,
push_event_scope,
set_last_event_id,
)
from crewai.events.handler_graph import build_execution_plan
from crewai.events.types.event_bus_types import (
AsyncHandler,
AsyncHandlerSet,
ExecutionPlan,
Handler,
SyncHandler,
SyncHandlerSet,
)
from crewai.events.types.llm_events import LLMStreamChunkEvent
from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.events.utils.handlers import is_async_handler, is_call_handler_safe
from crewai.utilities.rw_lock import RWLock
P = ParamSpec("P")
R = TypeVar("R")
class CrewAIEventsBus:
    """Singleton event bus for handling events in CrewAI.

    This class manages event registration and emission for both synchronous
    and asynchronous event handlers, automatically scheduling async handlers
    in a dedicated background event loop.

    Synchronous handlers execute in a thread pool executor to ensure completion
    before program exit. Asynchronous handlers execute in a dedicated event loop
    running in a daemon thread, with graceful shutdown waiting for completion.

    Attributes:
        _instance: Singleton instance of the event bus
        _instance_lock: Reentrant lock for singleton initialization (class-level)
        _rwlock: Read-write lock for handler registration and access (instance-level)
        _sync_handlers: Mapping of event types to registered synchronous handlers
        _async_handlers: Mapping of event types to registered asynchronous handlers
        _sync_executor: Thread pool executor for running synchronous handlers
        _loop: Dedicated asyncio event loop for async handler execution
        _loop_thread: Background daemon thread running the event loop
        _console: Console formatter for error output
    """

    # Class-level singleton state; everything below it is per-instance and
    # populated by _initialize().
    _instance: Self | None = None
    _instance_lock: threading.RLock = threading.RLock()
    _rwlock: RWLock
    _sync_handlers: dict[type[BaseEvent], SyncHandlerSet]
    _async_handlers: dict[type[BaseEvent], AsyncHandlerSet]
    _handler_dependencies: dict[type[BaseEvent], dict[Handler, list[Depends[Any]]]]
    _execution_plan_cache: dict[type[BaseEvent], ExecutionPlan]
    _console: ConsoleFormatter
    _shutting_down: bool
    _pending_futures: set[Future[Any]]
    _futures_lock: threading.Lock

    def __new__(cls) -> Self:
        """Create or return the singleton instance.

        Returns:
            The singleton CrewAIEventsBus instance
        """
        # Double-checked locking: cheap unlocked read first, then re-check
        # under the lock so only one thread ever runs _initialize().
        if cls._instance is None:
            with cls._instance_lock:
                if cls._instance is None:
                    cls._instance = super().__new__(cls)
                    cls._instance._initialize()
        return cls._instance

    def _initialize(self) -> None:
        """Initialize the event bus internal state.

        Creates handler dictionaries and starts a dedicated background
        event loop for async handler execution.
        """
        self._shutting_down = False
        self._rwlock = RWLock()
        # Futures still in flight, tracked so shutdown can wait for them.
        self._pending_futures: set[Future[Any]] = set()
        self._futures_lock = threading.Lock()
        self._sync_handlers: dict[type[BaseEvent], SyncHandlerSet] = {}
        self._async_handlers: dict[type[BaseEvent], AsyncHandlerSet] = {}
        self._handler_dependencies: dict[
            type[BaseEvent], dict[Handler, list[Depends[Any]]]
        ] = {}
        self._execution_plan_cache: dict[type[BaseEvent], ExecutionPlan] = {}
        # Sync handlers run on this pool so they can finish before exit.
        self._sync_executor = ThreadPoolExecutor(
            max_workers=10,
            thread_name_prefix="CrewAISyncHandler",
        )
        self._console = ConsoleFormatter()
        # Async handlers are scheduled onto this dedicated loop, which lives
        # on a daemon thread for the lifetime of the process.
        self._loop = asyncio.new_event_loop()
        self._loop_thread = threading.Thread(
            target=self._run_loop,
            name="CrewAIEventsLoop",
            daemon=True,
        )
        self._loop_thread.start()
def _track_future(self, future: Future[Any]) -> Future[Any]:
    """Register *future* for shutdown tracking; it removes itself when done.

    Args:
        future: The future to track

    Returns:
        The same future for chaining
    """
    with self._futures_lock:
        self._pending_futures.add(future)

    def _discard(done: Future[Any]) -> None:
        # Done-callbacks may fire on another thread; guard the shared set.
        with self._futures_lock:
            self._pending_futures.discard(done)

    future.add_done_callback(_discard)
    return future
def _run_loop(self) -> None:
    """Run the background async event loop.

    Executes on the dedicated daemon thread; run_forever() blocks until
    the loop is stopped.
    """
    asyncio.set_event_loop(self._loop)
    self._loop.run_forever()
def _register_handler(
    self,
    event_type: type[BaseEvent],
    handler: Callable[..., Any],
    dependencies: list[Depends[Any]] | None = None,
) -> None:
    """Register a handler for the given event type.

    Args:
        event_type: The event class to listen for
        handler: The handler function to register
        dependencies: Optional list of dependencies
    """
    with self._rwlock.w_locked():
        # Handler sets are immutable frozensets: replace, don't mutate.
        if is_async_handler(handler):
            current = self._async_handlers.get(event_type, frozenset())
            self._async_handlers[event_type] = current | {handler}
        else:
            current = self._sync_handlers.get(event_type, frozenset())
            self._sync_handlers[event_type] = current | {handler}

        if dependencies:
            self._handler_dependencies.setdefault(event_type, {})[handler] = (
                dependencies
            )

        # Any cached execution plan for this event type is now stale.
        self._execution_plan_cache.pop(event_type, None)
def on(
    self,
    event_type: type[BaseEvent],
    depends_on: Depends[Any] | list[Depends[Any]] | None = None,
) -> Callable[[Callable[P, R]], Callable[P, R]]:
    """Create a decorator that registers a handler for *event_type*.

    Args:
        event_type: The event class to listen for.
        depends_on: A single dependency or a list of them. Handlers with
            dependencies only execute after their dependencies complete.

    Returns:
        A decorator that registers the wrapped handler and returns it.

    Example:
        >>> from crewai.events import crewai_event_bus, Depends
        >>> from crewai.events.types.llm_events import LLMCallStartedEvent
        >>>
        >>> @crewai_event_bus.on(LLMCallStartedEvent)
        >>> def setup_context(source, event):
        ...     print("Setting up context")
        >>>
        >>> @crewai_event_bus.on(LLMCallStartedEvent, depends_on=Depends(setup_context))
        >>> def process(source, event):
        ...     print("Processing (runs after setup_context)")
    """

    def decorator(handler: Callable[P, R]) -> Callable[P, R]:
        """Register *handler* (normalizing depends_on) and return it unchanged."""
        if depends_on is None:
            normalized = None
        elif isinstance(depends_on, Depends):
            # A single dependency is wrapped into a one-element list.
            normalized = [depends_on]
        else:
            normalized = depends_on
        self._register_handler(event_type, handler, dependencies=normalized)
        return handler

    return decorator
def off(
    self,
    event_type: type[BaseEvent],
    handler: Callable[..., Any],
) -> None:
    """Remove a handler from every registry entry for *event_type*.

    Registry entries that become empty are deleted outright, and any
    cached execution plan for the event type is discarded.

    Args:
        event_type: The event class to stop listening for.
        handler: The previously registered handler to remove.
    """
    with self._rwlock.w_locked():
        for registry in (self._sync_handlers, self._async_handlers):
            current = registry.get(event_type)
            if current is not None and handler in current:
                remaining = current - {handler}
                if remaining:
                    registry[event_type] = remaining
                else:
                    del registry[event_type]
        deps_for_type = self._handler_dependencies.get(event_type)
        if deps_for_type is not None:
            deps_for_type.pop(handler, None)
            if not deps_for_type:
                del self._handler_dependencies[event_type]
        # Handler population changed; invalidate the cached plan.
        self._execution_plan_cache.pop(event_type, None)
def _call_handlers(
    self,
    source: Any,
    event: BaseEvent,
    handlers: SyncHandlerSet,
) -> None:
    """Invoke every synchronous handler, reporting (not raising) failures.

    All handlers run first; any captured exceptions are printed afterwards
    so one failing handler never prevents the rest from executing.

    Args:
        source: The object that emitted the event.
        event: The event instance being delivered.
        handlers: Frozenset of sync handlers to invoke.
    """
    failures: list[tuple[SyncHandler, Exception]] = []
    for handler in handlers:
        error = is_call_handler_safe(handler, source, event)
        if error is not None:
            failures.append((handler, error))
    for handler, error in failures:
        self._console.print(
            f"[CrewAIEventsBus] Sync handler error in {handler.__name__}: {error}"
        )
async def _acall_handlers(
    self,
    source: Any,
    event: BaseEvent,
    handlers: AsyncHandlerSet,
) -> None:
    """Run all async handlers concurrently and log any exceptions.

    Handlers are awaited together via ``asyncio.gather`` with
    ``return_exceptions=True`` so one failure never cancels the others.

    Args:
        source: The object that emitted the event.
        event: The event instance being delivered.
        handlers: Frozenset of async handlers to await.
    """
    # Materialize once so coroutines and results pair up unambiguously.
    ordered = list(handlers)
    results = await asyncio.gather(
        *(handler(source, event) for handler in ordered),
        return_exceptions=True,
    )
    for handler, result in zip(ordered, results, strict=False):
        if isinstance(result, Exception):
            self._console.print(
                f"[CrewAIEventsBus] Async handler error in {getattr(handler, '__name__', handler)}: {result}"
            )
async def _emit_with_dependencies(self, source: Any, event: BaseEvent) -> None:
    """Emit an event with dependency-aware handler execution.

    Handlers are grouped into execution levels based on their dependencies.
    Within each level, async handlers run concurrently while sync handlers
    run sequentially (or in thread pool). Each level completes before the
    next level starts.

    Uses a cached execution plan for performance. The plan is built once
    per event type and cached until handlers are modified.

    Args:
        source: The emitting object
        event: The event instance to emit
    """
    event_type = type(event)
    # Fast path: fetch the cached plan and a handler snapshot under the
    # read lock only.
    with self._rwlock.r_locked():
        if self._shutting_down:
            return
        cached_plan = self._execution_plan_cache.get(event_type)
        if cached_plan is not None:
            sync_handlers = self._sync_handlers.get(event_type, frozenset())
            async_handlers = self._async_handlers.get(event_type, frozenset())
    if cached_plan is None:
        # Slow path: build the plan under the write lock. Double-check the
        # cache because another thread may have built it between releasing
        # the read lock and acquiring the write lock.
        with self._rwlock.w_locked():
            if self._shutting_down:
                return
            cached_plan = self._execution_plan_cache.get(event_type)
            if cached_plan is None:
                sync_handlers = self._sync_handlers.get(event_type, frozenset())
                async_handlers = self._async_handlers.get(event_type, frozenset())
                dependencies = dict(self._handler_dependencies.get(event_type, {}))
                all_handlers = list(sync_handlers | async_handlers)
                if not all_handlers:
                    return
                cached_plan = build_execution_plan(all_handlers, dependencies)
                self._execution_plan_cache[event_type] = cached_plan
            else:
                # Plan appeared while we waited for the write lock; just
                # refresh the handler snapshot.
                sync_handlers = self._sync_handlers.get(event_type, frozenset())
                async_handlers = self._async_handlers.get(event_type, frozenset())
    # Execute level by level: a level only starts after every handler in
    # the previous level (sync and async) has finished.
    for level in cached_plan:
        level_sync = frozenset(h for h in level if h in sync_handlers)
        level_async = frozenset(h for h in level if h in async_handlers)
        if level_sync:
            if event_type is LLMStreamChunkEvent:
                # Stream chunks run inline to preserve chunk ordering.
                self._call_handlers(source, event, level_sync)
            else:
                # Run sync handlers in the dedicated pool (propagating the
                # caller's contextvars) and await completion without
                # blocking this event loop.
                ctx = contextvars.copy_context()
                future = self._sync_executor.submit(
                    ctx.run, self._call_handlers, source, event, level_sync
                )
                await asyncio.get_running_loop().run_in_executor(
                    None, future.result
                )
        if level_async:
            await self._acall_handlers(source, event, level_async)
def emit(self, source: Any, event: BaseEvent) -> Future[None] | None:
    """Emit an event to all registered handlers.

    If handlers have dependencies (registered with depends_on), they execute
    in dependency order. Otherwise, handlers execute as before (sync in thread
    pool, async fire-and-forget).

    Stream chunk events always execute synchronously to preserve ordering.

    Args:
        source: The emitting object
        event: The event instance to emit

    Returns:
        Future that completes when handlers finish. Returns:
        - Future for sync-only handlers (ThreadPoolExecutor future)
        - Future for async handlers or mixed handlers (asyncio future)
        - Future for dependency-managed handlers (asyncio future)
        - None if no handlers or sync stream chunk events

    Example:
        >>> future = crewai_event_bus.emit(source, event)
        >>> if future:
        ...     await asyncio.wrap_future(future)  # In async test
        ...     # or future.result(timeout=5.0) in sync code
    """
    # --- Event lineage / scope bookkeeping -----------------------------
    event.previous_event_id = get_last_event_id()
    event.triggered_by_event_id = get_triggering_event_id()
    event.emission_sequence = get_next_emission_sequence()
    if event.parent_event_id is None:
        event_type_name = event.type
        if event_type_name in SCOPE_ENDING_EVENTS:
            # A scope-ending event closes the matching started scope.
            event.parent_event_id = get_enclosing_parent_id()
            popped = pop_event_scope()
            if popped is None:
                handle_empty_pop(event_type_name)
            else:
                popped_event_id, popped_type = popped
                event.started_event_id = popped_event_id
                expected_start = VALID_EVENT_PAIRS.get(event_type_name)
                if expected_start and popped_type and popped_type != expected_start:
                    handle_mismatch(event_type_name, popped_type, expected_start)
        elif event_type_name in SCOPE_STARTING_EVENTS:
            event.parent_event_id = get_current_parent_id()
            push_event_scope(event.event_id, event_type_name)
        else:
            event.parent_event_id = get_current_parent_id()
    set_last_event_id(event.event_id)

    # --- Handler dispatch ----------------------------------------------
    event_type = type(event)
    with self._rwlock.r_locked():
        if self._shutting_down:
            self._console.print(
                "[CrewAIEventsBus] Warning: Attempted to emit event during shutdown. Ignoring."
            )
            return None
        has_dependencies = event_type in self._handler_dependencies
        sync_handlers = self._sync_handlers.get(event_type, frozenset())
        async_handlers = self._async_handlers.get(event_type, frozenset())
    if has_dependencies:
        return self._track_future(
            asyncio.run_coroutine_threadsafe(
                self._emit_with_dependencies(source, event),
                self._loop,
            )
        )
    if sync_handlers:
        if event_type is LLMStreamChunkEvent:
            # Stream chunks run inline to preserve chunk ordering.
            self._call_handlers(source, event, sync_handlers)
        else:
            ctx = contextvars.copy_context()
            sync_future = self._sync_executor.submit(
                ctx.run, self._call_handlers, source, event, sync_handlers
            )
            # BUGFIX: track the sync future unconditionally. Previously it
            # was only tracked when there were no async handlers, so in the
            # mixed sync+async case flush() could return while sync
            # handlers were still running in the thread pool.
            self._track_future(sync_future)
            if not async_handlers:
                return sync_future
    if async_handlers:
        return self._track_future(
            asyncio.run_coroutine_threadsafe(
                self._acall_handlers(source, event, async_handlers),
                self._loop,
            )
        )
    return None
def flush(self, timeout: float | None = 30.0) -> bool:
"""Block until all pending event handlers complete.
This method waits for all futures from previously emitted events to
finish executing. Useful at the end of operations (like kickoff) to
ensure all event handlers have completed before returning.
Args:
timeout: Maximum time in seconds to wait for handlers to complete.
Defaults to 30 seconds. Pass None to wait indefinitely.
Returns:
True if all handlers completed, False if timeout occurred.
"""
with self._futures_lock:
futures_to_wait = list(self._pending_futures)
if not futures_to_wait:
return True
from concurrent.futures import wait as wait_futures
done, not_done = wait_futures(futures_to_wait, timeout=timeout)
# Check for exceptions in completed futures
errors = [
future.exception() for future in done if future.exception() is not None
]
for error in errors:
self._console.print(
f"[CrewAIEventsBus] Handler exception during flush: {error}"
)
return len(not_done) == 0
async def aemit(self, source: Any, event: BaseEvent) -> None:
    """Asynchronously deliver an event to its registered async handlers.

    Sync handlers are not invoked here; use this only from async contexts.
    Emission is skipped with a warning while the bus is shutting down.

    Args:
        source: The object emitting the event.
        event: The event instance to emit.
    """
    with self._rwlock.r_locked():
        if self._shutting_down:
            self._console.print(
                "[CrewAIEventsBus] Warning: Attempted to emit event during shutdown. Ignoring."
            )
            return
        matching = self._async_handlers.get(type(event), frozenset())
    if matching:
        await self._acall_handlers(source, event, matching)
def register_handler(
    self,
    event_type: type[BaseEvent],
    handler: SyncHandler | AsyncHandler,
) -> None:
    """Register a handler for an event type without dependencies.

    Thin public wrapper over ``_register_handler`` for callers that do not
    use the ``on`` decorator.

    Args:
        event_type: The event class to listen for.
        handler: The sync or async handler function to register.
    """
    self._register_handler(event_type, handler)
def validate_dependencies(self) -> None:
    """Eagerly build execution plans to surface dependency errors.

    Walks every event type that has registered dependencies and attempts
    to build its execution plan, so circular or unresolvable (e.g.
    cross-event-type) dependencies fail fast instead of at emit time.

    Raises:
        CircularDependencyError: If circular dependencies or unresolved
            dependencies (e.g. cross-event-type) are detected.
    """
    with self._rwlock.r_locked():
        for event_type, dep_map in self._handler_dependencies.items():
            registered = self._sync_handlers.get(
                event_type, frozenset()
            ) | self._async_handlers.get(event_type, frozenset())
            if registered and dep_map:
                build_execution_plan(list(registered), dict(dep_map))
@contextmanager
def scoped_handlers(self) -> Generator[None, Any, None]:
    """Context manager for temporary event handling scope.

    Useful for testing or temporary event handling. All handlers registered
    within this context are cleared when the context exits, and the
    handlers (with their dependencies) that existed beforehand are
    restored.

    Example:
        >>> from crewai.events.event_bus import crewai_event_bus
        >>> from crewai.events.event_types import CrewKickoffStartedEvent
        >>> with crewai_event_bus.scoped_handlers():
        ...
        ...     @crewai_event_bus.on(CrewKickoffStartedEvent)
        ...     def temp_handler(source, event):
        ...         print("Temporary handler")
        ...
        ...     # Do stuff...
        ... # Handlers are cleared after the context
    """
    # Snapshot existing registrations under the read lock only; the off()
    # calls below take the write lock themselves, so mutation happens
    # after the read lock is released.
    with self._rwlock.r_locked():
        saved_sync: dict[type[BaseEvent], frozenset[SyncHandler]] = dict(
            self._sync_handlers
        )
        saved_async: dict[type[BaseEvent], frozenset[AsyncHandler]] = dict(
            self._async_handlers
        )
        saved_deps: dict[type[BaseEvent], dict[Handler, list[Depends[Any]]]] = {
            event_type: dict(handlers)
            for event_type, handlers in self._handler_dependencies.items()
        }
    # Clear every pre-existing handler so the scope starts empty.
    for event_type, sync_handlers in saved_sync.items():
        for sync_handler in sync_handlers:
            self.off(event_type, sync_handler)
    for event_type, async_handlers in saved_async.items():
        for async_handler in async_handlers:
            self.off(event_type, async_handler)
    try:
        yield
    finally:
        # Remove any handlers that were added inside the scope...
        with self._rwlock.r_locked():
            current_sync = dict(self._sync_handlers)
            current_async = dict(self._async_handlers)
        for event_type, cur_sync in current_sync.items():
            orig_sync = saved_sync.get(event_type, frozenset())
            for new_handler in cur_sync - orig_sync:
                self.off(event_type, new_handler)
        for event_type, cur_async in current_async.items():
            orig_async = saved_async.get(event_type, frozenset())
            for new_async_handler in cur_async - orig_async:
                self.off(event_type, new_async_handler)
        # ...then restore the original handlers together with their
        # saved dependency lists.
        for event_type, sync_handlers in saved_sync.items():
            for sync_handler in sync_handlers:
                deps = saved_deps.get(event_type, {}).get(sync_handler)
                self._register_handler(event_type, sync_handler, deps)
        for event_type, async_handlers in saved_async.items():
            for async_handler in async_handlers:
                deps = saved_deps.get(event_type, {}).get(async_handler)
                self._register_handler(event_type, async_handler, deps)
def shutdown(self, wait: bool = True) -> None:
    """Gracefully shutdown the event loop and wait for all tasks to finish.

    Args:
        wait: If True, wait for all pending tasks to complete before stopping.
            If False, cancel all pending tasks immediately.
    """
    if wait:
        # Drain futures from previously emitted events before teardown.
        self.flush()
    with self._rwlock.w_locked():
        # Reject further emissions while the loop is being torn down.
        self._shutting_down = True
    loop = getattr(self, "_loop", None)
    if loop is None or loop.is_closed():
        return
    if wait:

        async def _wait_for_all_tasks() -> None:
            # Gather everything still scheduled on the bus loop except
            # this coroutine itself; exceptions are swallowed here because
            # handler errors are already logged at call time.
            tasks = {
                t
                for t in asyncio.all_tasks(loop)
                if t is not asyncio.current_task()
            }
            if tasks:
                await asyncio.gather(*tasks, return_exceptions=True)

        future = asyncio.run_coroutine_threadsafe(_wait_for_all_tasks(), loop)
        try:
            future.result()
        except Exception as e:
            self._console.print(f"[CrewAIEventsBus] Error waiting for tasks: {e}")
    else:

        def _cancel_tasks() -> None:
            # Best-effort cancellation of whatever is still running.
            for task in asyncio.all_tasks(loop):
                if task is not asyncio.current_task():
                    task.cancel()

        loop.call_soon_threadsafe(_cancel_tasks)
    # Stop the loop, join its daemon thread, and release all resources.
    loop.call_soon_threadsafe(loop.stop)
    self._loop_thread.join()
    loop.close()
    self._sync_executor.shutdown(wait=wait)
    with self._rwlock.w_locked():
        self._sync_handlers.clear()
        self._async_handlers.clear()
        self._execution_plan_cache.clear()
# Process-wide singleton event bus. shutdown() is registered with atexit so
# pending handlers are flushed and the background loop is stopped cleanly at
# interpreter exit.
crewai_event_bus: Final[CrewAIEventsBus] = CrewAIEventsBus()
atexit.register(crewai_event_bus.shutdown)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/event_bus.py",
"license": "MIT License",
"lines": 562,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/events/handler_graph.py | """Dependency graph resolution for event handlers.
This module resolves handler dependencies into execution levels, ensuring
handlers execute in correct order while maximizing parallelism.
"""
from collections import defaultdict, deque
from collections.abc import Sequence
from crewai.events.depends import Depends
from crewai.events.types.event_bus_types import ExecutionPlan, Handler
class CircularDependencyError(Exception):
    """Raised when event-handler dependencies cannot be resolved.

    Attributes:
        handlers: The handlers involved in the circular dependency
    """

    def __init__(self, handlers: list[Handler]) -> None:
        """Build the error message from the offending handlers.

        Args:
            handlers: The handlers involved in the circular dependency
        """
        # Show at most five handler names to keep the message readable.
        shown = ", ".join(getattr(h, "__name__", repr(h)) for h in handlers[:5])
        super().__init__(
            f"Circular dependency detected in event handlers: {shown}"
        )
        self.handlers = handlers
class HandlerGraph:
    """Organizes handlers into dependency-ordered parallel levels.

    Level 0 holds handlers with no dependencies; level N holds handlers
    whose dependencies all live in levels 0..N-1. Handlers that share a
    level may execute in parallel.

    Attributes:
        levels: Ordered list of handler sets, one set per execution level
    """

    def __init__(
        self,
        handlers: dict[Handler, list[Depends]],
    ) -> None:
        """Build and immediately resolve the dependency graph.

        Args:
            handlers: Mapping of handler -> list of `crewai.events.depends.Depends` objects
        """
        self.handlers = handlers
        self.levels: ExecutionPlan = []
        self._resolve()

    def _resolve(self) -> None:
        """Topologically sort handlers into execution levels (Kahn's algorithm)."""
        # For each handler, the set of handlers waiting on it.
        blocked_by: dict[Handler, set[Handler]] = defaultdict(set)
        # Remaining unsatisfied dependency count per handler.
        pending_count: dict[Handler, int] = {node: 0 for node in self.handlers}
        for node, deps in self.handlers.items():
            pending_count[node] = len(deps)
            for dep in deps:
                blocked_by[dep.handler].add(node)

        ready: deque[Handler] = deque(
            node for node, count in pending_count.items() if count == 0
        )
        while ready:
            level: set[Handler] = set()
            # Drain exactly the nodes that were ready at the start of this
            # pass; anything unblocked during the pass joins the next level.
            for _ in range(len(ready)):
                node = ready.popleft()
                level.add(node)
                for waiter in blocked_by[node]:
                    pending_count[waiter] -= 1
                    if pending_count[waiter] == 0:
                        ready.append(waiter)
            if level:
                self.levels.append(level)

        # Any node whose count never reached zero is part of a cycle, or
        # depends on a handler not present in this graph.
        stuck = [node for node, count in pending_count.items() if count > 0]
        if stuck:
            raise CircularDependencyError(stuck)

    def get_execution_plan(self) -> ExecutionPlan:
        """Return the resolved levels, dependencies-first.

        Returns:
            List of handler sets, where each set represents handlers that can
            execute in parallel. Sets are ordered such that dependencies are
            satisfied.
        """
        return self.levels
def build_execution_plan(
    handlers: Sequence[Handler],
    dependencies: dict[Handler, list[Depends]],
) -> ExecutionPlan:
    """Resolve handlers and their dependencies into ordered parallel levels.

    Args:
        handlers: All handlers for an event type
        dependencies: Mapping of handler -> list of dependencies

    Returns:
        Execution plan as list of levels, where each level is a set of
        handlers that can execute in parallel

    Raises:
        CircularDependencyError: If circular dependencies are detected
    """
    # Handlers absent from the dependencies mapping get an empty list so
    # they land in level 0.
    dependency_map: dict[Handler, list[Depends]] = {
        handler: dependencies.get(handler, []) for handler in handlers
    }
    return HandlerGraph(dependency_map).get_execution_plan()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/handler_graph.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/events/types/event_bus_types.py | """Type definitions for event handlers."""
from collections.abc import Callable, Coroutine
from typing import Any, TypeAlias
from crewai.events.base_events import BaseEvent
# Sync handler: called as handler(source, event); return value is ignored.
SyncHandler: TypeAlias = Callable[[Any, BaseEvent], None]
# Async handler: awaited as `await handler(source, event)`.
AsyncHandler: TypeAlias = Callable[[Any, BaseEvent], Coroutine[Any, Any, None]]
# Immutable handler collections stored per event type in the bus registries.
SyncHandlerSet: TypeAlias = frozenset[SyncHandler]
AsyncHandlerSet: TypeAlias = frozenset[AsyncHandler]
# Union shape covering both handler kinds, used where either is acceptable.
Handler: TypeAlias = Callable[[Any, BaseEvent], Any]
# Ordered execution levels: sets run in order; members of a set may run in
# parallel once every earlier level has completed.
ExecutionPlan: TypeAlias = list[set[Handler]]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/types/event_bus_types.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/events/utils/handlers.py | """Handler utility functions for event processing."""
import functools
import inspect
from typing import Any
from typing_extensions import TypeIs
from crewai.events.base_events import BaseEvent
from crewai.events.types.event_bus_types import AsyncHandler, SyncHandler
def is_async_handler(
    handler: Any,
) -> TypeIs[AsyncHandler]:
    """Type guard: report whether *handler* must be awaited.

    Recognizes coroutine functions, callables whose ``__call__`` is a
    coroutine function, and ``functools.partial`` wrappers around
    coroutine functions.

    Args:
        handler: The candidate handler to inspect

    Returns:
        True if handler is an async coroutine function
    """
    try:
        if inspect.iscoroutinefunction(handler):
            return True
        if callable(handler) and inspect.iscoroutinefunction(handler.__call__):
            return True
    except AttributeError:
        # Accessing __call__ failed; treat as not awaitable.
        return False
    # functools.partial hides the wrapped function behind .func.
    return isinstance(handler, functools.partial) and inspect.iscoroutinefunction(
        handler.func
    )
def is_call_handler_safe(
    handler: SyncHandler,
    source: Any,
    event: BaseEvent,
) -> Exception | None:
    """Invoke a handler, converting any raised exception into a return value.

    Args:
        handler: The handler function to call
        source: The object that emitted the event
        event: The event instance

    Returns:
        Exception if handler raised one, None otherwise
    """
    try:
        handler(source, event)
    except Exception as caught:  # broad on purpose: the bus must never crash emitters
        return caught
    return None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/utils/handlers.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.