# Final_Assignment / tools.py
# Author: at1300 — final update (commit 1dc32fe, verified)
import re
import requests
import os
from markdownify import markdownify
from requests.exceptions import RequestException
from smolagents import tool
from huggingface_hub import InferenceClient
from openai import OpenAI
from urllib.parse import urlparse
from pathlib import Path
@tool
def visit_webpage(url: str) -> str:
    """Visits a webpage at the given URL and returns its content as a markdown string.

    Args:
        url: The URL of the webpage to visit.

    Returns:
        The content of the webpage converted to Markdown, or an error message if the request fails.
    """
    try:
        # Send a GET request to the URL. A timeout prevents the agent from
        # hanging forever on an unresponsive host.
        response = requests.get(url, timeout=30)
        response.raise_for_status()  # Raise an exception for bad status codes

        # Convert the HTML content to Markdown
        markdown_content = markdownify(response.text).strip()

        # Collapse runs of 3+ newlines into a single blank line
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
        return markdown_content
    except RequestException as e:
        return f"Error fetching the webpage: {str(e)}"
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"
@tool
def analyze_image(url: str, prompt: str = None) -> str:
    """Uses a vision model to identify features in and describe an image.

    Args:
        url: The URL of the image to analyze
        prompt: Specific questions or things you are looking for in the image. Can also specify how to format a response. The model will return a general description if this is blank.

    Returns:
        Answers to your question(s) or else a textual description of the image
    """
    model_id = "Qwen/Qwen2.5-VL-32B-Instruct"
    client = InferenceClient()

    # Fall back to a general description when the prompt is missing OR blank
    # (`not prompt` covers both None and ""), matching the docstring's promise.
    if not prompt:
        prompt = "Describe the content of the image in detail."

    # Single-turn multimodal message: the image plus the textual instruction.
    model_prompt = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": url}},
                {"type": "text", "text": prompt},
            ],
        }
    ]
    response = client.chat_completion(
        model=model_id,
        messages=model_prompt,
        max_tokens=1000,
        temperature=0.7,
    )
    # Return only the model's text answer.
    return response.choices[0].message.content
def download_file(url: str, save_path: str) -> str:
    """Download a file from a URL and save it locally.

    Args:
        url: The URL to download from.
        save_path: Local filesystem path the file is written to.

    Returns:
        The path the file was saved to (same as ``save_path``).

    Raises:
        Exception: If the download fails for a network/HTTP reason.
    """
    try:
        # Stream so large files are written in chunks instead of being held
        # entirely in memory; the timeout avoids hanging on a dead server.
        response = requests.get(url, stream=True, timeout=60)
        response.raise_for_status()
        with open(save_path, "wb") as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)
        return save_path
    except requests.RequestException as e:
        # Chain the original exception so the root cause is preserved.
        raise Exception(f"Failed to download file from {url}: {e}") from e
@tool
def transcribe_audio(file_path_or_url: str) -> str:
    """
    Transcribe an MP3 file using OpenAI Whisper API.

    Args:
        file_path_or_url: The local file path or URL of the audio file to transcribe

    Returns:
        Transcription text as a string.

    Raises:
        Exception: Wraps any download, validation, or API error.
    """
    client = OpenAI(
        api_key=os.environ['OPENAI_API_KEY'],
    )
    is_remote = file_path_or_url.startswith(("http://", "https://"))
    temp_file_path = None  # set only when we download, so cleanup is targeted
    try:
        if is_remote:
            # Derive a local filename from the URL path, with a fallback
            # for URLs whose path has no basename.
            parsed_url = urlparse(file_path_or_url)
            filename = os.path.basename(parsed_url.path) or "downloaded_audio.mp3"
            temp_file_path = os.path.join(os.getcwd(), filename)
            print(f"Downloading file from {file_path_or_url}...")
            file_path = download_file(file_path_or_url, temp_file_path)
        else:
            # Use local file path
            file_path = file_path_or_url
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Local file {file_path} does not exist.")

        # Check file size (Whisper API limit: 25 MB)
        file_size = os.path.getsize(file_path) / (1024 * 1024)  # Size in MB
        if file_size > 25:
            raise ValueError(f"File size {file_size:.2f} MB exceeds Whisper API limit of 25 MB.")

        # Open and send the file to Whisper API
        print(f"Transcribing {file_path}...")
        with open(file_path, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(
                model="whisper-1",
                file=audio_file,
                response_format="text"  # plain text back, not a JSON object
            )
        return transcription
    except Exception as e:
        # Chain the cause so the original traceback is not lost.
        raise Exception(f"Error during transcription: {e}") from e
    finally:
        # Remove the downloaded temp file on BOTH success and failure
        # (the original cleaned up only on success, leaking it on error).
        if temp_file_path is not None and os.path.exists(temp_file_path):
            os.remove(temp_file_path)
            print(f"Cleaned up temporary file: {temp_file_path}")