import re
import requests
from markdownify import markdownify
from requests.exceptions import RequestException
from smolagents import tool
from huggingface_hub import InferenceClient


@tool
def visit_webpage(url: str) -> str:
    """Visits a webpage at the given URL and returns its content as a markdown string.

    Args:
        url: The URL of the webpage to visit.

    Returns:
        The content of the webpage converted to Markdown, or an error message if the request fails.
    """
    try:
        # Send a GET request to the URL; time out rather than hang on unresponsive hosts
        response = requests.get(url, timeout=30)
        response.raise_for_status()  # Raise an exception for bad status codes

        # Convert the HTML content to Markdown
        markdown_content = markdownify(response.text).strip()

        # Collapse runs of three or more line breaks into two
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)

        return markdown_content
    except RequestException as e:
        return f"Error fetching the webpage: {str(e)}"
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"


@tool
def analyze_image(url: str, prompt: str) -> str:
    """Uses a vision model to identify features in and describe an image.

    Args:
        url: The URL of the image to analyze.
        prompt: Specific questions or things you are looking for in the image. Can also specify how to format the response. The model will return a general description if this is blank.

    Returns:
        Answers to your question(s), or else a textual description of the image.
    """
    model_id = "Qwen/Qwen2.5-VL-32B-Instruct"
    client = InferenceClient()

    # Fall back to a general description when no prompt is supplied
    if not prompt:
        prompt = "Describe the content of the image in detail."

    # Build a multimodal chat message pairing the image URL with the text prompt
    model_prompt = [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": url}},
                {"type": "text", "text": prompt},
            ],
        }
    ]

    response = client.chat_completion(
        model=model_id,
        messages=model_prompt,
        max_tokens=1000,
        temperature=0.7,
    )

    description = response.choices[0].message.content
    return description
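

# A minimal local check (a sketch, not part of the tool definitions): running
# this module directly exercises analyze_image. It assumes valid Hugging Face
# credentials are available to InferenceClient (e.g. via the HF_TOKEN
# environment variable); the image URL below is an arbitrary public test image.
if __name__ == "__main__":
    test_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
    print(analyze_image(test_url, "What objects are visible in this image?"))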