from smolagents import CodeAgent, HfApiModel, load_tool, tool
import requests
import yaml
import json
from tools.final_answer import FinalAnswerTool
from tools.web_search import DuckDuckGoSearchTool
from bs4 import BeautifulSoup
from duckduckgo_search import DDGS
import re
from typing import List, Dict, Any
from Gradio_UI import GradioUI
@tool
def get_healthy_cheat_meal(diet_type: str) -> List[Dict[str, Any]]:
    """Search for healthy sweet dish alternatives based on specific diet types.

    Args:
        diet_type: The specific diet type (e.g., Keto, Paleo, Vegan).

    Returns:
        A list of dictionaries containing 'recipe_name', 'url', 'ingredients',
        and 'instructions'; on failure, a single-element list whose dict
        carries an 'error' key.
    """
    query = f"healthy {diet_type} sweet dish recipe"
    recipes: List[Dict[str, Any]] = []
    seen_urls = set()  # avoid scraping the same page twice
    try:
        ddgs = DDGS()
        search_results = ddgs.text(query, region="us-en", safesearch="moderate", max_results=10)
        if not search_results:
            return [{"error": "No recipes found"}]
        for result in search_results:
            title = result.get('title', '').split(' - ')[0].strip()
            url = result.get('href', '')
            # Skip entries with no URL or no usable title, and duplicate URLs.
            if not url or not title or url in seen_urls:
                continue
            seen_urls.add(url)
            # Fetch recipe details from the result page.
            details = visit_webpage(url)
            # Skip recipes whose page could not be scraped.
            if "error" in details:
                continue
            recipes.append({
                "recipe_name": title,
                "url": url,
                "ingredients": details.get("ingredients", []),
                "instructions": details.get("instructions", []),
            })
        return recipes if recipes else [{"error": "No valid recipes found"}]
    except Exception as e:
        # Boundary handler: the agent consumes tool output, so report rather than raise.
        return [{"error": f"An error occurred: {str(e)}"}]
@tool
def visit_webpage(url: str) -> Dict[str, Any]:
    """Visits a webpage and extracts ingredients and instructions.

    Args:
        url: The recipe URL (must use the http:// or https:// scheme).

    Returns:
        A dictionary containing 'ingredients' and 'instructions' lists, or a
        dictionary with an 'error' key if the URL is invalid or the fetch fails.
    """
    try:
        # Validate the scheme before making a request; a bare "http" prefix
        # would also accept strings like "httpfoo://...".
        if not url.startswith(("http://", "https://")):
            return {"error": f"Invalid URL format: {url}"}
        response = requests.get(
            url,
            timeout=10,
            headers={
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            },
        )
        response.raise_for_status()  # Raise an error for 404, 403, etc.
        soup = BeautifulSoup(response.content, 'html.parser')
        # Extract ingredients: common list/class selectors first, then fall
        # back to any <li> whose text mentions "ingredient".
        ingredients = [tag.get_text(strip=True) for tag in soup.select('ul li, .ingredient')]
        if not ingredients:
            ingredients = [
                tag.get_text(strip=True)
                for tag in soup.find_all('li')
                if "ingredient" in tag.get_text(strip=True).lower()
            ]
        # Extract instructions with the analogous selector-then-fallback scan.
        instructions = [tag.get_text(strip=True) for tag in soup.select('ol li, .instruction, .step')]
        if not instructions:
            instructions = [
                tag.get_text(strip=True)
                for tag in soup.find_all('p')
                if "step" in tag.get_text(strip=True).lower()
            ]
        return {
            "ingredients": ingredients,
            "instructions": instructions,
        }
    except requests.exceptions.HTTPError as http_err:
        # HTTPError only comes from raise_for_status, so `response` is bound here.
        return {"error": f"HTTP error {response.status_code}: {http_err}"}
    except requests.exceptions.RequestException as req_err:
        return {"error": f"Request failed: {req_err}"}
    except Exception as e:
        return {"error": f"Failed to scrape {url}: {str(e)}"}
# Initialize tools
final_answer = FinalAnswerTool()

# If the agent does not answer, the model is overloaded; use another model or
# the following Hugging Face Endpoint that also contains qwen2.5 coder:
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    #model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
    model_id = 'https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    custom_role_conversions=None,
)

# Import tool from Hub (disabled; enable to add image generation)
#image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)

# Load the system/planning prompt templates used by the agent.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)
# Assemble the agent with the recipe tools; final_answer must stay in the list
# so the agent can terminate with an answer.
agent = CodeAgent(
    model=model,
    tools=[get_healthy_cheat_meal, visit_webpage, final_answer],  # add your tools here (don't remove final answer)
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name=None,
    description=None,
    prompt_templates=prompt_templates,
)

# Launch the Gradio UI only when run as a script (HF Spaces runs `python app.py`).
# Note: the original line ended with a stray "|" left over from extraction — a
# syntax error — which is removed here.
if __name__ == "__main__":
    GradioUI(agent).launch()