Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,87 +8,99 @@ from tools.final_answer import FinalAnswerTool
|
|
| 8 |
from Gradio_UI import GradioUI
|
| 9 |
import os
|
| 10 |
|
| 11 |
-
# -----
|
|
|
|
|
|
|
| 12 |
@tool
|
| 13 |
def my_custom_tool(arg1: str, arg2: int) -> str:
|
| 14 |
"""
|
| 15 |
-
|
| 16 |
-
|
| 17 |
Args:
|
| 18 |
arg1: The first argument
|
| 19 |
arg2: The second argument
|
|
|
|
|
|
|
|
|
|
| 20 |
"""
|
| 21 |
return f"Your inputs were arg1={arg1} and arg2={arg2}"
|
| 22 |
|
| 23 |
@tool
|
| 24 |
def get_current_time_in_timezone(timezone: str) -> str:
|
| 25 |
"""
|
| 26 |
-
Returns current local time in a
|
| 27 |
-
|
| 28 |
-
Args:
|
| 29 |
-
timezone: A valid timezone string.
|
| 30 |
"""
|
| 31 |
try:
|
| 32 |
tz = pytz.timezone(timezone)
|
| 33 |
-
|
| 34 |
-
return f"The current local time in {timezone} is {
|
| 35 |
except Exception as e:
|
| 36 |
-
return f"Error
|
| 37 |
|
| 38 |
-
# -----
|
|
|
|
|
|
|
| 39 |
final_answer = FinalAnswerTool()
|
| 40 |
|
| 41 |
-
# -----
|
|
|
|
|
|
|
| 42 |
model = HfApiModel(
|
| 43 |
model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
|
| 44 |
max_tokens=2096,
|
| 45 |
temperature=0.5
|
| 46 |
)
|
| 47 |
|
| 48 |
-
# -----
|
|
|
|
|
|
|
| 49 |
@tool
|
| 50 |
def generate_image(prompt: str) -> AgentImage:
|
| 51 |
"""
|
| 52 |
-
Generates a
|
| 53 |
-
|
| 54 |
-
Args:
|
| 55 |
-
prompt: text prompt to describe the image.
|
| 56 |
-
|
| 57 |
-
Returns:
|
| 58 |
-
AgentImage that Gradio can render.
|
| 59 |
"""
|
| 60 |
hf_token = os.environ.get("HF_TOKEN")
|
| 61 |
if not hf_token:
|
| 62 |
-
raise ValueError("HF_TOKEN not set in Space
|
| 63 |
|
| 64 |
-
#
|
| 65 |
-
api_url = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-
|
| 66 |
headers = {"Authorization": f"Bearer {hf_token}"}
|
|
|
|
|
|
|
| 67 |
payload = {"inputs": prompt}
|
| 68 |
|
| 69 |
-
|
| 70 |
-
|
|
|
|
|
|
|
|
|
|
| 71 |
|
| 72 |
-
image_bytes = resp.content
|
| 73 |
return AgentImage.from_bytes(image_bytes)
|
| 74 |
|
| 75 |
-
# -----
|
|
|
|
|
|
|
| 76 |
with open("prompts.yaml", "r") as f:
|
| 77 |
prompt_templates = yaml.safe_load(f)
|
| 78 |
|
| 79 |
-
# -----
|
|
|
|
|
|
|
| 80 |
agent = CodeAgent(
|
| 81 |
model=model,
|
| 82 |
tools=[
|
| 83 |
final_answer,
|
| 84 |
generate_image,
|
| 85 |
get_current_time_in_timezone,
|
| 86 |
-
my_custom_tool
|
| 87 |
],
|
| 88 |
max_steps=6,
|
| 89 |
verbosity_level=1,
|
| 90 |
-
prompt_templates=prompt_templates
|
| 91 |
)
|
| 92 |
|
| 93 |
-
# -----
|
|
|
|
|
|
|
| 94 |
GradioUI(agent).launch()
|
|
|
|
| 8 |
from Gradio_UI import GradioUI
|
| 9 |
import os
|
| 10 |
|
| 11 |
# ------------------------
# Custom tools
# ------------------------
@tool
def my_custom_tool(arg1: str, arg2: int) -> str:
    """
    A demo custom tool.

    Args:
        arg1: The first argument
        arg2: The second argument

    Returns:
        A string summarizing the inputs.
    """
    # Simply echo both inputs back so the agent has a working tool template.
    return f"Your inputs were arg1={arg1} and arg2={arg2}"
|
| 27 |
|
| 28 |
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """
    Returns the current local time in a specified timezone.

    Args:
        timezone: A valid timezone string (e.g. 'America/New_York').
    """
    try:
        zone = pytz.timezone(timezone)
        stamp = datetime.datetime.now(zone).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is {stamp}."
    except Exception as e:
        # Invalid timezone names raise pytz.UnknownTimeZoneError; report the
        # problem as a string so the agent can recover instead of crashing.
        return f"Error fetching time for '{timezone}': {str(e)}"
|
| 39 |
|
| 40 |
# ------------------------
# Final answer tool
# ------------------------
# Required by smolagents: the agent uses this tool to emit its final reply.
final_answer = FinalAnswerTool()
|
| 44 |
|
| 45 |
# ------------------------
# Agent model setup
# ------------------------
model = HfApiModel(
    model_id="Qwen/Qwen2.5-Coder-32B-Instruct",
    max_tokens=2096,
    temperature=0.5,
)
|
| 53 |
|
| 54 |
# ------------------------
# Image generation using HF Inference API
# ------------------------
@tool
def generate_image(prompt: str) -> AgentImage:
    """
    Generates an image from a text prompt using SDXL (Stable Diffusion XL).

    Args:
        prompt: Text prompt describing the image to generate.

    Returns:
        AgentImage that Gradio can render.

    Raises:
        ValueError: If the HF_TOKEN environment variable is not set.
        requests.HTTPError: If the Inference API call fails.
    """
    hf_token = os.environ.get("HF_TOKEN")
    if not hf_token:
        raise ValueError("HF_TOKEN not set in Space secrets")

    # SDXL endpoint on the serverless Inference API.
    api_url = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
    headers = {"Authorization": f"Bearer {hf_token}"}

    # Request body for image generation.
    payload = {"inputs": prompt}

    # A timeout keeps a stalled request from hanging the Space; generous
    # because SDXL cold starts on the serverless API can be slow.
    response = requests.post(api_url, headers=headers, json=payload, timeout=120)
    response.raise_for_status()

    # On success the response body is raw image bytes (PNG).
    image_bytes = response.content
    # NOTE(review): smolagents' AgentImage exposes no public `from_bytes`
    # constructor in current releases — the Space header reports a runtime
    # error and this line is the likely cause. Verify against the installed
    # smolagents version; wrapping the bytes in a PIL image
    # (AgentImage(PIL.Image.open(io.BytesIO(image_bytes)))) may be required.
    return AgentImage.from_bytes(image_bytes)
|
| 80 |
|
| 81 |
# ------------------------
# Load prompt templates
# ------------------------
# Explicit encoding avoids locale-dependent decoding on the Space host.
with open("prompts.yaml", "r", encoding="utf-8") as f:
    prompt_templates = yaml.safe_load(f)
|
| 86 |
|
| 87 |
# ------------------------
# Create the agent
# ------------------------
agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        generate_image,
        get_current_time_in_timezone,
        my_custom_tool,
    ],
    max_steps=6,
    verbosity_level=1,
    prompt_templates=prompt_templates,
)
|
| 102 |
|
| 103 |
# ------------------------
# Start the UI
# ------------------------
GradioUI(agent).launch()
|