File size: 1,840 Bytes
578ad50
9b5b26a
578ad50
9b5b26a
c19d193
578ad50
8fe992b
578ad50
9b5b26a
 
578ad50
 
 
 
 
 
 
 
 
 
9b5b26a
 
 
578ad50
9b5b26a
578ad50
9b5b26a
 
578ad50
9b5b26a
578ad50
9b5b26a
 
 
578ad50
 
 
 
 
 
 
 
 
 
 
 
 
 
8c01ffb
578ad50
8c01ffb
6aae614
ae7a494
 
e121372
578ad50
 
 
 
13d500a
8c01ffb
 
578ad50
 
8c01ffb
861422e
 
578ad50
 
 
 
8c01ffb
8fe992b
578ad50
 
 
 
 
8c01ffb
 
 
8fe992b
 
9b5b26a
578ad50
 
8c01ffb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
import datetime

import pytz
import yaml
from huggingface_hub import InferenceClient
from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool

from tools.final_answer import FinalAnswerTool
from Gradio_UI import GradioUI


# -------------------- TOOLS --------------------









@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """Fetch current time in a timezone.
    Args:
        timezone: e.g. 'Asia/Kolkata'
    """
    # Resolve the zone name, format "now" in that zone, and report it.
    # Any failure (typically an unknown timezone name) is returned as an
    # error string so the agent step doesn't crash.
    try:
        zone = pytz.timezone(timezone)
        stamp = datetime.datetime.now(zone).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {stamp}"
    except Exception as e:
        return f"Error fetching time: {str(e)}"


# ✅ Image Generation Tool
# Module-level client so the connection setup happens once, not per call.
client = InferenceClient(model="stabilityai/stable-diffusion-xl-base-1.0")

@tool
def image_gen_tool(prompt: str):
    """Generate an image from a text prompt.
    Args:
        prompt: Description of the image
    """
    # The remote inference call can fail (network error, rate limit, model
    # cold-start). Return an error string instead of letting the exception
    # crash the agent step — consistent with get_current_time_in_timezone.
    try:
        image = client.text_to_image(prompt)
        return image  # ✅ return PIL image directly
    except Exception as e:
        return f"Error generating image: {str(e)}"

# -------------------- MODEL --------------------

# Terminal tool the agent must call to emit its final answer.
final_answer = FinalAnswerTool()


# Hosted inference model used by the agent.
model = HfApiModel(
    max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)


# -------------------- PROMPTS --------------------


# Load the agent's prompt templates from the repo-local YAML file.
# Explicit encoding so the read doesn't depend on the platform's locale default.
with open("prompts.yaml", "r", encoding="utf-8") as stream:
    prompt_templates = yaml.safe_load(stream)


# -------------------- AGENT --------------------

# The code-writing agent: plans with the model and calls the registered tools.
agent = CodeAgent(
    model=model,
    tools=[
        final_answer,                    # required: how the agent returns its answer
        get_current_time_in_timezone,
        image_gen_tool,                  # ✅ added
    ],
    max_steps=6,                         # cap on reasoning/tool-call iterations
    verbosity_level=1,
    grammar=None,
    # prompts.yaml was loaded into prompt_templates but never used — wire it in
    # so the customized prompts actually take effect.
    prompt_templates=prompt_templates,
)


# -------------------- UI --------------------

GradioUI(agent).launch()