Files changed (1) hide show
  1. app.py +56 -32
app.py CHANGED
@@ -1,69 +1,93 @@
1
- from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
- import requests
4
  import pytz
5
  import yaml
6
- from tools.final_answer import FinalAnswerTool
7
 
 
8
  from Gradio_UI import GradioUI
9
 
10
- # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
- @tool
12
- def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
13
- #Keep this format for the description / args / args description but feel free to modify the tool
14
- """A tool that does nothing yet
15
- Args:
16
- arg1: the first argument
17
- arg2: the second argument
18
- """
19
- return "What magic will you build ?"
20
 
21
  @tool
22
  def get_current_time_in_timezone(timezone: str) -> str:
23
- """A tool that fetches the current local time in a specified timezone.
24
  Args:
25
- timezone: A string representing a valid timezone (e.g., 'America/New_York').
26
  """
27
  try:
28
- # Create timezone object
29
  tz = pytz.timezone(timezone)
30
- # Get current time in that timezone
31
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
32
  return f"The current local time in {timezone} is: {local_time}"
33
  except Exception as e:
34
- return f"Error fetching time for timezone '{timezone}': {str(e)}"
35
 
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  final_answer = FinalAnswerTool()
38
 
39
- # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
40
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
41
 
42
  model = HfApiModel(
43
- max_tokens=2096,
44
- temperature=0.5,
45
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
46
- custom_role_conversions=None,
47
  )
48
 
49
 
50
- # Import tool from Hub
51
- image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
52
 
53
  with open("prompts.yaml", 'r') as stream:
54
  prompt_templates = yaml.safe_load(stream)
55
-
 
 
 
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
 
 
 
 
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
62
- planning_interval=None,
63
- name=None,
64
- description=None,
65
- prompt_templates=prompt_templates
66
  )
67
 
68
 
 
 
69
  GradioUI(agent).launch()
 
1
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
2
  import datetime
3
+
4
  import pytz
5
  import yaml
6
+ from huggingface_hub import InferenceClient
7
 
8
+ from tools.final_answer import FinalAnswerTool
9
  from Gradio_UI import GradioUI
10
 
11
+
12
+ # -------------------- TOOLS --------------------
13
+
14
+
15
+
16
+
17
+
18
+
19
+
20
+
21
 
22
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """Fetch current time in a timezone.
    Args:
        timezone: e.g. 'Asia/Kolkata'
    """
    try:
        # Resolve the tz database name, then format "now" in that zone.
        zone = pytz.timezone(timezone)
        local_time = datetime.datetime.now(zone).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        # pytz raises UnknownTimeZoneError for bad names; report the error as
        # text so the agent step fails gracefully instead of crashing.
        return f"Error fetching time: {str(e)}"
36
 
37
 
38
# Image generation: one shared Hub inference client, wrapped as an agent tool.
client = InferenceClient(model="stabilityai/stable-diffusion-xl-base-1.0")


@tool
def image_gen_tool(prompt: str):
    """Generate an image from a text prompt.
    Args:
        prompt: Description of the image
    """
    # NOTE(review): smolagents' @tool generally expects a return type hint on
    # tools; confirm this registers without one — TODO.
    result = client.text_to_image(prompt)
    return result  # PIL image, handed back to the agent as-is
49
+
50
# -------------------- MODEL --------------------

final_answer = FinalAnswerTool()

model = HfApiModel(
    max_tokens=2096,  # NOTE(review): unusual value — was 2048 intended? confirm
    temperature=0.5,
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)

# -------------------- PROMPTS --------------------

# Prompt templates ship with the Space; loaded once at startup.
with open("prompts.yaml", 'r') as stream:
    prompt_templates = yaml.safe_load(stream)

# -------------------- AGENT --------------------

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,  # required by smolagents — don't remove
        get_current_time_in_timezone,
        image_gen_tool,
    ],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
)

# -------------------- UI --------------------

GradioUI(agent).launch()