Files changed (1) hide show
  1. app.py +44 -29
app.py CHANGED
@@ -1,61 +1,74 @@
1
- from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
- import requests
4
  import pytz
5
  import yaml
6
- from tools.final_answer import FinalAnswerTool
7
 
 
8
  from Gradio_UI import GradioUI
9
 
10
- # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
- @tool
12
- def my_custom_tool(arg1:str, arg2:int)-> str: #it's import to specify the return type
13
- #Keep this format for the description / args / args description but feel free to modify the tool
14
- """A tool that does nothing yet
15
- Args:
16
- arg1: the first argument
17
- arg2: the second argument
18
- """
19
- return "What magic will you build ?"
20
 
21
  @tool
22
  def get_current_time_in_timezone(timezone: str) -> str:
23
- """A tool that fetches the current local time in a specified timezone.
24
  Args:
25
- timezone: A string representing a valid timezone (e.g., 'America/New_York').
26
  """
27
  try:
28
- # Create timezone object
29
  tz = pytz.timezone(timezone)
30
- # Get current time in that timezone
31
  local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
32
  return f"The current local time in {timezone} is: {local_time}"
33
  except Exception as e:
34
- return f"Error fetching time for timezone '{timezone}': {str(e)}"
35
 
36
 
37
- final_answer = FinalAnswerTool()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
- # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
40
- # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
41
 
42
  model = HfApiModel(
43
- max_tokens=2096,
44
- temperature=0.5,
45
- model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
46
- custom_role_conversions=None,
47
  )
48
 
49
 
50
- # Import tool from Hub
51
- image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
52
 
53
  with open("prompts.yaml", 'r') as stream:
54
  prompt_templates = yaml.safe_load(stream)
55
-
 
 
 
56
  agent = CodeAgent(
57
  model=model,
58
- tools=[final_answer], ## add your tools here (don't remove final answer)
 
 
 
 
59
  max_steps=6,
60
  verbosity_level=1,
61
  grammar=None,
@@ -66,4 +79,6 @@ agent = CodeAgent(
66
  )
67
 
68
 
 
 
69
  GradioUI(agent).launch()
 
1
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
2
  import datetime
 
3
  import pytz
4
  import yaml
5
+ from huggingface_hub import InferenceClient
6
 
7
+ from tools.final_answer import FinalAnswerTool
8
  from Gradio_UI import GradioUI
9
 
10
+
11
+ # -------------------- TOOLS --------------------
 
 
 
 
 
 
 
 
12
 
13
@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """Report the current wall-clock time for an IANA timezone name.

    Args:
        timezone: e.g. 'Asia/Kolkata'
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    try:
        # pytz raises UnknownTimeZoneError for bad names; any failure is
        # folded into a readable error string for the agent instead of raising.
        local_time = datetime.datetime.now(pytz.timezone(timezone)).strftime(fmt)
    except Exception as e:
        return f"Error fetching time: {str(e)}"
    return f"The current local time in {timezone} is: {local_time}"
25
 
26
 
27
# Image Generation Tool
# Shared inference client for the SDXL text-to-image endpoint.
client = InferenceClient(model="stabilityai/stable-diffusion-xl-base-1.0")


@tool
def image_gen_tool(prompt: str) -> str:
    """Generate an image from a text prompt.

    Args:
        prompt: Description of the image
    """
    # NOTE(review): fixed path — each call overwrites the previous image; confirm intended.
    file_path = "generated_image.png"
    try:
        # text_to_image appears to return a PIL-like image (it has .save) —
        # TODO confirm against huggingface_hub docs.
        client.text_to_image(prompt).save(file_path)
        return f"Image generated and saved at {file_path}"
    except Exception as e:
        return f"Error generating image: {str(e)}"
43
+
44
+
45
+ # -------------------- MODEL --------------------
46
 
47
final_answer = FinalAnswerTool()  # required terminal tool — must stay in the agent's tools list
 
48
 
49
# Hosted chat model the agent drives.
model = HfApiModel(
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    # NOTE(review): 2096 looks like a typo for 2048 — confirm before changing.
    max_tokens=2096,
    temperature=0.5,
    custom_role_conversions=None,
)
55
 
56
 
57
# -------------------- PROMPTS --------------------

# Load the agent's prompt templates; safe_load refuses arbitrary YAML tags.
with open("prompts.yaml", 'r') as fh:
    prompt_templates = yaml.safe_load(fh)
61
+
62
+
63
+ # -------------------- AGENT --------------------
64
+
65
  agent = CodeAgent(
66
  model=model,
67
+ tools=[
68
+ final_answer, # required
69
+ get_current_time_in_timezone,
70
+ image_gen_tool # ✅ added
71
+ ],
72
  max_steps=6,
73
  verbosity_level=1,
74
  grammar=None,
 
79
  )
80
 
81
 
82
# -------------------- UI --------------------

# Wrap the agent in the Gradio chat interface and start the web server.
GradioUI(agent).launch()