Fiononana committed on
Commit
926af80
·
verified ·
1 Parent(s): c86be28

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -28
app.py CHANGED
@@ -4,34 +4,19 @@ import requests
4
  import pytz
5
  import yaml
6
  from tools.final_answer import FinalAnswerTool
 
7
  from Gradio_UI import GradioUI
8
- import torch
9
- from diffusers import FluxPipeline
10
 
11
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
12
  @tool
13
- def my_custom_tool(userprompt: str) -> str: #it's important to specify the return type
14
- """A tool that helps to generate an image for the given text prompt
 
15
  Args:
16
- userprompt: A string prompt describing the image to generate
17
- Returns:
18
- str: A generated image.
19
  """
20
- # Import tool from Hub
21
- pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
22
- pipe.enable_model_cpu_offload() #save some VRAM by offloading the model to CPU. Remove this if you have enough GPU power
23
-
24
- prompt = userprompt
25
- image = pipe(
26
- prompt,
27
- height=1024,
28
- width=1024,
29
- guidance_scale=3.5,
30
- num_inference_steps=50,
31
- max_sequence_length=512,
32
- generator=torch.Generator("cpu").manual_seed(0)
33
- ).images[0]
34
- return image
35
 
36
  @tool
37
  def get_current_time_in_timezone(timezone: str) -> str:
@@ -55,22 +40,23 @@ web_search = DuckDuckGoSearchTool()
55
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
56
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
57
 
58
- # Define the AI model
59
  model = HfApiModel(
60
  max_tokens=2096,
61
  temperature=0.5,
62
- model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud', # it is possible that this model may be overloaded
63
  custom_role_conversions=None,
64
  )
65
 
66
- # Load prompt templates
 
 
 
67
  with open("prompts.yaml", 'r') as stream:
68
  prompt_templates = yaml.safe_load(stream)
69
 
70
- # Create the AI agent with tools
71
  agent = CodeAgent(
72
  model=model,
73
- tools=[final_answer,web_search,get_current_time_in_timezone, my_custom_tool], ## add your tools here (don't remove final answer)
74
  max_steps=6,
75
  verbosity_level=1,
76
  grammar=None,
@@ -80,5 +66,5 @@ agent = CodeAgent(
80
  prompt_templates=prompt_templates
81
  )
82
 
83
- # Launch the AI agent with Gradio
84
  GradioUI(agent).launch()
 
4
  import pytz
5
  import yaml
6
  from tools.final_answer import FinalAnswerTool
7
+
8
  from Gradio_UI import GradioUI
 
 
9
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
  @tool
12
+ def my_custom_tool(arg1:str, arg2:int)-> str: #it's important to specify the return type
13
+ #Keep this format for the description / args / args description but feel free to modify the tool
14
+ """A tool that does nothing yet
15
  Args:
16
+ arg1: the first argument
17
+ arg2: the second argument
 
18
  """
19
+ return "What magic will you build ?"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  @tool
22
  def get_current_time_in_timezone(timezone: str) -> str:
 
40
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
41
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
42
 
 
43
  model = HfApiModel(
44
  max_tokens=2096,
45
  temperature=0.5,
46
+ model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
47
  custom_role_conversions=None,
48
  )
49
 
50
+
51
+ # Import tool from Hub
52
+ image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
53
+
54
  with open("prompts.yaml", 'r') as stream:
55
  prompt_templates = yaml.safe_load(stream)
56
 
 
57
  agent = CodeAgent(
58
  model=model,
59
+ tools=[final_answer,image_generation_tool,get_current_time_in_timezone,web_search], ## add your tools here (don't remove final answer)
60
  max_steps=6,
61
  verbosity_level=1,
62
  grammar=None,
 
66
  prompt_templates=prompt_templates
67
  )
68
 
69
+
70
  GradioUI(agent).launch()