dagiro committed on
Commit
f948348
·
1 Parent(s): ea5ffb3

new tool added

Browse files
Files changed (2) hide show
  1. __pycache__/Gradio_UI.cpython-310.pyc +0 -0
  2. app.py +13 -42
__pycache__/Gradio_UI.cpython-310.pyc CHANGED
Binary files a/__pycache__/Gradio_UI.cpython-310.pyc and b/__pycache__/Gradio_UI.cpython-310.pyc differ
 
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
2
  import datetime
3
  import requests
@@ -37,54 +38,24 @@ def get_current_time_in_timezone(timezone: str) -> str:
37
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
38
 
39
  @tool
40
- def generate_image(
41
- prompt: str,
42
- negative_prompt: str = "",
43
- steps: int = 20,
44
- model_id: str = "stabilityai/stable-diffusion-xl-base-1.0"
45
- ) -> str:
46
  """A tool that generates images from text prompts using Hugging Face's Inference API.
47
- Automatically uses the HF_TOKEN environment variable for authentication.
48
-
49
  Args:
50
- prompt: The text description of the image you want to generate.
51
- negative_prompt: Things you don't want to see in the image (optional).
52
- steps: Number of denoising steps (default 20, higher is better quality but slower).
53
- model_id: The model ID to use (default: stabilityai/stable-diffusion-xl-base-1.0).
54
  Returns:
55
- A base64 encoded string of the generated image.
56
  """
57
  try:
58
- # Get token from environment variables (automatically available on HF Spaces)
59
- API_TOKEN = os.environ.get("HF_TOKEN")
60
- if not API_TOKEN:
61
- return "Error: HF_TOKEN not found in environment variables"
62
-
63
- API_URL = f"https://api-inference.huggingface.co/models/{model_id}"
64
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
65
-
66
- payload = {
67
- "inputs": prompt,
68
- "negative_prompt": negative_prompt,
69
- "options": {"use_cache": True, "wait_for_model": True},
70
- "parameters": {
71
- "num_inference_steps": steps,
72
- "guidance_scale": 7.5
73
- }
74
- }
75
-
76
- response = requests.post(API_URL, headers=headers, json=payload)
77
- response.raise_for_status()
78
 
79
- # Convert to base64 for display
80
- image = Image.open(io.BytesIO(response.content))
81
- buffered = io.BytesIO()
82
- image.save(buffered, format="PNG")
83
- img_str = base64.b64encode(buffered.getvalue()).decode()
84
 
85
- return f"data:image/png;base64,{img_str}"
86
- except requests.exceptions.RequestException as e:
87
- return f"API Error: {str(e)}"
88
  except Exception as e:
89
  return f"Error generating image: {str(e)}"
90
 
@@ -110,7 +81,7 @@ with open("prompts.yaml", 'r') as stream:
110
 
111
  agent = CodeAgent(
112
  model=model,
113
- tools=[final_answer], ## add your tools here (don't remove final answer)
114
  max_steps=6,
115
  verbosity_level=1,
116
  grammar=None,
 
1
+ from huggingface_hub import InferenceClient
2
  from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
3
  import datetime
4
  import requests
 
38
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
39
 
40
@tool
def generate_image(prompt: str, model: str = "stabilityai/stable-diffusion-xl-base-1.0") -> str:
    """A tool that generates images from text prompts using Hugging Face's Inference API.

    Args:
        prompt: The text description of the image to generate.
        model: The Hugging Face model to use for image generation (default: stabilityai/stable-diffusion-xl-base-1.0).

    Returns:
        The local file path of the saved image on success, or an error message string.
    """
    try:
        # NOTE(review): InferenceClient() is constructed without an explicit token —
        # presumably it picks up HF_TOKEN from the environment (HF Spaces sets it); confirm.
        client = InferenceClient()
        image = client.text_to_image(prompt, model=model)

        # Save the image locally under a timestamped name so repeated
        # generations do not overwrite each other, and return the path.
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        image_path = f"generated_image_{timestamp}.png"
        image.save(image_path)

        return f"Image generated successfully! Saved to: {image_path}"
    except Exception as e:
        # Report the failure back to the agent as a string rather than raising,
        # so the agent loop can recover and rephrase or retry.
        return f"Error generating image: {str(e)}"
61
 
 
81
 
82
  agent = CodeAgent(
83
  model=model,
84
+ tools=[final_answer, get_current_time_in_timezone, generate_image], ## add your tools here (don't remove final answer)
85
  max_steps=6,
86
  verbosity_level=1,
87
  grammar=None,