dagiro commited on
Commit
4b53383
·
verified ·
1 Parent(s): 24c30ac

Update app.py

Browse files

Add a generate_image tool that calls the Hugging Face Inference API (with PIL/io/base64 imports for base64-encoding the result)

Files changed (1) hide show
  1. app.py +55 -0
app.py CHANGED
@@ -6,6 +6,9 @@ import yaml
6
  from tools.final_answer import FinalAnswerTool
7
 
8
  from Gradio_UI import GradioUI
 
 
 
9
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity!
11
  @tool
@@ -33,6 +36,58 @@ def get_current_time_in_timezone(timezone: str) -> str:
33
  except Exception as e:
34
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
  final_answer = FinalAnswerTool()
38
  model = InferenceClientModel(
 
6
  from tools.final_answer import FinalAnswerTool
7
 
8
  from Gradio_UI import GradioUI
9
+ from PIL import Image
10
+ import io
11
+ import base64
12
 
13
  # Below is an example of a tool that does nothing. Amaze us with your creativity!
14
  @tool
 
36
  except Exception as e:
37
  return f"Error fetching time for timezone '{timezone}': {str(e)}"
38
 
39
@tool
def generate_image(
    prompt: str,
    negative_prompt: str = "",
    steps: int = 20,
    model_id: str = "stabilityai/stable-diffusion-xl-base-1.0"
) -> str:
    """A tool that generates images from text prompts using Hugging Face's Inference API.
    Automatically uses the HF_TOKEN environment variable for authentication.

    Args:
        prompt: The text description of the image you want to generate.
        negative_prompt: Things you don't want to see in the image (optional).
        steps: Number of denoising steps (default 20, higher is better quality but slower).
        model_id: The model ID to use (default: stabilityai/stable-diffusion-xl-base-1.0).
    Returns:
        A base64 encoded string of the generated image (as a data: URI), or an
        error message string if the token is missing or the request fails.
    """
    try:
        # Get token from environment variables (automatically available on HF Spaces)
        API_TOKEN = os.environ.get("HF_TOKEN")
        if not API_TOKEN:
            return "Error: HF_TOKEN not found in environment variables"

        API_URL = f"https://api-inference.huggingface.co/models/{model_id}"
        headers = {"Authorization": f"Bearer {API_TOKEN}"}

        # FIX: negative_prompt must live inside "parameters" — the Inference API
        # ignores unknown top-level keys, so the previous placement silently
        # disabled it.
        payload = {
            "inputs": prompt,
            "options": {"use_cache": True, "wait_for_model": True},
            "parameters": {
                "negative_prompt": negative_prompt,
                "num_inference_steps": steps,
                "guidance_scale": 7.5
            }
        }

        # FIX: added a timeout — without one a stalled endpoint hangs the agent
        # indefinitely. 120 s is generous enough for wait_for_model cold starts.
        response = requests.post(API_URL, headers=headers, json=payload, timeout=120)
        response.raise_for_status()

        # The API returns raw image bytes; convert to base64 for display.
        image = Image.open(io.BytesIO(response.content))
        buffered = io.BytesIO()
        image.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        return f"data:image/png;base64,{img_str}"
    except requests.exceptions.RequestException as e:
        return f"API Error: {str(e)}"
    except Exception as e:
        return f"Error generating image: {str(e)}"
91
 
92
  final_answer = FinalAnswerTool()
93
  model = InferenceClientModel(