sreejang commited on
Commit
ab5cbcb
·
verified ·
1 Parent(s): 6a07e95

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -196
app.py CHANGED
@@ -7,248 +7,95 @@ import json
7
  import time
8
 
9
  # ==================== FREE HUGGINGFACE INFERENCE API ====================
10
- # No API key required for public models! Free tier has rate limits (roughly 1-2 requests/minute)
11
 
12
  HF_INFERENCE_API = "https://api-inference.huggingface.co/models"
13
 
14
  def huggingface_generate(model_id, prompt, width=1024, height=1024, seed=0):
15
- """
16
- Generic Hugging Face Inference API caller
17
- Works with Stable Diffusion, FLUX, and other diffusion models
18
- """
19
  try:
20
  url = f"{HF_INFERENCE_API}/{model_id}"
21
-
22
- # Prepare payload - different models accept different parameters
23
  payload = {
24
  "inputs": prompt,
25
  "parameters": {
26
  "width": width,
27
  "height": height,
28
  "seed": seed if seed > 0 else int(time.time()),
29
- "num_inference_steps": 25, # Lower = faster, default for most models
30
  "guidance_scale": 7.5,
31
  }
32
  }
33
-
34
- # For some models, we need to send just the string in inputs
35
- headers = {
36
- "Content-Type": "application/json",
37
- # No Authorization header needed for public models!
38
- }
39
-
40
  response = requests.post(url, json=payload, headers=headers, timeout=120)
41
-
42
  if response.status_code == 200:
43
- # Response is image bytes directly
44
- return Image.open(BytesIO(response.content)), f"Hugging Face ({model_id})"
45
  elif response.status_code == 503:
46
- # Model is loading
47
- return None, f"Model {model_id} is loading... Try again in 30 seconds"
48
  else:
49
- error_msg = response.text[:200] if response.text else f"HTTP {response.status_code}"
50
- return None, f"HF Error: {error_msg}"
51
-
52
  except Exception as e:
53
- return None, f"HF Failed: {str(e)}"
54
 
55
- # Specific model wrappers
56
- def stabilityai_generate(prompt, width=1024, height=1024, seed=0):
57
- """Stable Diffusion 2.1 - Most reliable free model"""
58
- return huggingface_generate(
59
- "stabilityai/stable-diffusion-2-1",
60
- prompt, width, height, seed
61
- )
62
 
63
- def flux_schnell_generate(prompt, width=1024, height=1024, seed=0):
64
- """FLUX.1-schnell - Fast but might be rate limited"""
65
- return huggingface_generate(
66
- "black-forest-labs/FLUX.1-schnell",
67
- prompt, width, height, seed
68
- )
69
 
70
- def flux_dev_generate(prompt, width=1024, height=1024, seed=0):
71
- """FLUX.1-dev - High quality but slower"""
72
- return huggingface_generate(
73
- "black-forest-labs/FLUX.1-dev",
74
- prompt, width, height, seed
75
- )
76
 
77
- def sd_xl_generate(prompt, width=1024, height=1024, seed=0):
78
- """Stable Diffusion XL Base 1.0"""
79
- return huggingface_generate(
80
- "stabilityai/stable-diffusion-xl-base-1.0",
81
- prompt, width, height, seed
82
- )
83
-
84
- def pollinations_generate(prompt, width=1024, height=1024, seed=0):
85
- """Keep as fallback"""
86
  try:
87
  encoded_prompt = urllib.parse.quote(prompt)
88
  url = f"https://image.pollinations.ai/prompt/{encoded_prompt}?width={width}&height={height}&nologo=true&seed={seed if seed > 0 else int(time.time())}"
89
  response = requests.get(url, timeout=30)
90
  if response.status_code == 200:
91
- return Image.open(BytesIO(response.content)), "Pollinations.ai"
92
- return None, f"Pollinations: HTTP {response.status_code}"
93
  except Exception as e:
94
- return None, f"Pollinations Failed: {str(e)}"
95
-
96
- # ==================== MAIN GENERATION LOGIC ====================
97
 
98
  def generate_with_fallback(prompt, width=1024, height=1024, seed=0, primary_model="Stable Diffusion 2.1"):
99
- """
100
- Try Hugging Face first (free, reliable), then Pollinations as backup
101
- """
102
  if not prompt:
103
- return None, "⚠️ Please enter a prompt!"
104
-
105
- # Map friendly names to functions
106
  models = {
107
  "Stable Diffusion 2.1": stabilityai_generate,
108
  "FLUX.1-schnell (Fast)": flux_schnell_generate,
109
- "FLUX.1-dev (Quality)": flux_dev_generate,
110
  "Stable Diffusion XL": sd_xl_generate,
111
  }
112
-
113
  errors = []
114
-
115
- # Try primary model first
116
- if primary_model in models:
117
- func = models[primary_model]
118
  try:
119
- print(f"🔄 Trying {primary_model}...")
120
- image, status = func(prompt, width, height, seed)
121
- if image is not None:
122
- return image, f" Success! ({status})"
123
- else:
124
- errors.append(f"{primary_model}: {status}")
125
  except Exception as e:
126
- errors.append(f"{primary_model}: {str(e)}")
127
-
128
- # Try other models as fallback
129
- for model_name, func in models.items():
130
- if model_name == primary_model:
131
- continue
132
- try:
133
- print(f"🔄 Trying fallback: {model_name}...")
134
- image, status = func(prompt, width, height, seed)
135
- if image is not None:
136
- return image, f"✅ Success! ({status} - Fallback)"
137
- else:
138
- errors.append(f"{model_name}: {status}")
139
- except Exception as e:
140
- errors.append(f"{model_name}: {str(e)}")
141
-
142
- # Last resort: Pollinations
143
- try:
144
- print("🔄 Trying Pollinations as last resort...")
145
- image, status = pollinations_generate(prompt, width, height, seed)
146
- if image is not None:
147
- return image, f"✅ Success! ({status} - Last Resort)"
148
- else:
149
- errors.append(f"Pollinations: {status}")
150
- except Exception as e:
151
- errors.append(f"Pollinations: {str(e)}")
152
-
153
- # All failed
154
- error_msg = "❌ All services failed:\n" + "\n".join(errors)
155
- error_msg += "\n\n💡 Tips:\n- Hugging Face free tier has rate limits (wait 1-2 minutes)\n- Try a different model\n- Check your internet connection"
156
- return None, error_msg
157
 
158
- # ==================== GRADIO UI ====================
159
 
160
- with gr.Blocks(title="Free Image Generator - HF Edition", theme=gr.themes.Soft()) as demo:
161
- gr.Markdown("""
162
- # 🎨 Free Image Generator (Hugging Face Edition)
163
- **Zero Cost • No API Keys • Powered by Stable Diffusion & FLUX**
164
-
165
- Made with ❤️ by Srijan Gajurel | Uses Hugging Face Free Inference API
166
- """)
167
 
168
  with gr.Row():
169
- with gr.Column(scale=1):
170
- prompt = gr.Textbox(
171
- label="📝 Prompt",
172
- lines=3,
173
- placeholder="Describe your image in detail...",
174
- value="A beautiful sunset over mountains, cinematic lighting, 8k quality"
175
- )
176
-
177
  with gr.Row():
178
- width = gr.Slider(
179
- minimum=512, maximum=1024, value=1024, step=64,
180
- label="📐 Width"
181
- )
182
- height = gr.Slider(
183
- minimum=512, maximum=1024, value=1024, step=64,
184
- label="📏 Height"
185
- )
186
-
187
- seed = gr.Number(
188
- value=0, label="🎲 Seed (0 for random)", precision=0
189
- )
190
-
191
- model_selector = gr.Radio(
192
- choices=[
193
- "Stable Diffusion 2.1",
194
- "FLUX.1-schnell (Fast)",
195
- "FLUX.1-dev (Quality)",
196
- "Stable Diffusion XL"
197
- ],
198
- value="Stable Diffusion 2.1",
199
- label="🤖 AI Model (Auto-fallback enabled)"
200
- )
201
-
202
- with gr.Accordion("ℹ️ About Free Limits", open=False):
203
- gr.Markdown("""
204
- **Hugging Face Free Tier:**
205
- - ✅ No API key required for public models
206
- - ⚠️ Rate limited (~1-2 requests/minute per model)
207
- - ⏳ Models may "sleep" after inactivity (first request wakes them up)
208
- - 🔄 Auto-fallback tries all models if one fails
209
-
210
- **Tips:**
211
- - If you get "Model is loading", wait 30 seconds and retry
212
- - Stable Diffusion 2.1 is the most reliable
213
- - FLUX models are higher quality but may be busier
214
- """)
215
-
216
- generate_btn = gr.Button("🚀 Generate Image", variant="primary", size="lg")
217
-
218
- status_text = gr.Textbox(
219
- label="📊 Status",
220
- interactive=False,
221
- value="Ready - Select a model and click Generate"
222
- )
223
 
224
- with gr.Column(scale=1):
225
- output_image = gr.Image(
226
- label="🖼️ Generated Image",
227
- type="pil",
228
- height=600,
229
- show_download_button=True
230
- )
231
-
232
- gr.Markdown("### 🎨 Example Prompts")
233
- examples = gr.Examples(
234
- examples=[
235
- ["A futuristic cyberpunk city at night, neon lights, rain, cinematic", 1024, 1024, 42, "Stable Diffusion 2.1"],
236
- ["Portrait of a wise old owl wearing glasses, oil painting style, detailed", 1024, 1024, 0, "FLUX.1-schnell (Fast)"],
237
- ["Magical forest with glowing mushrooms and fireflies, fantasy art", 1024, 1024, 123, "Stable Diffusion XL"],
238
- ["Cute robot baking cookies in a cozy kitchen, 3d render, pixar style", 1024, 1024, 0, "FLUX.1-dev (Quality)"],
239
- ],
240
- inputs=[prompt, width, height, seed, model_selector]
241
- )
242
 
243
- # Event handler
244
- generate_btn.click(
245
- fn=generate_with_fallback,
246
- inputs=[prompt, width, height, seed, model_selector],
247
- outputs=[output_image, status_text]
248
- )
249
 
250
- if __name__ == "__main__":
251
- print("🚀 Starting Free Image Generator...")
252
- print("🤖 Models available: Stable Diffusion 2.1, SDXL, FLUX.1-schnell, FLUX.1-dev")
253
- print("💡 No API keys required!")
254
- demo.launch(share=False)
 
7
  import time
8
 
9
  # ==================== FREE HUGGINGFACE INFERENCE API ====================
 
10
 
11
  HF_INFERENCE_API = "https://api-inference.huggingface.co/models"
12
 
13
def huggingface_generate(model_id, prompt, width=1024, height=1024, seed=0):
    """Call the free Hugging Face Inference API for a text-to-image model.

    Args:
        model_id: Repo id on the Hub, e.g. "stabilityai/stable-diffusion-2-1".
        prompt:   Text description of the desired image.
        width:    Image width in pixels (default 1024).
        height:   Image height in pixels (default 1024).
        seed:     RNG seed; 0 (or negative) picks a time-based pseudo-random seed.

    Returns:
        (PIL.Image, status_str) on success, or (None, error_str) on any failure.
        Never raises — all exceptions are folded into the error string.
    """
    try:
        url = f"{HF_INFERENCE_API}/{model_id}"
        payload = {
            "inputs": prompt,
            "parameters": {
                "width": width,
                "height": height,
                # 0 means "random": derive a seed from the wall clock.
                "seed": seed if seed > 0 else int(time.time()),
                "num_inference_steps": 25,
                "guidance_scale": 7.5,
            }
        }
        # No Authorization header: public models work without an API key.
        headers = {"Content-Type": "application/json"}
        response = requests.post(url, json=payload, headers=headers, timeout=120)
        if response.status_code == 200:
            # Success responses carry the raw image bytes in the body.
            return Image.open(BytesIO(response.content)), f"HF ({model_id})"
        elif response.status_code == 503:
            # 503 = model is cold and being loaded onto a worker.
            return None, "Model loading... wait 30s"
        else:
            return None, f"HTTP {response.status_code}: {response.text[:100]}"
    except Exception as e:
        return None, f"Error: {str(e)}"
36
 
37
def stabilityai_generate(prompt, width, height, seed):
    """Generate an image with Stable Diffusion 2.1 via the HF Inference API."""
    model_id = "stabilityai/stable-diffusion-2-1"
    return huggingface_generate(model_id, prompt, width, height, seed)
 
 
 
 
 
39
 
40
def flux_schnell_generate(prompt, width, height, seed):
    """Generate an image with FLUX.1-schnell via the HF Inference API."""
    model_id = "black-forest-labs/FLUX.1-schnell"
    return huggingface_generate(model_id, prompt, width, height, seed)
 
 
 
 
42
 
43
def sd_xl_generate(prompt, width, height, seed):
    """Generate an image with Stable Diffusion XL Base 1.0 via the HF Inference API."""
    model_id = "stabilityai/stable-diffusion-xl-base-1.0"
    return huggingface_generate(model_id, prompt, width, height, seed)
 
 
 
 
45
 
46
def pollinations_generate(prompt, width, height, seed):
    """Fetch an image from the free Pollinations.ai endpoint (keyless fallback).

    Returns (PIL.Image, "Pollinations") on success, or (None, error_str) otherwise.
    Never raises — all exceptions are folded into the error string.
    """
    try:
        query = urllib.parse.quote(prompt)
        # 0 means "random": derive a seed from the wall clock.
        effective_seed = seed if seed > 0 else int(time.time())
        url = (
            f"https://image.pollinations.ai/prompt/{query}"
            f"?width={width}&height={height}&nologo=true&seed={effective_seed}"
        )
        resp = requests.get(url, timeout=30)
        if resp.status_code == 200:
            return Image.open(BytesIO(resp.content)), "Pollinations"
        return None, f"Pollinations HTTP {resp.status_code}"
    except Exception as e:
        return None, f"Pollinations: {str(e)}"
 
 
56
 
57
def generate_with_fallback(prompt, width=1024, height=1024, seed=0, primary_model="Stable Diffusion 2.1"):
    """Generate an image, trying the selected model first, then the rest, then Pollinations.

    Args:
        prompt:        Text description; empty/None short-circuits with a warning.
        width, height: Image dimensions in pixels.
        seed:          RNG seed; 0 means random.
        primary_model: Friendly name of the model to attempt first.

    Returns:
        (PIL.Image, "✅ ..." status) on the first success, or
        (None, "❌ ..." message listing every provider's error) if all fail.
    """
    if not prompt:
        return None, "⚠️ Enter a prompt!"
    models = {
        "Stable Diffusion 2.1": stabilityai_generate,
        "FLUX.1-schnell (Fast)": flux_schnell_generate,
        "Stable Diffusion XL": sd_xl_generate,
    }
    # BUGFIX: honor the user's selection — move the primary model to the front
    # of the queue (previously `primary_model` was accepted but ignored).
    # `sorted` is stable, so the remaining models keep their original order.
    ordered = sorted(models.items(), key=lambda kv: kv[0] != primary_model)
    errors = []
    # Pollinations is always the last resort after all HF models.
    for name, func in ordered + [("Pollinations", pollinations_generate)]:
        try:
            img, status = func(prompt, width, height, seed)
            if img is not None:
                return img, f"✅ {status}"
            errors.append(f"{name}: {status}")
        except Exception as e:
            errors.append(f"{name}: {str(e)}")
    # BUGFIX: use real newlines (the previous text contained literal "\n").
    return None, "❌ Failed:\n" + "\n".join(errors)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
# ==================== GRADIO UI ====================

# BUGFIX: `theme` is a parameter of the gr.Blocks constructor, not of
# Blocks.launch() — passing it to launch() fails (launch has no such kwarg).
with gr.Blocks(title="Free Image Generator", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🎨 Free Image Generator (HF Edition)")
    gr.Markdown("**No API Keys Multi-Provider Fallback** | By Srijan Gajurel")

    with gr.Row():
        # Left column: all generation controls.
        with gr.Column():
            prompt = gr.Textbox(label="📝 Prompt", lines=3, value="Sunset over mountains, cinematic")
            with gr.Row():
                width = gr.Slider(512, 1024, 1024, step=64, label="Width")
                height = gr.Slider(512, 1024, 1024, step=64, label="Height")
            seed = gr.Number(0, label="Seed (0=random)", precision=0)
            model = gr.Radio(
                ["Stable Diffusion 2.1", "FLUX.1-schnell (Fast)", "Stable Diffusion XL"],
                value="Stable Diffusion 2.1",
                label="Model",
            )
            btn = gr.Button("🚀 Generate", variant="primary")
            status = gr.Textbox(label="Status", interactive=False, value="Ready")

        # Right column: the generated image.
        with gr.Column():
            img = gr.Image(label="Generated Image", type="pil", height=600)

    btn.click(generate_with_fallback, [prompt, width, height, seed, model], [img, status])

if __name__ == "__main__":
    demo.launch()