Create app.py

#1
by FENST4R - opened
Files changed (1) hide show
  1. app.py +319 -0
app.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ from PIL import Image
4
+ from io import BytesIO
5
+ import os
6
+ import time
7
+ import json
8
+ from datetime import datetime
9
+
10
# Load API Token from environment variable.
# NOTE: os.getenv returns None when HF_API_TOKEN is unset; ImageGenerator
# checks for this and reports an error instead of sending a bad request.
API_TOKEN = os.getenv("HF_API_TOKEN") # Ensure you've set this environment variable

# Hugging Face Inference API URL for the Flux-uncensored model.
API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
15
+
16
class ImageGenerator:
    """Client for the Hugging Face Inference API that generates images and
    keeps an in-memory history of successful generations."""

    # Cap for 503 "model loading" retries so we never recurse forever.
    # (Bug fix: the original retried unconditionally on every 503.)
    MAX_RETRIES = 5

    def __init__(self):
        # Headers are built from the module-level API_TOKEN at construction
        # time; if the env var is unset, generate_image() reports the error
        # before any request is made.
        self.headers = {"Authorization": f"Bearer {API_TOKEN}"}
        self.generation_history = []

    def generate_image(self, prompt, negative_prompt="", num_inference_steps=50,
                       guidance_scale=7.5, seed=None, progress=gr.Progress(),
                       _retries=0):
        """
        Generate an image with advanced parameters.

        Args:
            prompt: Text description of the desired image (required, non-blank).
            negative_prompt: What to avoid; omitted from the payload when empty.
            num_inference_steps: Denoising steps forwarded to the model.
            guidance_scale: Prompt-adherence strength forwarded to the model.
            seed: Optional random seed. Seed 0 is valid and is no longer
                silently dropped (bug fix: the old truthiness test discarded it).
            progress: Gradio progress tracker.
            _retries: Internal 503-retry counter; callers should not set it.

        Returns:
            (image, message): a PIL.Image on success (None on failure) and a
            human-readable status string.
        """
        if not API_TOKEN:
            return None, "Error: HF_API_TOKEN environment variable not set"

        if not prompt or prompt.strip() == "":
            return None, "Error: Please enter a prompt"

        # Build optional parameters with explicit None checks so falsy-but-valid
        # values (e.g. seed == 0, guidance_scale == 0.0) survive the filter.
        parameters = {
            "negative_prompt": negative_prompt if negative_prompt else None,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
            "seed": seed,
        }
        payload = {
            "inputs": prompt,
            # Only send parameters that were actually provided.
            "parameters": {k: v for k, v in parameters.items() if v is not None},
        }

        try:
            progress(0.1, desc="Initializing generation...")

            # Make API request
            response = requests.post(API_URL, headers=self.headers, json=payload, timeout=60)

            progress(0.5, desc="Processing response...")

            if response.status_code == 200:
                # The API returns raw image bytes on success.
                image = Image.open(BytesIO(response.content))

                # Record the successful generation for the History tab.
                timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                self.generation_history.append({
                    "timestamp": timestamp,
                    "prompt": prompt,
                    "negative_prompt": negative_prompt,
                    "seed": seed
                })

                progress(1.0, desc="Complete!")
                return image, f"Success! Image generated at {timestamp}"

            elif response.status_code == 503:
                # Model is still loading on the HF side; wait and retry, but
                # give up after MAX_RETRIES instead of recursing forever.
                if _retries >= self.MAX_RETRIES:
                    return None, "Error: Model is still loading. Please try again later."
                progress(0.3, desc="Model is loading, please wait...")
                time.sleep(5)
                return self.generate_image(prompt, negative_prompt, num_inference_steps,
                                           guidance_scale, seed, progress,
                                           _retries=_retries + 1)

            else:
                error_msg = f"Error {response.status_code}: {response.text}"
                return None, error_msg

        except requests.exceptions.Timeout:
            return None, "Error: Request timed out. Please try again."
        except requests.exceptions.ConnectionError:
            return None, "Error: Connection error. Please check your internet connection."
        except Exception as e:
            return None, f"Error: {str(e)}"

    def get_history(self):
        """Return the last 10 generations (newest first) as Markdown text."""
        if not self.generation_history:
            return "No generations yet"

        history_text = "### Generation History\n\n"
        for i, item in enumerate(reversed(self.generation_history[-10:]), 1):
            history_text += f"{i}. **{item['timestamp']}**\n"
            history_text += f"   Prompt: {item['prompt'][:50]}...\n"
            if item['negative_prompt']:
                history_text += f"   Negative: {item['negative_prompt'][:30]}...\n"
            # Explicit None check so a recorded seed of 0 is still displayed.
            if item['seed'] is not None:
                history_text += f"   Seed: {item['seed']}\n"
            history_text += "\n"

        return history_text

    def clear_history(self):
        """Clear generation history and return a confirmation message."""
        self.generation_history = []
        return "History cleared"
109
+
110
def create_enhanced_ui():
    """Build and return the enhanced Gradio Blocks interface.

    Fixes vs. the previous revision:
    - All UI emoji were mojibake (UTF-8 emoji bytes decoded through a legacy
      codepage, e.g. "๐ŸŽจ"); the intended characters are restored.
    - The download component now receives a file path: gr.File cannot render
      a PIL image object, so the result is persisted to a temporary .png.
    - Removed a redundant .then() that re-set the identical
      "History cleared" message after clear_history already returned it.
    """
    # Initialize generator shared by all event handlers below.
    generator = ImageGenerator()

    # Custom CSS for better styling
    custom_css = """
    .gradio-container {
        max-width: 1200px !important;
        margin: auto !important;
    }
    .generate-btn {
        background: linear-gradient(90deg, #6366f1 0%, #8b5cf6 100%) !important;
        color: white !important;
        border: none !important;
    }
    .generate-btn:hover {
        background: linear-gradient(90deg, #4f46e5 0%, #7c3aed 100%) !important;
    }
    .history-panel {
        background: #f3f4f6;
        border-radius: 8px;
        padding: 10px;
    }
    """

    with gr.Blocks(theme="hev832/Applio", css=custom_css, title="Flux Uncensored Enhanced") as ui:
        gr.Markdown("""
        # 🎨 Flux Uncensored Image Generator
        ### Advanced image generation with Hugging Face
        """)

        with gr.Tabs():
            # Main Generation Tab
            with gr.TabItem("Generate"):
                with gr.Row():
                    with gr.Column(scale=2):
                        # Main input
                        prompt = gr.Textbox(
                            label="📝 Prompt",
                            placeholder="Describe the image you want to generate in detail...",
                            lines=4
                        )

                        # Advanced options (collapsible)
                        with gr.Accordion("⚙️ Advanced Options", open=False):
                            negative_prompt = gr.Textbox(
                                label="Negative Prompt",
                                placeholder="What to avoid in the image...",
                                lines=2
                            )

                            with gr.Row():
                                steps = gr.Slider(
                                    label="Inference Steps",
                                    minimum=20,
                                    maximum=100,
                                    value=50,
                                    step=1
                                )

                                guidance = gr.Slider(
                                    label="Guidance Scale",
                                    minimum=1.0,
                                    maximum=20.0,
                                    value=7.5,
                                    step=0.5
                                )

                            seed = gr.Number(
                                label="Seed (optional)",
                                value=None,
                                precision=0
                            )

                        # Generate button
                        generate_btn = gr.Button(
                            "🎨 Generate Image",
                            variant="primary",
                            elem_classes="generate-btn"
                        )

                    with gr.Column(scale=1):
                        # Status and info
                        status = gr.Textbox(
                            label="Status",
                            value="Ready to generate",
                            interactive=False
                        )

                # Output
                with gr.Row():
                    output_image = gr.Image(
                        label="Generated Image",
                        type="pil",
                        height=400
                    )

                # Download button (hidden until an image exists)
                with gr.Row():
                    download_btn = gr.File(
                        label="Download Image",
                        interactive=False,
                        visible=False
                    )

            # History Tab
            with gr.TabItem("📜 History"):
                with gr.Row():
                    history_text = gr.Markdown("No generations yet")

                with gr.Row():
                    refresh_history_btn = gr.Button("🔄 Refresh History")
                    clear_history_btn = gr.Button("🗑️ Clear History", variant="stop")

            # Info Tab
            with gr.TabItem("ℹ️ Info"):
                gr.Markdown("""
                ## About Flux Uncensored

                This is an unofficial Gradio interface for the Flux Uncensored model on Hugging Face.

                ### Tips for better results:
                - Be specific and detailed in your prompts
                - Use negative prompts to avoid unwanted elements
                - Experiment with different guidance scales (7.5 is a good starting point)
                - More inference steps generally produce better quality but take longer

                ### Parameters explained:
                - **Prompt**: What you want to generate
                - **Negative Prompt**: What you don't want in the image
                - **Inference Steps**: Number of denoising steps (higher = better quality but slower)
                - **Guidance Scale**: How closely to follow the prompt (higher = more adherence)
                - **Seed**: Random seed for reproducibility

                ### Note:
                Make sure to set your `HF_API_TOKEN` environment variable before running.
                """)

        # Event handlers
        def on_generate(prompt, negative_prompt, steps, guidance, seed):
            # gr.Number yields 0 when left blank in some versions; treat 0 as
            # "no seed" here, matching the previous UI behavior.
            img, msg = generator.generate_image(
                prompt,
                negative_prompt,
                steps,
                guidance,
                seed if seed != 0 else None
            )
            if img:
                # gr.File needs a path on disk, not a PIL.Image — persist the
                # result to a temporary PNG so it can be downloaded.
                import tempfile
                tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
                img.save(tmp.name)
                tmp.close()
                return img, msg, gr.update(visible=True, value=tmp.name)
            return None, msg, gr.update(visible=False)

        generate_btn.click(
            fn=on_generate,
            inputs=[prompt, negative_prompt, steps, guidance, seed],
            outputs=[output_image, status, download_btn]
        )

        # History handlers
        def update_history():
            return generator.get_history()

        refresh_history_btn.click(
            fn=update_history,
            outputs=[history_text]
        )

        # clear_history already returns the confirmation text; no .then needed.
        clear_history_btn.click(
            fn=generator.clear_history,
            outputs=[history_text]
        )

        # Auto-refresh history when generating
        generate_btn.click(
            fn=update_history,
            outputs=[history_text]
        )

        # Clear inputs
        def clear_inputs():
            # Reset to the components' original defaults.
            return "", "", 50, 7.5, None

        with gr.Row():
            clear_btn = gr.Button("🗑️ Clear Inputs")
            clear_btn.click(
                fn=clear_inputs,
                outputs=[prompt, negative_prompt, steps, guidance, seed]
            )

    return ui
304
+
305
# Run the interface
if __name__ == "__main__":
    # Warn early (but don't abort) if the token is missing: the UI still
    # starts, and generate_image() reports the error per request.
    # (Fix: the warning emoji was mojibake — "โš ๏ธ" restored to "⚠️".)
    if not API_TOKEN:
        print("⚠️ Warning: HF_API_TOKEN environment variable is not set!")
        print("Please set it using: export HF_API_TOKEN='your_token_here'")

    # Create and launch the UI
    ui = create_enhanced_ui()
    ui.launch(
        server_name="0.0.0.0",  # Allow external connections
        server_port=7860,       # Default Gradio port
        share=False,            # Set to True to create a public link
        debug=True
    )