Choquinlabs commited on
Commit
97d370c
·
verified ·
1 Parent(s): 69b7c75

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. app.py +351 -0
  2. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import numpy as np
4
+ from PIL import Image
5
+ from typing import Optional, Tuple, List
6
+ import time
7
+ import json
8
+ from pathlib import Path
9
+
10
+ # Mock implementation for demonstration - replace with actual Qwen model loading
11
class QwenImageLightingModel:
    """Mock Qwen Image model with lighting LoRA - replace with actual implementation.

    Simulates a two-phase "progressive latent" sampler: the first half of the
    inference steps yields half-resolution preview frames, the second half
    yields full-resolution previews.
    """

    def __init__(self):
        # Prefer GPU when available; the mock itself is device-agnostic.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model_loaded = False

    def load_model(self):
        """Load the actual Qwen model and LoRA weights (no-op in the mock)."""
        # In production, load actual model here
        # Example: self.pipeline = AutoPipelineForText2Image.from_pretrained(...)
        #          self.pipeline.load_lora_weights("path/to/lighting-lora")
        self.model_loaded = True

    def generate_with_progressive_latents(
        self,
        prompt: str,
        negative_prompt: Optional[str] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        seed: Optional[int] = None,
        width: int = 512,
        height: int = 512
    ) -> Tuple[Image.Image, List[Image.Image]]:
        """
        Generate image with progressive latent space sampling.

        First half of the steps: smaller (50%) latent space.
        Second half of the steps: full latent space.

        Args:
            prompt: Text description of the desired image (unused by the mock).
            negative_prompt: Things to avoid (unused by the mock).
            num_inference_steps: Total denoising steps to simulate.
            guidance_scale: Classifier-free guidance weight (unused by the mock).
            seed: Optional RNG seed; when given, output is deterministic.
            width: Output width in pixels.
            height: Output height in pixels.

        Returns:
            (final_image, progress_images): the final PIL image plus one
            preview image per step.
        """
        if not self.model_loaded:
            self.load_model()

        # Honor the seed so repeated calls with the same seed reproduce the
        # same output. (Previously the seed argument was silently ignored.)
        rng = np.random.default_rng(seed)

        progress_images = []
        half = num_inference_steps // 2

        # Simulate progressive generation. The unused per-step `progress`
        # ratio from the original is dropped: it divided by `half`, which
        # crashed with ZeroDivisionError when num_inference_steps == 1.
        for i in range(num_inference_steps):
            if i < half:
                # First half: sample in a smaller (50%) latent space.
                size = int(min(width, height) * 0.5)
            else:
                # Second half: switch to the full latent space.
                size = min(width, height)

            # Mock preview frame. The upper bound is exclusive, so 256 covers
            # the full 0-255 pixel range (the original bound of 255 silently
            # excluded pure white).
            mock_image = Image.fromarray(
                rng.integers(0, 256, (size, size, 3), dtype=np.uint8)
            )
            progress_images.append(mock_image)

            # Simulate per-step processing time.
            time.sleep(0.1)

        # Final full-resolution mock image (note numpy arrays are
        # (rows, cols) = (height, width)).
        final_image = Image.fromarray(
            rng.integers(0, 256, (height, width, 3), dtype=np.uint8)
        )

        return final_image, progress_images
73
+
74
# Initialize model
# Module-level singleton shared by all requests; the (mock) weights are loaded
# lazily on the first generation call via load_model().
model = QwenImageLightingModel()
76
+
77
def generate_image(
    prompt: str,
    negative_prompt: str = "",
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    seed: int = -1,
    width: int = 512,
    height: int = 512,
    progress: gr.Progress = gr.Progress()
) -> Tuple[Optional[Image.Image], Optional[Image.Image]]:
    """Generate an image with progressive latent space sampling.

    Args:
        prompt: Text prompt; must be non-empty.
        negative_prompt: Optional text describing what to avoid.
        num_inference_steps: Denoising steps to run.
        guidance_scale: Classifier-free guidance weight.
        seed: RNG seed; -1 selects a random seed.
        width: Output width in pixels.
        height: Output height in pixels.
        progress: Gradio progress tracker (injected by Gradio).

    Returns:
        (final_image, progress_grid) on success.

    Raises:
        gr.Error: when the prompt is empty or generation fails.
    """
    try:
        if not prompt.strip():
            raise gr.Error("Please enter a prompt")

        # Resolve -1 to a concrete random seed so the run is reproducible.
        if seed == -1:
            seed = np.random.randint(0, 2**32 - 1)

        progress(0.1, desc="Initializing model...")
        # Report the generation phase *before* the (long) generation call;
        # the original fired this tick after generation had already finished.
        progress(0.5, desc="Generating with progressive latents...")

        # Generate with progressive latents
        final_image, progress_images = model.generate_with_progressive_latents(
            prompt=prompt,
            negative_prompt=negative_prompt if negative_prompt.strip() else None,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            seed=seed,
            width=width,
            height=height
        )

        progress(0.9, desc="Finalizing image...")

        # Strip visualization of the per-step previews.
        progress_grid = create_progress_grid(progress_images)

        progress(1.0, desc="Complete!")

        return final_image, progress_grid

    except gr.Error:
        # Already a user-facing error; let Gradio display it as-is instead of
        # re-wrapping it as "Generation failed: ...".
        raise
    except Exception as e:
        # The original constructed gr.Error(...) without raising it — a no-op
        # that silently returned (None, None). Gradio only shows an error
        # when the exception is actually raised.
        raise gr.Error(f"Generation failed: {e}") from e
125
+
126
def create_progress_grid(images: List[Image.Image]) -> Image.Image:
    """Build a horizontal strip visualizing generation progress.

    Samples at most 8 evenly spaced frames from *images*, scales each to a
    64x64 tile, and pastes them side by side onto a white canvas.
    """
    if not images:
        # Nothing to show: return a blank white placeholder.
        return Image.new('RGB', (512, 512), color='white')

    # At most 8 evenly spaced snapshots from the sequence.
    stride = max(1, len(images) // 8)
    snapshots = images[::stride][:8]

    tile = 64
    strip = Image.new('RGB', (tile * len(snapshots), tile), color='white')

    x = 0
    for snap in snapshots:
        # Scale each frame down to a uniform tile and append it to the strip.
        strip.paste(snap.resize((tile, tile), Image.Resampling.LANCZOS), (x, 0))
        x += tile

    return strip
146
+
147
def update_info():
    """Return the current model status as a pretty-printed JSON string."""
    status = "Ready" if model.model_loaded else "Not Loaded"
    return json.dumps(
        {
            "Model": "Qwen-Image with Lighting LoRA",
            "Sampling": "Progressive Latent Space",
            "Device": model.device,
            "Status": status,
        },
        indent=2,
    )
156
+
157
# Custom CSS for better styling.
# NOTE(review): these class selectors are referenced via elem_classes in the
# UI; the string must be handed to Gradio (gr.Blocks(css=...)) to take effect.
custom_css = """
.progress-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    border-radius: 10px;
    padding: 20px;
    margin: 10px 0;
}
.image-container {
    border: 2px solid #e1e5e9;
    border-radius: 10px;
    padding: 10px;
    background: white;
}
.main-header {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}
"""
178
+
179
# Create Gradio interface.
# NOTE: theme and css belong on gr.Blocks(); Blocks.launch() does not accept
# them, so passing them to launch() (as the original did) raises a TypeError
# before the app ever starts.
_theme = gr.themes.Soft(
    primary_hue="purple",
    secondary_hue="blue",
    neutral_hue="slate",
    font=gr.themes.GoogleFont("Inter"),
    text_size="lg",
    spacing_size="lg",
    radius_size="md"
).set(
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    block_title_text_weight="600",
    block_border_width="1px",
    block_border_color="*neutral_200"
)

with gr.Blocks(theme=_theme, css=custom_css) as demo:
    # Header
    gr.HTML("""
    <div style="text-align: center; margin-bottom: 30px;">
        <h1 class="main-header" style="font-size: 2.5em; font-weight: bold; margin-bottom: 10px;">
            Qwen-Image with Lighting LoRA
        </h1>
        <p style="font-size: 1.1em; color: #666;">
            Progressive Latent Space Sampling • First half: 50% latent size • Second half: Full latent size
        </p>
        <p style="font-size: 0.9em; margin-top: 10px;">
            <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #667eea;">
                Built with anycoder
            </a>
        </p>
    </div>
    """)

    with gr.Row():
        # Left column - Controls
        with gr.Column(scale=1):
            gr.Markdown("### Generation Settings")

            # Basic inputs
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe the image you want to generate...",
                lines=3,
                max_lines=5
            )

            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="What to avoid in the image...",
                lines=2,
                max_lines=3
            )

            # Advanced settings in accordion
            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    num_steps = gr.Slider(
                        label="Inference Steps",
                        minimum=20,
                        maximum=100,
                        value=50,
                        step=1,
                        info="More steps = better quality"
                    )

                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1.0,
                        maximum=20.0,
                        value=7.5,
                        step=0.5,
                        info="Higher = more prompt adherence"
                    )

                with gr.Row():
                    width = gr.Dropdown(
                        label="Width",
                        choices=[256, 512, 768, 1024],
                        value=512
                    )

                    height = gr.Dropdown(
                        label="Height",
                        choices=[256, 512, 768, 1024],
                        value=512
                    )

                seed = gr.Number(
                    label="Seed (-1 for random)",
                    value=-1,
                    precision=0
                )

            # Generate button
            generate_btn = gr.Button(
                "Generate Image",
                variant="primary",
                size="lg",
                elem_classes=["generate-button"]
            )

            # Model info
            with gr.Accordion("Model Information", open=False):
                model_info = gr.JSON(label="Model Status")
                update_info_btn = gr.Button("Refresh Status", size="sm")

        # Right column - Outputs
        with gr.Column(scale=2):
            gr.Markdown("### Generated Results")

            # Main output
            with gr.Group(elem_classes=["image-container"]):
                output_image = gr.Image(
                    label="Generated Image",
                    type="pil",
                    height=400
                )

            # Progress visualization
            with gr.Group(elem_classes=["image-container"]):
                progress_image = gr.Image(
                    label="Generation Progress (showing progressive latents)",
                    type="pil",
                    height=100
                )

    # Examples
    gr.Markdown("### Examples")
    examples = [
        ["A mystical forest with glowing mushrooms and ethereal lighting", "blurry, low quality", 50, 7.5, -1, 512, 512],
        ["A dramatic portrait with cinematic lighting", "cartoon, anime", 40, 8.0, 42, 768, 768],
        ["An architectural interior with natural light streaming through windows", "dark, artificial lighting", 60, 6.5, -1, 512, 512],
        ["A fantasy landscape with magical lighting effects", "realistic, photographic", 45, 9.0, 123, 1024, 512]
    ]

    gr.Examples(
        examples=examples,
        inputs=[prompt, negative_prompt, num_steps, guidance_scale, seed, width, height],
        outputs=[output_image, progress_image],
        fn=generate_image,
        # The mock generator sleeps per step and returns random pixels, so
        # pre-computing cached example outputs at startup (cache_examples=True)
        # would burn tens of seconds for throwaway results.
        cache_examples=False,
        examples_per_page=4
    )

    # Event handlers
    # NOTE(review): `api_visibility` is assumed to be a Gradio 6 event kwarg
    # (requirements pin gradio>=6.0) — confirm against the installed version.
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, num_steps, guidance_scale, seed, width, height],
        outputs=[output_image, progress_image],
        api_visibility="public"
    )

    update_info_btn.click(
        fn=update_info,
        outputs=[model_info],
        api_visibility="private"
    )

    # Load initial model status into the info panel when the page opens.
    demo.load(
        fn=update_info,
        outputs=[model_info],
        api_visibility="private"
    )

# Launch the app (theme/css are configured on gr.Blocks above).
demo.launch(
    # NOTE(review): `footer_links` is assumed to be a Gradio 6 launch kwarg —
    # confirm; drop it when running on older Gradio versions.
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"}
    ]
)
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ Pillow
2
+ gradio>=6.0
3
+ numpy
4
+ torch
5
+ torchvision
6
+ torchaudio
7
+ requests
8
+ opencv-python
9
+ matplotlib
10
+ scipy