fantos committed on
Commit
b4de204
·
verified ·
1 Parent(s): ac28453

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +187 -205
app.py CHANGED
@@ -1,253 +1,235 @@
1
- import spaces
2
- import argparse
3
  import os
4
- import time
5
- from os import path
6
- from safetensors.torch import load_file
7
- from huggingface_hub import hf_hub_download
8
-
9
- cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
10
- # TRANSFORMERS_CACHE is deprecated, only use HF_HOME
11
- os.environ["HF_HUB_CACHE"] = cache_path
12
- os.environ["HF_HOME"] = cache_path
13
-
14
  import gradio as gr
 
 
 
15
  import torch
16
-
17
- # Try to handle version compatibility issues
18
- try:
19
- from diffusers import FluxPipeline
20
- except ImportError as e:
21
- print(f"Error importing FluxPipeline: {e}")
22
- print("Attempting to use StableDiffusionPipeline as fallback...")
23
- from diffusers import StableDiffusionPipeline as FluxPipeline
24
-
25
- torch.backends.cuda.matmul.allow_tf32 = True
26
-
27
- class timer:
28
- def __init__(self, method_name="timed process"):
29
- self.method = method_name
30
- def __enter__(self):
31
- self.start = time.time()
32
- print(f"{self.method} starts")
33
- def __exit__(self, exc_type, exc_val, exc_tb):
34
- end = time.time()
35
- print(f"{self.method} took {str(round(end - self.start, 2))}s")
36
-
37
- if not path.exists(cache_path):
38
- os.makedirs(cache_path, exist_ok=True)
39
-
40
- pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
41
- pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
42
- pipe.fuse_lora(lora_scale=0.125)
43
- pipe.to(device="cuda", dtype=torch.bfloat16)
44
-
45
- # Custom CSS for gradient effects and visual enhancements
46
- custom_css = """
47
- .container {
48
- max-width: 1200px;
49
- margin: 0 auto;
50
- padding: 20px;
51
- }
52
-
53
- .gradio-container {
54
- background: linear-gradient(135deg, #667eea 0%, #764ba2 50%, #f093fb 100%);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  min-height: 100vh;
56
  }
57
 
58
- .main-content {
59
- background: rgba(255, 255, 255, 0.95);
60
- border-radius: 20px;
61
- padding: 30px;
62
- box-shadow: 0 20px 40px rgba(0, 0, 0, 0.1);
63
- backdrop-filter: blur(10px);
64
- }
65
-
66
- h1 {
67
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
68
- -webkit-background-clip: text;
69
- -webkit-text-fill-color: transparent;
70
- background-clip: text;
71
- text-align: center;
72
- font-size: 3rem !important;
73
- font-weight: 800 !important;
74
- margin-bottom: 1rem !important;
75
- text-shadow: 2px 2px 4px rgba(0, 0, 0, 0.1);
76
  }
77
 
78
- .subtitle {
79
- text-align: center;
80
- color: #666;
81
- font-size: 1.2rem;
82
- margin-bottom: 2rem;
 
 
 
83
  }
84
 
85
  .gr-button-primary {
86
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
87
- border: none !important;
88
  color: white !important;
89
- font-weight: bold !important;
90
- font-size: 1.1rem !important;
91
- padding: 12px 30px !important;
92
- border-radius: 10px !important;
93
  transition: all 0.3s ease !important;
94
- box-shadow: 0 4px 15px rgba(102, 126, 234, 0.3) !important;
95
  }
96
 
97
  .gr-button-primary:hover {
 
98
  transform: translateY(-2px) !important;
99
- box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4) !important;
100
  }
101
 
102
- .gr-input, .gr-box {
103
- border-radius: 10px !important;
104
- border: 2px solid #e0e0e0 !important;
105
- transition: all 0.3s ease !important;
 
 
 
 
 
106
  }
107
 
108
- .gr-input:focus {
109
- border-color: #667eea !important;
110
- box-shadow: 0 0 0 3px rgba(102, 126, 234, 0.1) !important;
111
  }
112
 
113
- .gr-form {
114
- background: white !important;
115
- border-radius: 15px !important;
116
- padding: 20px !important;
117
- box-shadow: 0 4px 10px rgba(0, 0, 0, 0.05) !important;
118
  }
119
 
120
- .gr-padded {
121
- padding: 15px !important;
 
122
  }
123
 
124
- .badge-container {
125
- display: flex;
126
- justify-content: center;
127
- gap: 12px;
128
- margin: 20px 0;
129
  }
130
 
131
- .how-to-use {
132
- background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
133
- border-radius: 15px;
134
- padding: 25px;
135
- margin-top: 30px;
136
- box-shadow: 0 4px 10px rgba(0, 0, 0, 0.05);
137
  }
138
 
139
- .how-to-use h2 {
140
- color: #667eea;
141
- font-size: 1.8rem;
142
- margin-bottom: 1rem;
143
  }
144
 
145
- .how-to-use ol {
146
- color: #555;
147
- line-height: 1.8;
148
  }
149
 
150
- .how-to-use li {
151
- margin-bottom: 10px;
 
152
  }
153
 
154
- .tip {
155
- background: rgba(102, 126, 234, 0.1);
156
- border-left: 4px solid #667eea;
157
- padding: 15px;
158
- margin-top: 20px;
159
- border-radius: 5px;
160
- color: #555;
161
- font-style: italic;
162
  }
163
  """
164
 
165
- with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
166
- with gr.Column(elem_classes="main-content"):
167
- gr.HTML(
168
- """
169
- <div style="text-align: center; max-width: 800px; margin: 0 auto;">
170
- <h1>FLUX Fast & Furious</h1>
171
- <p class="subtitle">Lightning-fast image generation powered by Hyper-FLUX LoRA</p>
172
- </div>
173
- """
174
- )
175
-
176
- gr.HTML(
177
- """
178
- <div class='badge-container'>
179
- <a href="https://huggingface.co/spaces/openfree/Best-AI" target="_blank">
180
- <img src="https://img.shields.io/static/v1?label=OpenFree&message=BEST%20AI%20Services&color=%230000ff&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="OpenFree badge">
181
- </a>
182
 
183
- <a href="https://discord.gg/openfreeai" target="_blank">
184
- <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=white&style=for-the-badge" alt="Discord badge">
185
- </a>
186
- </div>
187
- """
188
- )
189
-
190
  with gr.Row():
191
- with gr.Column(scale=3):
192
- with gr.Group():
193
- prompt = gr.Textbox(
194
- label="✨ Your Image Description",
195
- placeholder="E.g., A serene landscape with mountains and a lake at sunset",
196
- lines=3
 
 
 
 
 
 
 
 
 
 
 
 
 
197
  )
 
 
 
 
198
 
199
- with gr.Accordion("🎨 Advanced Settings", open=False):
200
- with gr.Group():
201
- with gr.Row():
202
- height = gr.Slider(label="Height", minimum=256, maximum=1152, step=64, value=1024)
203
- width = gr.Slider(label="Width", minimum=256, maximum=1152, step=64, value=1024)
204
-
205
- with gr.Row():
206
- steps = gr.Slider(label="Inference Steps", minimum=6, maximum=25, step=1, value=8)
207
- scales = gr.Slider(label="Guidance Scale", minimum=0.0, maximum=5.0, step=0.1, value=3.5)
208
-
209
- seed = gr.Number(label="Seed (for reproducibility)", value=3413, precision=0)
210
 
211
- generate_btn = gr.Button("🚀 Generate Image", variant="primary", scale=1)
 
 
 
 
 
 
 
 
 
 
 
212
 
213
- with gr.Column(scale=4):
214
- output = gr.Image(label="🎨 Your Generated Image")
215
-
216
- gr.HTML(
217
- """
218
- <div class="how-to-use">
219
- <h2>📖 How to Use</h2>
220
- <ol>
221
- <li>✍️ Enter a detailed description of the image you want to create</li>
222
- <li>⚙️ Adjust advanced settings if desired (tap to expand)</li>
223
- <li>🎯 Tap "Generate Image" and watch the magic happen!</li>
224
- </ol>
225
- <div class="tip">
226
- 💡 <strong>Pro Tip:</strong> Be specific in your description for best results! Include details about style, mood, colors, and composition.
227
- </div>
228
- </div>
229
- """
230
- )
231
-
232
- @spaces.GPU
233
- def process_image(height, width, steps, scales, prompt, seed):
234
- global pipe
235
- with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
236
- return pipe(
237
- prompt=[prompt],
238
- generator=torch.Generator().manual_seed(int(seed)),
239
- num_inference_steps=int(steps),
240
- guidance_scale=float(scales),
241
- height=int(height),
242
- width=int(width),
243
- max_sequence_length=256
244
- ).images[0]
245
-
246
- generate_btn.click(
247
- process_image,
248
- inputs=[height, width, steps, scales, prompt, seed],
249
- outputs=output
250
  )
251
 
252
- if __name__ == "__main__":
253
- demo.launch()
 
 
 
1
  import os
 
 
 
 
 
 
 
 
 
 
2
  import gradio as gr
3
+ import numpy as np
4
+ import random
5
+ import spaces
6
  import torch
7
+ from diffusers.pipelines.glm_image import GlmImagePipeline
8
+ from PIL import Image
9
+
10
# Inference dtype and target device; fall back to CPU when no GPU is present.
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Largest seed accepted by the seed slider / torch generators.
MAX_SEED = np.iinfo(np.int32).max
# Upper bound for the width/height sliders, in pixels.
MAX_IMAGE_SIZE = 2048

# Load the GLM-Image pipeline once at import time. Use the `dtype` and
# `device` computed above instead of hard-coding bfloat16/"cuda": previously
# `dtype`/`device` were dead values and `.to("cuda")` crashed on CPU-only
# hosts before the UI could even start.
pipe = GlmImagePipeline.from_pretrained(
    "zai-org/GLM-Image",
    torch_dtype=dtype,
).to(device)
20
+
21
+
22
@spaces.GPU(duration=120)
def infer(prompt, input_images=None, seed=42, randomize_seed=False, width=1024, height=1024,
          num_inference_steps=50, guidance_scale=1.5, progress=gr.Progress(track_tqdm=True)):
    """Generate one image with the GLM-Image pipeline.

    Args:
        prompt: Text description of the desired image.
        input_images: Optional gallery items — PIL images, file paths, or
            ``(image, caption)`` tuples — used for image-conditioned generation.
        seed: RNG seed; replaced by a random draw when ``randomize_seed``.
        randomize_seed: Draw a fresh seed in ``[0, MAX_SEED]`` when True.
        width, height: Requested output size; snapped down to multiples of 32.
        num_inference_steps: Number of diffusion steps.
        guidance_scale: Classifier-free guidance strength.
        progress: Gradio progress tracker (tqdm-linked).

    Returns:
        tuple: (generated PIL image, seed actually used).
    """
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # The pipeline requires dimensions divisible by 32.
    width = (width // 32) * 32
    height = (height // 32) * 32

    # Seed on the module-level `device` instead of a hard-coded "cuda"
    # (which raised on CPU-only hosts); int() guards against a float value
    # arriving from the slider component.
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Normalize gallery items to a plain list of RGB PIL images.
    image_list = None
    if input_images:
        image_list = []
        for item in input_images:
            img = item[0] if isinstance(item, tuple) else item
            if isinstance(img, str):
                img = Image.open(img).convert("RGB")
            elif isinstance(img, Image.Image):
                img = img.convert("RGB")
            image_list.append(img)

    pipe_kwargs = {
        "prompt": prompt,
        "height": height,
        "width": width,
        "num_inference_steps": num_inference_steps,
        "guidance_scale": guidance_scale,
        "generator": generator,
    }

    # Only pass `image` when the user actually supplied conditioning images.
    if image_list is not None:
        pipe_kwargs["image"] = image_list

    image = pipe(**pipe_kwargs).images[0]

    return image, seed
59
+
60
+
61
def update_dimensions_from_image(image_list):
    """Fit the width/height sliders to the first uploaded image.

    Scales the first gallery image so its longer side is 1024, rounds both
    sides to multiples of 32, and clamps them to [256, MAX_IMAGE_SIZE].
    Returns the defaults (1024, 1024) when the gallery is empty.
    """
    if not image_list:
        return 1024, 1024

    # Gallery entries may be (image, caption) tuples or bare images/paths.
    first = image_list[0]
    picture = first[0] if isinstance(first, tuple) else first

    if isinstance(picture, str):
        picture = Image.open(picture)

    w, h = picture.size
    ratio = w / h

    # Pin the longer side to 1024 and derive the other from the aspect ratio.
    if ratio >= 1:
        target_w, target_h = 1024, int(1024 / ratio)
    else:
        target_w, target_h = int(1024 * ratio), 1024

    # Snap to the 32-pixel grid the pipeline expects.
    target_w = round(target_w / 32) * 32
    target_h = round(target_h / 32) * 32

    # Keep the result inside the slider range.
    target_w = max(256, min(MAX_IMAGE_SIZE, target_w))
    target_h = max(256, min(MAX_IMAGE_SIZE, target_h))

    return target_w, target_h
88
+
89
+ css = """
90
+ /* Glassmorphism Style */
91
+ body, .gradio-container {
92
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 50%, #f093fb 100%) !important;
93
  min-height: 100vh;
94
  }
95
 
96
+ #col-container {
97
+ margin: 0 auto;
98
+ max-width: 1200px;
99
+ padding: 20px;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
  }
101
 
102
+ .gr-panel, .gr-box, .gr-form, .gr-input, .gr-button,
103
+ .block, .form, .container, [class*="panel"] {
104
+ background: rgba(255, 255, 255, 0.15) !important;
105
+ backdrop-filter: blur(20px) !important;
106
+ -webkit-backdrop-filter: blur(20px) !important;
107
+ border: 1px solid rgba(255, 255, 255, 0.3) !important;
108
+ border-radius: 16px !important;
109
+ box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2) !important;
110
  }
111
 
112
  .gr-button-primary {
113
+ background: linear-gradient(135deg, rgba(102, 126, 234, 0.8), rgba(118, 75, 162, 0.8)) !important;
114
+ border: 1px solid rgba(255, 255, 255, 0.4) !important;
115
  color: white !important;
116
+ font-weight: 600 !important;
 
 
 
117
  transition: all 0.3s ease !important;
 
118
  }
119
 
120
  .gr-button-primary:hover {
121
+ background: linear-gradient(135deg, rgba(102, 126, 234, 1), rgba(118, 75, 162, 1)) !important;
122
  transform: translateY(-2px) !important;
123
+ box-shadow: 0 12px 40px rgba(102, 126, 234, 0.4) !important;
124
  }
125
 
126
+ textarea, input[type="text"], input[type="number"] {
127
+ background: rgba(255, 255, 255, 0.1) !important;
128
+ border: 1px solid rgba(255, 255, 255, 0.25) !important;
129
+ border-radius: 12px !important;
130
+ color: white !important;
131
+ }
132
+
133
+ textarea::placeholder, input::placeholder {
134
+ color: rgba(255, 255, 255, 0.6) !important;
135
  }
136
 
137
+ label, .label-wrap, span, p {
138
+ color: rgba(255, 255, 255, 0.9) !important;
 
139
  }
140
 
141
+ h1, h2, h3, .markdown h1 {
142
+ color: white !important;
143
+ text-shadow: 0 2px 10px rgba(0, 0, 0, 0.3) !important;
 
 
144
  }
145
 
146
+ .gallery-container img {
147
+ object-fit: contain;
148
+ border-radius: 12px !important;
149
  }
150
 
151
+ input[type="range"] {
152
+ accent-color: #667eea !important;
 
 
 
153
  }
154
 
155
+ .gr-accordion {
156
+ background: rgba(255, 255, 255, 0.1) !important;
157
+ border-radius: 12px !important;
 
 
 
158
  }
159
 
160
+ ::-webkit-scrollbar {
161
+ width: 8px;
 
 
162
  }
163
 
164
+ ::-webkit-scrollbar-track {
165
+ background: rgba(255, 255, 255, 0.1);
166
+ border-radius: 4px;
167
  }
168
 
169
+ ::-webkit-scrollbar-thumb {
170
+ background: rgba(255, 255, 255, 0.3);
171
+ border-radius: 4px;
172
  }
173
 
174
+ .image-container, .image-frame {
175
+ border-radius: 16px !important;
176
+ overflow: hidden !important;
 
 
 
 
 
177
  }
178
  """
179
 
180
# UI layout: one centered column with the prompt + controls on the left and
# the result image on the right, styled by the module-level `css` string.
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:

    with gr.Column(elem_id="col-container"):
        gr.Markdown("# ✨ GLM-Image Generator")

        with gr.Row():
            with gr.Column():
                # Free-text prompt; submitting it triggers generation (see
                # the gr.on() wiring below).
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=4,
                    placeholder="Enter your prompt...",
                    container=False,
                    scale=3
                )

                run_button = gr.Button("🎨 Generate", variant="primary", scale=1)

                # Optional conditioning images (PIL) forwarded to infer().
                with gr.Accordion("📷 Input Images", open=True):
                    input_images = gr.Gallery(
                        label="Input Image(s)",
                        type="pil",
                        columns=3,
                        rows=1,
                        elem_classes="gallery-container"
                    )

                with gr.Accordion("⚙️ Settings", open=False):
                    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

                    # Bounds/step mirror the pipeline's 32-px grid and
                    # MAX_IMAGE_SIZE cap.
                    with gr.Row():
                        width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                        height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)

                    with gr.Row():
                        num_inference_steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=50)
                        guidance_scale = gr.Slider(label="Guidance", minimum=0.0, maximum=10.0, step=0.1, value=1.5)

            with gr.Column():
                result = gr.Image(label="Result", show_label=False)

    # On upload, auto-fit the size sliders to the first image's aspect ratio.
    input_images.upload(
        fn=update_dimensions_from_image,
        inputs=[input_images],
        outputs=[width, height]
    )

    # Both the button click and Enter-in-prompt run inference; the seed
    # output reflects the randomized seed back into the slider.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, input_images, seed, randomize_seed, width, height, num_inference_steps, guidance_scale],
        outputs=[result, seed]
    )
234
 
235
if __name__ == "__main__":
    # Blocks.launch() does not accept a `css` keyword — the stylesheet is
    # already applied via gr.Blocks(css=...); passing it here raised a
    # TypeError before the server could start.
    demo.launch()