akhaliq HF Staff commited on
Commit
990705c
·
verified ·
1 Parent(s): e8ce171

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. app.py +54 -110
  2. requirements.txt +14 -5
app.py CHANGED
@@ -2,76 +2,37 @@ import gradio as gr
2
  import torch
3
  import spaces
4
  from diffusers import Flux2Pipeline
5
- from huggingface_hub import get_token
6
- import requests
7
- import io
8
 
9
  # Model configuration
10
- repo_id = "diffusers/FLUX.2-dev-bnb-4bit"
11
  torch_dtype = torch.bfloat16
12
 
13
- # Global pipeline variable
14
- pipe = None
15
-
16
- def download_model(progress=gr.Progress()):
17
- """Download and cache the model before inference"""
18
- global pipe
19
- if pipe is not None:
20
- return "✓ Model already loaded and ready!"
21
-
22
- progress(0.1, desc="Initializing download...")
23
- progress(0.3, desc="Downloading model weights...")
24
-
25
- # Download model to CPU first (no GPU needed for download)
26
- pipe = Flux2Pipeline.from_pretrained(
27
- repo_id, text_encoder=None, torch_dtype=torch_dtype
28
- )
29
-
30
- progress(1.0, desc="Download complete!")
31
- return "✓ Model downloaded successfully! Ready to generate images."
32
-
33
- def remote_text_encoder(prompts):
34
- response = requests.post(
35
- "https://remote-text-encoder-flux-2.huggingface.co/predict",
36
- json={"prompt": prompts},
37
- headers={
38
- "Authorization": f"Bearer {get_token()}",
39
- "Content-Type": "application/json"
40
- }
41
- )
42
- prompt_embeds = torch.load(io.BytesIO(response.content))
43
- return prompt_embeds.to("cuda")
44
 
45
  @spaces.GPU(duration=120)
46
- def generate_image(prompt, progress=gr.Progress()):
47
- global pipe
48
-
49
  if not prompt:
50
  gr.Warning("Please enter a prompt")
51
  return None
52
 
53
- if pipe is None:
54
- gr.Warning("Please download the model first by clicking 'Download Model'")
55
- return None
56
-
57
- progress(0.1, desc="Moving model to GPU...")
58
- pipe = pipe.to("cuda")
59
-
60
- progress(0.3, desc="Encoding prompt...")
61
- prompt_embeds = remote_text_encoder(prompt)
62
 
63
- generator = torch.Generator(device="cuda").manual_seed(42)
64
 
65
- progress(0.5, desc="Generating...")
66
 
67
  image = pipe(
68
- prompt_embeds=prompt_embeds,
69
  generator=generator,
70
- num_inference_steps=50,
71
- guidance_scale=4.0,
72
  ).images[0]
73
 
74
- progress(1.0, desc="Done")
75
  return image
76
 
77
  # Minimal Apple-inspired CSS
@@ -150,33 +111,6 @@ apple_css = """
150
  background: #0077ed !important;
151
  }
152
 
153
- .download-btn {
154
- background: #34c759 !important;
155
- border: none !important;
156
- border-radius: 980px !important;
157
- padding: 16px 32px !important;
158
- font-size: 17px !important;
159
- font-weight: 400 !important;
160
- color: white !important;
161
- cursor: pointer !important;
162
- transition: all 0.2s ease !important;
163
- margin-top: 16px !important;
164
- }
165
-
166
- .download-btn:hover {
167
- background: #30d158 !important;
168
- }
169
-
170
- .status-box {
171
- background: #f5f5f7 !important;
172
- border: none !important;
173
- border-radius: 12px !important;
174
- padding: 16px !important;
175
- font-size: 15px !important;
176
- color: #1d1d1f !important;
177
- text-align: center !important;
178
- }
179
-
180
  .output-section {
181
  margin-top: 40px;
182
  }
@@ -204,7 +138,7 @@ apple_css = """
204
  }
205
 
206
  /* Hide labels for cleaner look */
207
- .input-section label, .output-section label {
208
  display: none !important;
209
  }
210
 
@@ -219,42 +153,59 @@ apple_css = """
219
  padding: 0 !important;
220
  }
221
 
222
- .button-row {
223
- display: flex;
224
- gap: 12px;
 
 
 
 
 
 
 
 
225
  }
226
  """
227
 
228
- with gr.Blocks(title="Image Studio") as demo:
229
 
230
  gr.HTML("""
231
  <div class="header-text">
232
- <h1>Image Studio</h1>
233
- <p>Describe what you imagine.</p>
234
  </div>
235
  """)
236
 
237
  with gr.Column(elem_classes="input-section"):
238
- status_text = gr.Textbox(
239
- value="⏳ Model not loaded. Click 'Download Model' to start.",
240
- show_label=False,
241
- interactive=False,
242
- elem_classes="status-box"
243
- )
244
-
245
- download_btn = gr.Button(
246
- "Download Model",
247
- elem_classes="download-btn",
248
- variant="secondary"
249
- )
250
-
251
  prompt = gr.Textbox(
252
  placeholder="A peaceful mountain lake at sunrise...",
253
- lines=2,
254
  show_label=False,
255
  container=False
256
  )
257
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
258
  generate_btn = gr.Button(
259
  "Create",
260
  elem_classes="generate-btn",
@@ -274,23 +225,16 @@ with gr.Blocks(title="Image Studio") as demo:
274
  </div>
275
  """)
276
 
277
- download_btn.click(
278
- fn=download_model,
279
- inputs=[],
280
- outputs=[status_text],
281
- api_visibility="private"
282
- )
283
-
284
  generate_btn.click(
285
  fn=generate_image,
286
- inputs=[prompt],
287
  outputs=[output_image],
288
  api_visibility="public"
289
  )
290
 
291
  prompt.submit(
292
  fn=generate_image,
293
- inputs=[prompt],
294
  outputs=[output_image],
295
  api_visibility="private"
296
  )
 
2
import torch
import spaces
from diffusers import Flux2Pipeline

# Model configuration
# NOTE(review): full-precision FLUX.2-dev weights (the 4-bit bnb variant was
# dropped in this commit); requires accepting the model license on the Hub.
repo_id = "black-forest-labs/FLUX.2-dev"
torch_dtype = torch.bfloat16

# Download and initialize the model once at import/startup time, so the first
# request does not pay the download cost.
pipe = Flux2Pipeline.from_pretrained(
    repo_id, torch_dtype=torch_dtype
)
# Keep GPU memory low: submodules are moved to the accelerator only while
# they execute, then offloaded back to CPU.
pipe.enable_model_cpu_offload()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
@spaces.GPU(duration=120)
def generate_image(prompt, seed, steps, guidance_scale, progress=gr.Progress()):
    """Generate one image from *prompt* with the module-level FLUX.2 pipeline.

    Args:
        prompt: Text description of the desired image; empty input is rejected
            with a Gradio warning and ``None`` is returned.
        seed: RNG seed (coerced to ``int``; gr.Number may deliver a float).
        steps: Number of denoising steps (coerced to ``int``; slider value).
        guidance_scale: Classifier-free guidance strength, passed through as-is.
        progress: Gradio progress tracker (injected by Gradio).

    Returns:
        A PIL image on success, or ``None`` when the prompt is empty.
    """
    if not prompt:
        gr.Warning("Please enter a prompt")
        return None

    progress(0.2, desc="Preparing generation...")

    # CPU generator: reproducible regardless of which device submodules are
    # offloaded to, and compatible with enable_model_cpu_offload().
    generator = torch.Generator(device="cpu").manual_seed(int(seed))

    progress(0.4, desc="Generating image...")

    image = pipe(
        prompt=prompt,
        generator=generator,
        num_inference_steps=int(steps),
        guidance_scale=guidance_scale,
    ).images[0]

    progress(1.0, desc="Done!")
    return image
37
 
38
  # Minimal Apple-inspired CSS
 
111
  background: #0077ed !important;
112
  }
113
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
114
  .output-section {
115
  margin-top: 40px;
116
  }
 
138
  }
139
 
140
  /* Hide labels for cleaner look */
141
+ .input-section label {
142
  display: none !important;
143
  }
144
 
 
153
  padding: 0 !important;
154
  }
155
 
156
+ .settings-section {
157
+ background: #f5f5f7 !important;
158
+ border-radius: 16px !important;
159
+ padding: 20px !important;
160
+ margin-top: 16px !important;
161
+ }
162
+
163
+ .settings-section label {
164
+ color: #1d1d1f !important;
165
+ font-size: 14px !important;
166
+ font-weight: 500 !important;
167
  }
168
  """
169
 
170
+ with gr.Blocks(title="FLUX.2 Image Studio") as demo:
171
 
172
  gr.HTML("""
173
  <div class="header-text">
174
+ <h1>FLUX.2 Studio</h1>
175
+ <p>Create stunning images with FLUX.2-dev</p>
176
  </div>
177
  """)
178
 
179
  with gr.Column(elem_classes="input-section"):
 
 
 
 
 
 
 
 
 
 
 
 
 
180
  prompt = gr.Textbox(
181
  placeholder="A peaceful mountain lake at sunrise...",
182
+ lines=3,
183
  show_label=False,
184
  container=False
185
  )
186
 
187
+ with gr.Accordion("Advanced Settings", open=False):
188
+ with gr.Row():
189
+ seed = gr.Number(
190
+ label="Seed",
191
+ value=42,
192
+ precision=0
193
+ )
194
+ steps = gr.Slider(
195
+ label="Inference Steps",
196
+ minimum=10,
197
+ maximum=100,
198
+ value=50,
199
+ step=1
200
+ )
201
+ guidance_scale = gr.Slider(
202
+ label="Guidance Scale",
203
+ minimum=1.0,
204
+ maximum=10.0,
205
+ value=4.0,
206
+ step=0.5
207
+ )
208
+
209
  generate_btn = gr.Button(
210
  "Create",
211
  elem_classes="generate-btn",
 
225
  </div>
226
  """)
227
 
 
 
 
 
 
 
 
228
  generate_btn.click(
229
  fn=generate_image,
230
+ inputs=[prompt, seed, steps, guidance_scale],
231
  outputs=[output_image],
232
  api_visibility="public"
233
  )
234
 
235
  prompt.submit(
236
  fn=generate_image,
237
+ inputs=[prompt, seed, steps, guidance_scale],
238
  outputs=[output_image],
239
  api_visibility="private"
240
  )
requirements.txt CHANGED
@@ -1,15 +1,24 @@
1
  spaces
2
- huggingface_hub
3
  gradio
4
  torch
5
- requests
6
  git+https://github.com/huggingface/diffusers
7
  git+https://github.com/huggingface/transformers
8
  sentencepiece
9
  accelerate
10
  tokenizers
 
 
 
11
  torchvision
12
  torchaudio
13
- Pillow
14
- datasets
15
- bitsandbytes
 
 
 
 
 
 
 
 
 
1
  spaces
 
2
  gradio
3
  torch
 
4
  git+https://github.com/huggingface/diffusers
5
  git+https://github.com/huggingface/transformers
6
  sentencepiece
7
  accelerate
8
  tokenizers
9
+ datasets
10
+ requests
11
+ Pillow
12
  torchvision
13
  torchaudio
14
+ numpy
15
+ scipy
16
+ opencv-python
17
+ matplotlib
18
+ safetensors
19
+ omegaconf
20
+ ftfy
21
+ regex
22
+ timm
23
+ xformers
24
+ invisible-watermark