fantasticstar committed on
Commit
a247644
·
verified ·
1 Parent(s): e938f00

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -29
app.py CHANGED
@@ -11,16 +11,12 @@ import spaces
11
  from comfy import model_management
12
 
13
  CHROMA_VERSION = "chroma-unlocked-v48-detail-calibrated.safetensors"
14
- #LORA_VERSION = "DatasetTest4.safetensors"
15
  # Download required models
16
  t5_path = hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="t5xxl_fp8_e4m3fn.safetensors", local_dir="models/text_encoders/")
17
  vae_path = hf_hub_download(repo_id="lodestones/Chroma", filename="ae.safetensors", local_dir="models/vae")
18
  unet_path = hf_hub_download(repo_id="lodestones/Chroma", filename=CHROMA_VERSION, local_dir="models/unet")
19
 
20
- #vae_path = hf_hub_download(repo_id="lodestones/Chroma", filename="ae.safetensors", local_dir="models/vae")
21
- #unet_path = hf_hub_download(repo_id="silveroxides/Chroma-Misc-Models", filename=CHROMA_VERSION, local_dir="models/unet")
22
- #lora_path = hf_hub_download(repo_id="Alme995/DatasetTest_Loras_Chroma", filename=LORA_VERSION, local_dir="models/loras")
23
-
24
  # Example prompts with their parameters
25
  EXAMPLES = [
26
  [
@@ -138,7 +134,7 @@ from nodes import (
138
  CLIPTextEncode,
139
  CLIPLoader,
140
  VAEDecode,
141
- LoraLoaderModelOnly,
142
  VAELoader,
143
  SaveImage,
144
  )
@@ -160,20 +156,16 @@ saveimage = SaveImage()
160
 
161
  # Load models
162
  cliploader_78 = cliploader.load_clip(
163
- #clip_name="flan-t5-xxl_float8_e4m3fn_scaled_stochastic.safetensors", type="chroma", device="default"
164
  clip_name="t5xxl_fp8_e4m3fn.safetensors", type="chroma", device="default"
165
  )
166
  t5tokenizeroptions_82 = t5tokenizeroptions.set_options(
167
- min_padding=0,
168
- min_length=0,
169
- clip=get_value_at_index(cliploader_78, 0)
170
  )
171
  unetloader_76 = unetloader.load_unet(
172
- model_name=CHROMA_VERSION, chroma_hybrid_large=True, radiance_hybrid_large=False, chroma_hybrid_small=False, radiance_hybrid_small=False, wan=False, qwen=False
173
  )
174
  vaeloader_80 = vaeloader.load_vae(vae_name="ae.safetensors")
175
 
176
-
177
  # Add all the models that load a safetensors file
178
  model_loaders = [cliploader_78, unetloader_76, vaeloader_80]
179
 
@@ -244,7 +236,6 @@ def generate_image(prompt, negative_prompt, width, height, steps, cfg, seed):
244
  images=get_value_at_index(vaedecode_79, 0),
245
  )
246
 
247
-
248
  # Return the path to the saved image
249
  saved_path = f"output/{saved['ui']['images'][0]['filename']}"
250
  return saved_path
@@ -252,29 +243,25 @@ def generate_image(prompt, negative_prompt, width, height, steps, cfg, seed):
252
  # Create Gradio interface
253
  with gr.Blocks() as app:
254
  gr.Markdown(f"""
255
- # Chroma
256
-
257
- Model: [Chroma](https://huggingface.co/lodestones/Chroma) by [lodestones](https://huggingface.co/lodestones)
258
-
259
- Chroma1-HD-flash-heun : {CHROMA_VERSION}
260
-
261
- Run any ComfyUI Workflow on Spaces: [ComfyUI Workflows](https://huggingface.co/blog/run-comfyui-workflows-on-spaces)
262
-
263
- Space Author: [GitHub](https://github.com/gokayfem) | [X.com](https://x.com/gokayfem)
264
- """)
265
 
266
  with gr.Row():
267
  with gr.Column():
268
  prompt = gr.Textbox(
269
  label="Prompt",
270
  placeholder="Enter your prompt here...",
271
- value="Overlaid at the center of the image is a title text that says \"CHROMA1-FLASH-HEUN\" in a large white 3D letters. This is a close-up photograph from a nature documentary capturing the right side of the face of a tiger. The photograph is centered on its highly detailed and speckled eye surrounded by intricately detailed fur. Amateur photography. unfiltered. natural lighting. anatomically correct. subtle shadows. perfect composition. highest quality. detailed. sharp focus",
272
  lines=3
273
  )
274
  negative_prompt = gr.Textbox(
275
  label="Negative Prompt",
276
  placeholder="Enter negative prompt here...",
277
- value="This greyscale unfinished sketch has bad proportions, is featureless and disfigured. It is a blurry ugly mess and with excessive gaussian blur. It is riddled with watermarks and signatures. Everything is smudged with leaking colors and nonsensical orientation of objects. Messy and abstract image filled with artifacts disrupt the coherency of the overall composition. The image has extreme chromatic abberations and inconsistent lighting. Dull, monochrome colors and countless artistic errors.",
278
  lines=2
279
  )
280
 
@@ -297,16 +284,16 @@ Space Author: [GitHub](https://github.com/gokayfem) | [X.com](https://x.com/goka
297
  with gr.Row():
298
  steps = gr.Slider(
299
  minimum=1,
300
- maximum=50,
301
- value=12,
302
  step=1,
303
  label="Steps"
304
  )
305
  cfg = gr.Slider(
306
  minimum=1,
307
  maximum=20,
308
- value=1,
309
- step=0.01,
310
  label="CFG Scale"
311
  )
312
  seed = gr.Number(
 
11
  from comfy import model_management
12
 
13
  CHROMA_VERSION = "chroma-unlocked-v48-detail-calibrated.safetensors"
14
+
15
  # Download required models
16
  t5_path = hf_hub_download(repo_id="comfyanonymous/flux_text_encoders", filename="t5xxl_fp8_e4m3fn.safetensors", local_dir="models/text_encoders/")
17
  vae_path = hf_hub_download(repo_id="lodestones/Chroma", filename="ae.safetensors", local_dir="models/vae")
18
  unet_path = hf_hub_download(repo_id="lodestones/Chroma", filename=CHROMA_VERSION, local_dir="models/unet")
19
 
 
 
 
 
20
  # Example prompts with their parameters
21
  EXAMPLES = [
22
  [
 
134
  CLIPTextEncode,
135
  CLIPLoader,
136
  VAEDecode,
137
+ UNETLoader,
138
  VAELoader,
139
  SaveImage,
140
  )
 
156
 
157
  # Load models
158
  cliploader_78 = cliploader.load_clip(
 
159
  clip_name="t5xxl_fp8_e4m3fn.safetensors", type="chroma", device="default"
160
  )
161
  t5tokenizeroptions_82 = t5tokenizeroptions.set_options(
162
+ min_padding=1, min_length=0, clip=get_value_at_index(cliploader_78, 0)
 
 
163
  )
164
  unetloader_76 = unetloader.load_unet(
165
+ unet_name=CHROMA_VERSION, weight_dtype="fp8_e4m3fn"
166
  )
167
  vaeloader_80 = vaeloader.load_vae(vae_name="ae.safetensors")
168
 
 
169
  # Add all the models that load a safetensors file
170
  model_loaders = [cliploader_78, unetloader_76, vaeloader_80]
171
 
 
236
  images=get_value_at_index(vaedecode_79, 0),
237
  )
238
 
 
239
  # Return the path to the saved image
240
  saved_path = f"output/{saved['ui']['images'][0]['filename']}"
241
  return saved_path
 
243
  # Create Gradio interface
244
  with gr.Blocks() as app:
245
  gr.Markdown(f"""
246
+ # Chroma
247
+ Model: [Chroma](https://huggingface.co/lodestones/Chroma) by [lodestones](https://huggingface.co/lodestones)
248
+ Chroma Version: {CHROMA_VERSION}
249
+ Run any ComfyUI Workflow on Spaces: [ComfyUI Workflows](https://huggingface.co/blog/run-comfyui-workflows-on-spaces)
250
+ Compare Outputs: [DualView](https://dualview.ai) - Free side-by-side comparison tool
251
+ Space Author: [GitHub](https://github.com/gokayfem) | [X.com](https://x.com/gokayfem)
252
+ """)
 
 
 
253
 
254
  with gr.Row():
255
  with gr.Column():
256
  prompt = gr.Textbox(
257
  label="Prompt",
258
  placeholder="Enter your prompt here...",
 
259
  lines=3
260
  )
261
  negative_prompt = gr.Textbox(
262
  label="Negative Prompt",
263
  placeholder="Enter negative prompt here...",
264
+ value="low quality, ugly, unfinished, out of focus, deformed, disfigure, blurry, smudged, restricted palette, flat colors",
265
  lines=2
266
  )
267
 
 
284
  with gr.Row():
285
  steps = gr.Slider(
286
  minimum=1,
287
+ maximum=100,
288
+ value=26,
289
  step=1,
290
  label="Steps"
291
  )
292
  cfg = gr.Slider(
293
  minimum=1,
294
  maximum=20,
295
+ value=4,
296
+ step=0.5,
297
  label="CFG Scale"
298
  )
299
  seed = gr.Number(