LPX55 committed on
Commit
693f8ca
·
verified ·
1 Parent(s): 1d751a0

Update app_v2.py

Browse files
Files changed (1) hide show
  1. app_v2.py +3 -2
app_v2.py CHANGED
@@ -6,7 +6,7 @@ from diffusers.hooks import apply_group_offloading
6
  from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
7
  from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
8
  from transformers import T5EncoderModel
9
- from transformers import LlavaForConditionalGeneration, TextIteratorStreamer, AutoProcessor
10
  from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
11
  from liger_kernel.transformers import apply_liger_kernel_to_llama
12
  from PIL import Image
@@ -95,6 +95,7 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
95
  h = h - h % 32
96
  control_image = control_image.resize((int(w * scale), int(h * scale)), resample=2) # Resample.BILINEAR
97
  print("Size to: " + str(control_image.size[0]) + ", " + str(control_image.size[1]))
 
98
  with torch.inference_mode():
99
  image = pipe(
100
  generator=generator,
@@ -178,7 +179,7 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as iface:
178
  caption_button = gr.Button("Generate Caption", variant="secondary")
179
  with gr.Column(scale=1):
180
  seed = gr.Slider(0, MAX_SEED, value=42, label="Seed", step=1)
181
- steps = gr.Slider(2, 16, value=8, label="Steps")
182
  controlnet_conditioning_scale = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")
183
  guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
184
  guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
 
6
  from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
7
  from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig
8
  from transformers import T5EncoderModel
9
+ from transformers import LlavaForConditionalGeneration, TextIteratorStreamer, AutoProcessor, AutoTokenizer
10
  from transformers import BitsAndBytesConfig as TransformersBitsAndBytesConfig
11
  from liger_kernel.transformers import apply_liger_kernel_to_llama
12
  from PIL import Image
 
95
  h = h - h % 32
96
  control_image = control_image.resize((int(w * scale), int(h * scale)), resample=2) # Resample.BILINEAR
97
  print("Size to: " + str(control_image.size[0]) + ", " + str(control_image.size[1]))
98
+ print("Cond Prompt: " + str(prompt))
99
  with torch.inference_mode():
100
  image = pipe(
101
  generator=generator,
 
179
  caption_button = gr.Button("Generate Caption", variant="secondary")
180
  with gr.Column(scale=1):
181
  seed = gr.Slider(0, MAX_SEED, value=42, label="Seed", step=1)
182
+ steps = gr.Slider(2, 16, value=8, label="Steps", step=1)
183
  controlnet_conditioning_scale = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")
184
  guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
185
  guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")