Spaces:
Running
on
Zero
Running
on
Zero
Update app_v4.py
Browse files
app_v4.py
CHANGED
|
@@ -27,26 +27,37 @@ logger = logging.getLogger(__name__)
|
|
| 27 |
#############################
|
| 28 |
|
| 29 |
presets = {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
"Strict Upscale": {
|
| 31 |
"scale": 1.0,
|
| 32 |
"steps": 8,
|
| 33 |
"controlnet_conditioning_scale": 0.75,
|
| 34 |
"guidance_scale": 4.0,
|
|
|
|
| 35 |
"guidance_end": 0.9
|
| 36 |
},
|
| 37 |
"Creative Upscale": {
|
| 38 |
-
"scale":
|
| 39 |
"steps": 6,
|
| 40 |
"controlnet_conditioning_scale": 0.42,
|
| 41 |
"guidance_scale": 3.0,
|
|
|
|
| 42 |
"guidance_end": 0.5
|
| 43 |
},
|
| 44 |
-
"
|
| 45 |
"scale": 1.25,
|
| 46 |
-
"steps":
|
| 47 |
-
"controlnet_conditioning_scale": 0.
|
| 48 |
-
"guidance_scale":
|
| 49 |
-
"
|
|
|
|
| 50 |
}
|
| 51 |
}
|
| 52 |
|
|
@@ -65,14 +76,14 @@ except Exception as e:
|
|
| 65 |
print(f"Error setting memory usage: {e}")
|
| 66 |
|
| 67 |
text_encoder_2_unquant = T5EncoderModel.from_pretrained(
|
| 68 |
-
"
|
| 69 |
subfolder="text_encoder_2",
|
| 70 |
torch_dtype=torch.bfloat16,
|
| 71 |
token=huggingface_token
|
| 72 |
)
|
| 73 |
|
| 74 |
pipe = FluxControlNetPipeline.from_pretrained(
|
| 75 |
-
"
|
| 76 |
torch_dtype=torch.bfloat16,
|
| 77 |
text_encoder_2=text_encoder_2_unquant,
|
| 78 |
token=huggingface_token
|
|
@@ -221,6 +232,7 @@ def process_image(control_image, user_prompt, system_prompt, scale, steps,
|
|
| 221 |
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 222 |
guidance_scale=guidance_scale,
|
| 223 |
seed=seed,
|
|
|
|
| 224 |
guidance_end=guidance_end
|
| 225 |
)
|
| 226 |
|
|
@@ -248,7 +260,7 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
|
|
| 248 |
with gr.Column(scale=1):
|
| 249 |
prompt = gr.Textbox(lines=4, info="Enter your prompt here or wait for auto-generation...", label="Image Description")
|
| 250 |
focus = gr.Textbox(label="Area(s) of Focus", info="e.g. 'face', 'eyes', 'hair', 'clothes', 'background', etc.", value="clothing material, textures, ethnicity")
|
| 251 |
-
scale = gr.Slider(1, 3, value=1, label="Scale (Upscale Factor)", step=0.
|
| 252 |
with gr.Row():
|
| 253 |
generate_button = gr.Button("Generate Image", variant="primary")
|
| 254 |
caption_button = gr.Button("Generate Caption", variant="secondary")
|
|
@@ -260,6 +272,8 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
|
|
| 260 |
steps = gr.Slider(2, 16, value=8, label="Steps", step=1)
|
| 261 |
controlnet_conditioning_scale = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")
|
| 262 |
guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
|
|
|
|
|
|
|
| 263 |
guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
|
| 264 |
original_dimensions = gr.Markdown(value="Original Image Dimensions: N/A") # New output for dimensions
|
| 265 |
|
|
@@ -306,7 +320,7 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
|
|
| 306 |
fn=process_image,
|
| 307 |
inputs=[
|
| 308 |
control_image, prompt, system_prompt, scale, steps,
|
| 309 |
-
controlnet_conditioning_scale, guidance_scale, seed,
|
| 310 |
guidance_end, temperature_slider, max_tokens_slider, log_prompt
|
| 311 |
],
|
| 312 |
outputs=[log_state, generated_image, prompt]
|
|
@@ -340,7 +354,7 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
|
|
| 340 |
preset_radio.change(
|
| 341 |
fn=update_parameters,
|
| 342 |
inputs=[preset_radio],
|
| 343 |
-
outputs=[scale, steps, controlnet_conditioning_scale, guidance_scale, guidance_end]
|
| 344 |
)
|
| 345 |
def hello(profile: gr.OAuthProfile | None) -> str:
|
| 346 |
if profile is None:
|
|
|
|
| 27 |
#############################
|
| 28 |
|
| 29 |
presets = {
|
| 30 |
+
"Default": {
|
| 31 |
+
"scale": 1.25,
|
| 32 |
+
"steps": 6,
|
| 33 |
+
"controlnet_conditioning_scale": 0.62,
|
| 34 |
+
"guidance_scale": 3.3,
|
| 35 |
+
"guidance_start": 0,
|
| 36 |
+
"guidance_end": 0.8
|
| 37 |
+
},
|
| 38 |
"Strict Upscale": {
|
| 39 |
"scale": 1.0,
|
| 40 |
"steps": 8,
|
| 41 |
"controlnet_conditioning_scale": 0.75,
|
| 42 |
"guidance_scale": 4.0,
|
| 43 |
+
"guidance_start": 0,
|
| 44 |
"guidance_end": 0.9
|
| 45 |
},
|
| 46 |
"Creative Upscale": {
|
| 47 |
+
"scale": 1.5,
|
| 48 |
"steps": 6,
|
| 49 |
"controlnet_conditioning_scale": 0.42,
|
| 50 |
"guidance_scale": 3.0,
|
| 51 |
+
"guidance_start": 0,
|
| 52 |
"guidance_end": 0.5
|
| 53 |
},
|
| 54 |
+
"Redux": {
|
| 55 |
"scale": 1.25,
|
| 56 |
+
"steps": 6,
|
| 57 |
+
"controlnet_conditioning_scale": 0.3,
|
| 58 |
+
"guidance_scale": 3.0,
|
| 59 |
+
"guidance_start": 0.05,
|
| 60 |
+
"guidance_end": 0.35
|
| 61 |
}
|
| 62 |
}
|
| 63 |
|
|
|
|
| 76 |
print(f"Error setting memory usage: {e}")
|
| 77 |
|
| 78 |
text_encoder_2_unquant = T5EncoderModel.from_pretrained(
|
| 79 |
+
"buildborderless/FLUX.1-merged_lightning_v2",
|
| 80 |
subfolder="text_encoder_2",
|
| 81 |
torch_dtype=torch.bfloat16,
|
| 82 |
token=huggingface_token
|
| 83 |
)
|
| 84 |
|
| 85 |
pipe = FluxControlNetPipeline.from_pretrained(
|
| 86 |
+
"buildborderless/FLUX.1-M_ControlNet_Upscaler",
|
| 87 |
torch_dtype=torch.bfloat16,
|
| 88 |
text_encoder_2=text_encoder_2_unquant,
|
| 89 |
token=huggingface_token
|
|
|
|
| 232 |
controlnet_conditioning_scale=controlnet_conditioning_scale,
|
| 233 |
guidance_scale=guidance_scale,
|
| 234 |
seed=seed,
|
| 235 |
+
guidance_start=guidance_start,
|
| 236 |
guidance_end=guidance_end
|
| 237 |
)
|
| 238 |
|
|
|
|
| 260 |
with gr.Column(scale=1):
|
| 261 |
prompt = gr.Textbox(lines=4, info="Enter your prompt here or wait for auto-generation...", label="Image Description")
|
| 262 |
focus = gr.Textbox(label="Area(s) of Focus", info="e.g. 'face', 'eyes', 'hair', 'clothes', 'background', etc.", value="clothing material, textures, ethnicity")
|
| 263 |
+
scale = gr.Slider(1, 3, value=1, label="Scale (Upscale Factor)", step=0.05)
|
| 264 |
with gr.Row():
|
| 265 |
generate_button = gr.Button("Generate Image", variant="primary")
|
| 266 |
caption_button = gr.Button("Generate Caption", variant="secondary")
|
|
|
|
| 272 |
steps = gr.Slider(2, 16, value=8, label="Steps", step=1)
|
| 273 |
controlnet_conditioning_scale = gr.Slider(0, 1, value=0.6, label="ControlNet Scale")
|
| 274 |
guidance_scale = gr.Slider(1, 30, value=3.5, label="Guidance Scale")
|
| 275 |
+
guidance_start = gr.Slider(0, 1, value=0.0, label="Guidance Start")
|
| 276 |
+
|
| 277 |
guidance_end = gr.Slider(0, 1, value=1.0, label="Guidance End")
|
| 278 |
original_dimensions = gr.Markdown(value="Original Image Dimensions: N/A") # New output for dimensions
|
| 279 |
|
|
|
|
| 320 |
fn=process_image,
|
| 321 |
inputs=[
|
| 322 |
control_image, prompt, system_prompt, scale, steps,
|
| 323 |
+
controlnet_conditioning_scale, guidance_scale, seed, guidance_start,
|
| 324 |
guidance_end, temperature_slider, max_tokens_slider, log_prompt
|
| 325 |
],
|
| 326 |
outputs=[log_state, generated_image, prompt]
|
|
|
|
| 354 |
preset_radio.change(
|
| 355 |
fn=update_parameters,
|
| 356 |
inputs=[preset_radio],
|
| 357 |
+
outputs=[scale, steps, controlnet_conditioning_scale, guidance_scale, guidance_start, guidance_end]
|
| 358 |
)
|
| 359 |
def hello(profile: gr.OAuthProfile | None) -> str:
|
| 360 |
if profile is None:
|