Spaces: Runtime error
Update app.py
app.py CHANGED

@@ -1,109 +1,107 @@
 import os
-import random
 import sys
+import random
 import torch
+from pathlib import Path
+from PIL import Image
 import gradio as gr
 from huggingface_hub import hf_hub_download
 import spaces
-from …
+from typing import Union, Sequence, Mapping, Any
+import folder_paths
+from nodes import NODE_CLASS_MAPPINGS, init_extra_nodes
+from comfy import model_management
+
+# Directory configuration
+BASE_DIR = os.path.dirname(os.path.realpath(__file__))
+output_dir = os.path.join(BASE_DIR, "output")
+os.makedirs(output_dir, exist_ok=True)
+folder_paths.set_output_directory(output_dir)
 
+# CUDA diagnostics
+print("Python version:", sys.version)
+print("Torch version:", torch.__version__)
+print("CUDA available:", torch.cuda.is_available())
+print("GPU count:", torch.cuda.device_count())
+if torch.cuda.is_available():
+    print("Current GPU:", torch.cuda.get_device_name(0))
+
+# Initialize extra nodes
+print("Initializing ComfyUI...")
+init_extra_nodes()
 
-# … (deleted lines clipped in this view)
 # Helper function
 def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
     try:
         return obj[index]
     except KeyError:
         return obj["result"][index]
 
-# … (deleted lines clipped in this view)
-# Directory configuration
-BASE_DIR = os.path.dirname(os.path.realpath(__file__))
-output_dir = os.path.join(BASE_DIR, "output")
-models_dir = os.path.join(BASE_DIR, "models")
-os.makedirs(output_dir, exist_ok=True)
-os.makedirs(models_dir, exist_ok=True)
-folder_paths.set_output_directory(output_dir)
-# … (deleted lines clipped in this view)
-stylemodelloader_441 = stylemodelloader.load_style_model(
-    style_model_name="flux1-redux-dev.safetensors"
-)
-vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
-vaeloader_359 = vaeloader.load_vae(
-    vae_name="ae.safetensors"
-)
-
-# Pre-load models
-model_loaders = [dualcliploader_357, vaeloader_359, clip_vision, stylemodelloader_441]
-valid_models = [
-    getattr(loader[0], 'patcher', loader[0])
-    for loader in model_loaders
-]
-model_management.load_models_gpu(valid_models)
+# Initialize models
+print("Initializing models...")
+with torch.inference_mode():
+    # CLIP
+    dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
+    CLIP_MODEL = dualcliploader.load_clip(
+        clip_name1="t5xxl_fp16.safetensors",
+        clip_name2="ViT-L-14-TEXT-detail-improved-hiT-GmP-TE-only-HF.safetensors",
+        type="flux"
+    )
+
+    # Style Model
+    stylemodelloader = NODE_CLASS_MAPPINGS["StyleModelLoader"]()
+    STYLE_MODEL = stylemodelloader.load_style_model(
+        style_model_name="flux1-redux-dev.safetensors"
+    )
+
+    # VAE
+    vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
+    VAE_MODEL = vaeloader.load_vae(
+        vae_name="ae.safetensors"
+    )
+
+    # UNET
+    unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
+    UNET_MODEL = unetloader.load_unet(
+        unet_name="flux1-dev.sft",
+        weight_dtype="fp8_e4m3fn"
+    )
+
+    # CLIP Vision
+    clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+    CLIP_VISION = clipvisionloader.load_clip(
+        clip_name="sigclip_vision_patch14_384.safetensors"
+    )
+
+    model_loaders = [CLIP_MODEL, VAE_MODEL, UNET_MODEL, CLIP_VISION]
+    model_management.load_models_gpu([
+        loader[0].patcher if hasattr(loader[0], 'patcher') else loader[0]
+        for loader in model_loaders
+    ])
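Two things change in the preload. The loader list now names variables that actually exist; the deleted list referenced dualcliploader_357 and clip_vision, which the surviving fragment never defines, consistent with the Space's runtime error. And getattr(loader[0], 'patcher', loader[0]) became an explicit hasattr conditional; the two forms are interchangeable, as this self-contained check shows (class and values hypothetical):

    class FakeLoaderResult:
        patcher = "model-patcher"  # stands in for a ComfyUI ModelPatcher

    for obj in (FakeLoaderResult(), "already-a-model"):
        # getattr-with-default and the hasattr conditional pick the same object
        assert getattr(obj, "patcher", obj) == (obj.patcher if hasattr(obj, "patcher") else obj)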
 
-def generate_image(prompt, input_image, lora_weight, guidance, downsampling_factor, weight, seed, width, height, batch_size, steps):
+@spaces.GPU
+def generate_image(prompt, input_image, lora_weight, guidance, downsampling_factor, weight, seed, width, height, batch_size, steps, progress=gr.Progress(track_tqdm=True)):
     try:
         with torch.inference_mode():
-            # …
+            # Text Encoding
             cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
             encoded_text = cliptextencode.encode(
                 text=prompt,
-                clip=…
+                clip=CLIP_MODEL[0]
             )
 
-            # …
+            # Load Input Image
             loadimage = NODE_CLASS_MAPPINGS["LoadImage"]()
             loaded_image = loadimage.load_image(image=input_image)
 
+            # Load LoRA
+            loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
+            lora_model = loraloadermodelonly.load_lora_model_only(
+                lora_name="NFTNIK_FLUX.1[dev]_LoRA.safetensors",
+                strength_model=lora_weight,
+                model=UNET_MODEL[0]
+            )
+
             # Flux Guidance
             fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
             flux_guidance = fluxguidance.append(
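The added @spaces.GPU decorator is what requests a GPU per call on ZeroGPU hardware, and the progress=gr.Progress(track_tqdm=True) default lets Gradio mirror tqdm output from the sampler into the UI. A minimal sketch of the decorator pattern (the duration value is illustrative; a bare @spaces.GPU also works):

    import spaces
    import torch

    @spaces.GPU(duration=60)  # on ZeroGPU, a GPU is attached only for the span of each call
    def infer(prompt: str):
        with torch.inference_mode():  # skip autograd bookkeeping during generation
            return prompt.upper()     # placeholder for the real pipeline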
@@ -118,13 +116,14 @@ def generate_image(…)
                 downsampling_function="area",
                 mode="keep aspect ratio",
                 weight=weight,
+                autocrop_margin=0.1,
                 conditioning=flux_guidance[0],
-                style_model=…
-                clip_vision=…
+                style_model=STYLE_MODEL[0],
+                clip_vision=CLIP_VISION[0],
                 image=loaded_image[0]
             )
 
-            # Empty Latent
+            # Empty Latent Image
             emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
             empty_latent = emptylatentimage.generate(
                 width=width,
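The call patched here starts in elided lines (109–115 of the new file); judging from its keyword set it is the Redux style-application node, whose output is consumed below as redux_result. The fix wires style_model and clip_vision, previously left dangling, to the globals loaded once at startup, so nothing is re-read from disk per request. A minimal runnable sketch of that load-once/reuse shape (all names are hypothetical stand-ins):

    # Load once at import time (stub mimicking a ComfyUI loader's result tuple):
    class StubStyleLoader:
        def load_style_model(self, style_model_name):
            return (f"style<{style_model_name}>",)

    STYLE_MODEL = StubStyleLoader().load_style_model(style_model_name="flux1-redux-dev.safetensors")

    def apply_style(image):
        # Reuse the module-level object on every call instead of reloading it.
        return (STYLE_MODEL[0], image)

    print(apply_style("example-image"))  # ('style<flux1-redux-dev.safetensors>', 'example-image')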
@@ -141,17 +140,17 @@
                 sampler_name="euler",
                 scheduler="simple",
                 denoise=1,
-                model=…
+                model=lora_model[0],
                 positive=redux_result[0],
                 negative=flux_guidance[0],
                 latent_image=empty_latent[0]
             )
 
-            # …
+            # VAE Decode
             vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
             decoded = vaedecode.decode(
                 samples=sampled[0],
-                vae=…
+                vae=VAE_MODEL[0]
             )
 
             # Save image
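One caution about the save line in the next hunk: VAEDecode yields a torch.Tensor shaped (batch, height, width, channels) with float values in [0, 1], and torch tensors have no .astype method, so (decoded[0] * 255).astype("uint8") would raise AttributeError unless something upstream already converted to NumPy, and the leading batch dimension would also need to be dropped before Image.fromarray. A defensive conversion sketch (the function name is hypothetical):

    import numpy as np
    import torch
    from PIL import Image

    def tensor_to_pil(images: torch.Tensor) -> Image.Image:
        # ComfyUI image tensors are float in [0, 1], shaped (batch, H, W, C); take the first image
        arr = (images[0].cpu().numpy() * 255.0).clip(0, 255).astype(np.uint8)
        return Image.fromarray(arr)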
@@ -160,11 +159,12 @@ def generate_image(…)
             Image.fromarray((decoded[0] * 255).astype("uint8")).save(temp_path)
 
             return temp_path
+
     except Exception as e:
         print(f"Error generating image: {str(e)}")
         return None
 
-# Gradio
+# Gradio interface
 with gr.Blocks() as app:
     gr.Markdown("# FLUX Redux Image Generator")
@@ -244,7 +244,19 @@ with gr.Blocks() as app:
 
     generate_btn.click(
         fn=generate_image,
-        inputs=[…
+        inputs=[
+            prompt_input,
+            input_image,
+            lora_weight,
+            guidance,
+            downsampling_factor,
+            weight,
+            seed,
+            width,
+            height,
+            batch_size,
+            steps
+        ],
         outputs=[output_image]
     )
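Gradio passes component values to fn positionally, so this inputs list must line up with generate_image's parameters one-for-one (prompt_input → prompt through steps → steps); the trailing progress parameter is filled by Gradio itself because it has a gr.Progress default. A reduced runnable sketch of the wiring:

    import gradio as gr

    def echo(prompt, width, height):
        return f"{prompt} @ {width}x{height}"

    with gr.Blocks() as demo:
        prompt_input = gr.Textbox(label="Prompt")
        width = gr.Slider(256, 2048, value=1024, label="Width")
        height = gr.Slider(256, 2048, value=1024, label="Height")
        out = gr.Textbox(label="Result")
        btn = gr.Button("Run")
        btn.click(fn=echo, inputs=[prompt_input, width, height], outputs=[out])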
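The visible hunks stop at line 262 of the new file, so the launch call itself is outside this diff; a standard Spaces ending would be (an assumption, not shown in the commit):

    if __name__ == "__main__":
        app.launch()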