# testncysan / app.py
# nkzlxs — "Update app.py" (commit 261d9d4, verified)
# NOTE(review): the two lines above were Hugging Face page chrome captured in
# the paste; kept as comments so the file parses as Python.
import gradio as gr
import torch
from diffusers import FluxPipeline
from safetensors.torch import load_file
import os
# Configuration.
# The HF access token is read from the environment (set HF_TOKEN in the
# Space secrets) — it is never hard-coded in this file.
HF_TOKEN = os.getenv("HF_TOKEN")
# Gated base model; access must be granted to the token's account.
HF_MODEL = "black-forest-labs/FLUX.1-dev"
# Local LoRA checkpoint, fused into the pipeline at startup when present.
LORA_FILE = "./lora/20.safetensors"
# Load the FLUX.1-dev pipeline onto the GPU (gated model — needs a valid token).
try:
    pipe = FluxPipeline.from_pretrained(
        HF_MODEL,
        torch_dtype=torch.float16,  # fp16 halves VRAM vs fp32 (bf16 is the upstream default)
        use_safetensors=True,
        token=HF_TOKEN,  # `use_auth_token` is deprecated/removed in current diffusers
    ).to("cuda")
    print("Model loaded successfully.")
except Exception as e:
    # Without the base model nothing else can run; abort with a non-zero
    # status so the Space/host marks the startup as failed (bare `exit()`
    # returned status 0 and relies on the `site` module being loaded).
    print(f"Error loading model: {e}")
    raise SystemExit(1)
# Fuse the optional LoRA into the pipeline (skipped when the file is absent).
if os.path.exists(LORA_FILE):
    try:
        # `load_lora_weights` accepts a .safetensors path directly; loading the
        # state dict by hand with safetensors' load_file(device="cuda") was
        # redundant and kept an extra copy of the weights on the GPU.
        pipe.load_lora_weights(LORA_FILE)
        pipe.fuse_lora(lora_scale=1.0)
        print("LoRA loaded successfully.")
    except Exception as e:
        # Best-effort: the base model still works without the LoRA.
        print(f"Error loading LoRA: {e}")
# GENERATE
def generate(prompt, seed=42):
    """Run the FLUX pipeline on *prompt* and return the generated PIL image.

    *seed* (delivered as a float by gr.Number) is coerced to int and used to
    seed a CUDA generator so output is reproducible. Returns None if the
    pipeline raises, so the Gradio UI shows an empty image instead of crashing.
    """
    rng = torch.Generator("cuda").manual_seed(int(seed))
    try:
        images = pipe(
            prompt,
            generator=rng,
            num_inference_steps=28,
            height=1024,
            width=1024,
        ).images
    except Exception as err:
        print(f"Error during image generation: {err}")
        return None
    print("Image generated successfully.")
    return images[0]
# Minimal Gradio UI: prompt + seed in, generated image out.
with gr.Blocks() as demo:
    gr.Markdown("# 🎨 FLUX.1 + My LoRA")
    prompt = gr.Textbox(label="Prompt", value="portrait of san, realistic, 8k")
    seed = gr.Number(label="Seed", value=42)
    output = gr.Image()
    run_button = gr.Button("Generate")
    run_button.click(generate, [prompt, seed], output)

if __name__ == "__main__":
    demo.launch()