Spaces:
Running
on
Zero
Running
on
Zero
File size: 5,427 Bytes
cd6f592 3fe7eb4 a7fb3c7 3fe7eb4 df7b4b1 3fe7eb4 1d3e143 b7848a2 15e3c38 85f385f 9e5c459 85f385f 3fe7eb4 a7fb3c7 dfa11c0 9b4a79f dfa11c0 3fe7eb4 3e9a1f3 3fe7eb4 3e9a1f3 3fe7eb4 cd6f592 3fe7eb4 dfa11c0 3fe7eb4 df7b4b1 3fe7eb4 5fc8103 3fe7eb4 5fc8103 d372fdc 7d021b9 5fc8103 f84d1c9 5fc8103 3fe7eb4 991bede 3fe7eb4 1cc732c 3fe7eb4 1cc732c 3fe7eb4 a18fc9f 3fe7eb4 991bede 3fe7eb4 cc9f6c8 3fe7eb4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 |
import os
import gradio as gr
import numpy as np
import random
from huggingface_hub import HfApi
import spaces
from diffusers import DiffusionPipeline
import torch
# ---------------------------------------------------------------------------
# Model / hardware setup (runs once at import time).
# ---------------------------------------------------------------------------
model_repo_id = "Qwen/Qwen-Image"  # model preloaded at startup

# Fail fast with an actionable message if the Space secret is missing,
# instead of a cryptic 401 from the Hub later.
hf_token = os.getenv("HF_TOKEN_SPACES")
if hf_token is None:
    raise RuntimeError(
        "HF_TOKEN_SPACES is not set in the environment. "
        "Add it in the Space settings (Variables & secrets)."
    )
api = HfApi()
print("WHOAMI:", api.whoami(token=hf_token))

# Models offered in the UI dropdown.
MODEL_CHOICES = [
    "black-forest-labs/FLUX.1-dev",
    "Qwen/Qwen-Image",
    "tencent/HunyuanImage-3.0",
    "stabilityai/stable-diffusion-3.5-medium",
    "stabilityai/stable-diffusion-2-1",
    "SG161222/Realistic_Vision_V5.1_noVAE",
]

# bfloat16 on GPU halves memory versus fp32; CPU falls back to fp32.
if torch.cuda.is_available():
    torch_dtype = torch.bfloat16
    device = "cuda"
else:
    torch_dtype = torch.float32
    device = "cpu"

# FIX: `use_auth_token` is deprecated (removed in recent diffusers /
# huggingface_hub releases); the supported keyword is `token`.
pipe = DiffusionPipeline.from_pretrained(
    model_repo_id, torch_dtype=torch_dtype, token=hf_token
)
pipe = pipe.to(device)
current_model_id = model_repo_id  # tracks which model `pipe` currently holds

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
@spaces.GPU
def infer(
    model_name,
    prompt,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate one image with the selected diffusion model.

    Args:
        model_name: Hub repo id chosen in the dropdown; triggers a pipeline
            reload when it differs from the currently loaded model.
        prompt / negative_prompt: text conditioning for the pipeline.
        seed: RNG seed; ignored when ``randomize_seed`` is True.
        randomize_seed: draw a fresh seed in [0, MAX_SEED] when True.
        width / height: output resolution in pixels.
        guidance_scale / num_inference_steps: sampler settings.
        progress: Gradio progress tracker (mirrors the pipeline's tqdm bars).

    Returns:
        (image, seed): the generated PIL image and the seed actually used,
        so the UI can display it back into the seed slider.

    Raises:
        RuntimeError: if HF_TOKEN_SPACES is missing inside the GPU worker.
    """
    global pipe, current_model_id

    # The @spaces.GPU worker may run in a separate process, so re-read the
    # token from the environment rather than trusting module state.
    hf_token = os.getenv("HF_TOKEN_SPACES")
    if hf_token is None:
        raise RuntimeError("HF_TOKEN_SPACES is not available inside GPU worker.")
    api = HfApi()
    print("WHOAMI:", api.whoami(token=hf_token))

    # Lazily reload the pipeline only when the user picked a different model.
    if model_name != current_model_id:
        # FIX: `use_auth_token` is deprecated; `token` is the supported kwarg.
        pipe = DiffusionPipeline.from_pretrained(
            model_name,
            torch_dtype=torch_dtype,
            token=hf_token,
        ).to(device)
        current_model_id = model_name

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # CPU generator: seeding stays reproducible regardless of device.
    generator = torch.Generator().manual_seed(seed)

    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed
examples = [
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
"An astronaut riding a green horse",
"A delicious ceviche cheesecake slice",
]
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""
# ---------------------------------------------------------------------------
# UI definition. Component creation order inside the context managers defines
# the on-screen layout, so statements here must not be reordered.
# ---------------------------------------------------------------------------
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(" # Transform Text to Images")
        # Model picker; default matches the pipeline preloaded at startup so
        # the first generation needs no reload.
        with gr.Row():
            model_dropdown = gr.Dropdown(
                label="Diffusion model",
                choices=MODEL_CHOICES,
                value=model_repo_id,
            )
        # Prompt input plus Run button on one row.
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")
        result = gr.Image(label="Result", show_label=False)
        # Less-common knobs live in a collapsed accordion.
        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=True,
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                # step=32 keeps dimensions compatible with the models' latent grid.
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,  # Replace with defaults that work for your model
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=50,  # Replace with defaults that work for your model
                )
        gr.Examples(examples=examples, inputs=[prompt])
    # Wire both the Run button and Enter-in-prompt to the same inference call.
    # `seed` appears in outputs so the randomized seed is shown back to the user.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            model_dropdown,
            prompt,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )
if __name__ == "__main__":
    # Start the Gradio server when run as a script (Spaces executes this file).
    demo.launch()
|