import gradio as gr
import numpy as np
import random

# import spaces  # [uncomment to use ZeroGPU]
from diffusers import AutoPipelineForText2Image
import torch
from huggingface_hub import snapshot_download

# Download the LoRA repository into the local cache.
snapshot_download(repo_id="Roomie/xavyy", cache_dir='./')

# Load the FLUX.1-schnell base model, then attach the LoRA weights.
pipeline = AutoPipelineForText2Image.from_pretrained(
    'black-forest-labs/FLUX.1-schnell', torch_dtype=torch.bfloat16).to('cuda')
pipeline.load_lora_weights('Roomie/xavyy', weight_name='xavyy.safetensors')

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

# @spaces.GPU  # [uncomment to use ZeroGPU]
def infer(prompt, negative_prompt, seed, randomize_seed, width, height,
          guidance_scale, num_inference_steps):
    # The handler must accept every control wired up in gr.on below and
    # return both the image and the seed that was actually used.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device='cuda').manual_seed(seed)
    image = pipeline(
        prompt=prompt,
        # FLUX.1-schnell does not use classifier-free guidance, so the
        # negative prompt is collected from the UI but not forwarded here.
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image, seed

examples = [
"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
"An astronaut riding a green horse",
"A delicious ceviche cheesecake slice",
]
css = """
#col-container {
margin: 0 auto;
max-width: 640px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# Text-to-Image Gradio Template")

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=1,
                placeholder="Enter a negative prompt",
                visible=False,
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )

            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,  # Replace with defaults that work for your model
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=2,  # Replace with defaults that work for your model
                )

        gr.Examples(
            examples=examples,
            inputs=[prompt],
        )

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed,
                width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed],
    )

demo.queue().launch()
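# A minimal smoke-test sketch of calling the handler directly, outside the
# UI. This assumes a CUDA device is available and the weights have finished
# downloading; the output filename is arbitrary:
#
#   image, used_seed = infer(
#       "An astronaut riding a green horse", negative_prompt="",
#       seed=0, randomize_seed=True, width=1024, height=1024,
#       guidance_scale=0.0, num_inference_steps=4,
#   )
#   image.save(f"sample_{used_seed}.png")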