import gradio as gr
import numpy as np
import random
# import spaces  # [uncomment to use ZeroGPU]
from diffusers import DiffusionPipeline, AutoPipelineForText2Image
from peft import PeftModel
import torch
device = "cuda" if torch.cuda.is_available() \
    else "xpu" if torch.xpu.is_available() \
    else "cpu"

current_model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use
current_lora_repo = None
current_lora_scale = 1.0

# Half precision saves memory on accelerators; CPUs fall back to float32.
if torch.cuda.is_available() or torch.xpu.is_available():
    torch_dtype = torch.float16
else:
    torch_dtype = torch.float32
pipe = DiffusionPipeline.from_pretrained(current_model_repo_id, torch_dtype=torch_dtype)
pipe = pipe.to(device)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
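
# Optional tweak for low-VRAM machines (a sketch, not required here): diffusers
# pipelines ship standard memory helpers you could enable at this point.
# pipe.enable_attention_slicing()    # lower peak VRAM at some speed cost
# pipe.enable_model_cpu_offload()    # needs `accelerate`; streams weights to the device on demand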

def clean_vram():
    """Release cached accelerator memory so a new pipeline can be loaded."""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    if torch.xpu.is_available():
        torch.xpu.empty_cache()
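
# NOTE: empty_cache() only hands allocator-cached blocks back to the device;
# the old pipeline object must be dropped first, which is why infer() runs
# `del pipe` before calling clean_vram().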

# @spaces.GPU  # [uncomment to use ZeroGPU]
def infer(
    prompt,
    model_repo,
    lora_repo,
    lora_scale,
    negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    pag_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    global current_model_repo_id, current_lora_repo, current_lora_scale, pipe

    # The dropdown returns the string "None" rather than a Python None.
    if lora_repo == "None":
        lora_repo = None

    # Reload the pipeline only when the model, LoRA, or LoRA scale changed.
    if (model_repo != current_model_repo_id) or (lora_repo != current_lora_repo) or (current_lora_scale != lora_scale):
        print(f"Model changed to {model_repo} (LoRA: {lora_repo}); reloading pipeline...")
        current_model_repo_id = model_repo
        current_lora_repo = lora_repo
        current_lora_scale = lora_scale
        del pipe
        clean_vram()
        pipe = DiffusionPipeline.from_pretrained(model_repo, torch_dtype=torch_dtype).to(device)
        if lora_repo:
            pipe.unet = PeftModel.from_pretrained(pipe.unet, lora_repo, subfolder="unet").to(device)
            pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, lora_repo, subfolder="text_encoder").to(device)
            # Bake the scale into the weights by multiplying only the lora_B
            # matrices: the LoRA update is B @ A, so scaling both A and B
            # would apply lora_scale quadratically instead of linearly.
            pipe.unet.load_state_dict({k: lora_scale * v if "lora_B" in k else v for k, v in pipe.unet.state_dict().items()})
            pipe.text_encoder.load_state_dict({k: lora_scale * v if "lora_B" in k else v for k, v in pipe.text_encoder.state_dict().items()})
        pipe = AutoPipelineForText2Image.from_pipe(pipe, enable_pag=True)
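        # NOTE: from_pipe() reuses the components already in memory, so the
        # PAG (Perturbed-Attention Guidance) wrapper is created without
        # re-downloading weights; the pag_scale argument below only takes
        # effect because of this wrapper.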

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # A CPU generator keeps seeded results independent of the accelerator.
    generator = torch.Generator().manual_seed(seed)
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        guidance_scale=guidance_scale,
        pag_scale=pag_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]

    clean_vram()
    return image, seed

examples = [
    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
    "An astronaut riding a green horse",
    "A delicious ceviche cheesecake slice",
]

css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(" # Text-to-Image Gradio Template")

        model_repo = gr.Dropdown(
            label="Model repository path",
            choices=["stabilityai/sdxl-turbo", "CompVis/stable-diffusion-v1-4", "stable-diffusion-v1-5/stable-diffusion-v1-5"],
            allow_custom_value=True,
        )
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0, variant="primary")

        result = gr.Image(label="Result", show_label=False)
| with gr.Accordion("Advanced Settings", open=False): | |
| negative_prompt = gr.Text( | |
| label="Negative prompt", | |
| max_lines=1, | |
| placeholder="Enter a negative prompt", | |
| visible=True, | |
| ) | |
| seed = gr.Slider( | |
| label="Seed", | |
| minimum=0, | |
| maximum=MAX_SEED, | |
| step=1, | |
| value=0, | |
| ) | |
| randomize_seed = gr.Checkbox(label="Randomize seed", value=True) | |
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,  # Replace with defaults that work for your model
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,  # Replace with defaults that work for your model
                )
                pag_scale = gr.Slider(
                    label="PAG scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    value=0.0,  # Replace with defaults that work for your model
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=2,  # Replace with defaults that work for your model
                )
            with gr.Row():
                lora_repo = gr.Dropdown(
                    label="LoRA repository path",
                    choices=["None", "AbstractQbit/biskvit_cat_lora"],
                    allow_custom_value=True,
                )
                lora_scale = gr.Slider(
                    label="LoRA scale",
                    minimum=0.0,
                    maximum=1.0,
                    step=0.1,
                    value=1.0,  # Replace with defaults that work for your model
                )
        gr.Examples(examples=examples, inputs=[prompt])

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[
            prompt,
            model_repo,
            lora_repo,
            lora_scale,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            pag_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
    )
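    # NOTE: seed is both an input and an output; returning the (possibly
    # randomized) seed from infer() writes it back to the Seed slider, so
    # every generated image can be reproduced later.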

if __name__ == "__main__":
    demo.launch()