Spaces:
Runtime error
| import gradio as gr | |
| import os | |
| import torch | |
| from download_js_function import download_primary_image_url_js | |
| from diffusers import DPMSolverMultistepScheduler, StableDiffusionImg2ImgPipeline | |
| from PIL import Image | |
# Hugging Face Hub repo holding the fine-tuned sweater-CAD SD 1.5 weights.
model_path = 'airaspberry/full-sweater-cad-fast-db-v1.5'
# Prefer an explicit token from the environment; falling back to True tells
# huggingface_hub to use the locally cached `huggingface-cli login` token.
auth_token = os.environ.get("AUTH_TOKEN") or True
# NOTE: a DPMSolverMultistepScheduler (imported above) can be swapped in via
# the `scheduler=` kwarg of from_pretrained if the default is too slow.
# fp16 halves GPU memory; CPU inference requires full fp32 precision.
pipeline_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
img2img_pipeline = StableDiffusionImg2ImgPipeline.from_pretrained(
    model_path,
    torch_dtype=pipeline_dtype,
    use_auth_token=auth_token,
)
if torch.cuda.is_available():
    img2img_pipeline = img2img_pipeline.to("cuda")
    print("Running on GPU 🔥")
else:
    print("Running on CPU 🥶")
def predict(input_image, prompt, negative_prompt, steps, num_samples, scale, seed, strength):
    """Run the img2img pipeline on `input_image` and return the generated PIL images.

    A seed of 0 means "no fixed seed" (the pipeline samples with fresh random
    state); any other value seeds a torch.Generator on the active device for
    reproducible output. `steps` is cast to int because the Gradio slider may
    deliver a float.
    """
    # torch.Generator() defaults to CPU, so one device-parameterized path
    # covers both the CUDA and CPU cases of the original branching.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    generator = torch.Generator(device).manual_seed(seed) if seed != 0 else None
    result = img2img_pipeline(
        prompt,
        negative_prompt=negative_prompt,
        num_images_per_prompt=num_samples,
        image=input_image,
        num_inference_steps=int(steps),
        strength=strength,
        guidance_scale=scale,
        generator=generator,
    )
    return result.images
# --- Gradio UI wiring ------------------------------------------------------
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Raspberry AI Design")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="pil")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            with gr.Accordion("Advanced options", open=False):
                negative_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                num_samples = gr.Slider(
                    label="Images", minimum=1, maximum=4, value=1, step=1)
                ddim_steps = gr.Slider(label="Steps", minimum=1,
                                       maximum=50, value=50, step=1)
                scale = gr.Slider(
                    label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1
                )
                # A strength of 1.0 essentially ignores init_image.
                # TODO(): surface this explanation in the UI markdown.
                strength = gr.Slider(
                    label="Strength", minimum=0.0, maximum=1.0, value=0.9, step=0.01
                )
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=2147483647,
                    step=1,
                    randomize=True,
                )
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(
                grid=[2], height="auto")
            download_button = gr.Button(value="Download")
    gr.Examples(
        examples=[
            ["./hoodie_test3.jpg",
             "sweatercad of a maroon zip-up sweater with a hoodie",
             "humans, faces, people, men, women, scarf",
             50, 1, 9.0, 123123123, 0.8]
        ],
        inputs=[input_image, prompt, negative_prompt, ddim_steps,
                num_samples, scale, seed, strength],
        outputs=[gallery],
        fn=predict,
        cache_examples=True,
    )
    run_button.click(fn=predict, inputs=[
        input_image, prompt, negative_prompt, ddim_steps, num_samples, scale, seed, strength], outputs=[gallery])
    # BUG FIX: this previously referenced undefined `get_primary_image_url_js`,
    # raising a NameError at import time (the Space's "Runtime error"). The
    # file imports `download_primary_image_url_js`, so use that name; the JS
    # snippet runs client-side via the `_js` hook with no Python callback.
    download_button.click(None, inputs=[], outputs=[], _js=download_primary_image_url_js)

block.launch(show_api=True, debug=True, show_error=True)