Update app.py
app.py CHANGED

@@ -28,7 +28,7 @@ from diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
 import numpy as np
 import imageio
 
-torch.hub.download_url_to_file('https://drive.google.com/uc?export=view&id=1MLfHs2Q-hHBySJqcYdG70cUwjqvJ52H9', 'feifei.jpg')
+#torch.hub.download_url_to_file('https://drive.google.com/uc?export=view&id=1MLfHs2Q-hHBySJqcYdG70cUwjqvJ52H9', 'feifei.jpg')
 
 def fetch(url_or_path):
     if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):
@@ -220,6 +220,6 @@ def inference(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale, r
 
 title = "CLIP Guided Diffusion Model"
 description = "Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
-article = "<p style='text-align: center'> By YuanFu Yang (https://github.com/Yfyangd/diffusion). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. </p>"
-iface = gr.Interface(inference, inputs=["text",gr.inputs.Image(type="file", label='initial image (optional)', optional=True),gr.inputs.Slider(minimum=0, maximum=45, step=1, default=10, label="skip_timesteps"), gr.inputs.Slider(minimum=0, maximum=3000, step=1, default=600, label="clip guidance scale (Controls how much the image should look like the prompt)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="tv_scale (Controls the smoothness of the final output)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="range_scale (Controls how far out of range RGB values are allowed to be)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="init_scale (This enhances the effect of the init image)"), gr.inputs.Number(default=0, label="Seed"), gr.inputs.Image(type="file", label='image prompt (optional)', optional=True), gr.inputs.Slider(minimum=50, maximum=500, step=1, default=50, label="timestep respacing"),gr.inputs.Slider(minimum=1, maximum=64, step=1, default=32, label="cutn")], outputs=["image","video"], title=title,
+#article = "<p style='text-align: center'> By YuanFu Yang (https://github.com/Yfyangd/diffusion). It uses OpenAI's 256x256 unconditional ImageNet diffusion model (https://github.com/openai/guided-diffusion) together with CLIP (https://github.com/openai/CLIP) to connect text prompts with images. </p>"
+iface = gr.Interface(inference, inputs=["text",gr.inputs.Image(type="file", label='initial image (optional)', optional=True),gr.inputs.Slider(minimum=0, maximum=45, step=1, default=10, label="skip_timesteps"), gr.inputs.Slider(minimum=0, maximum=3000, step=1, default=600, label="clip guidance scale (Controls how much the image should look like the prompt)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="tv_scale (Controls the smoothness of the final output)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="range_scale (Controls how far out of range RGB values are allowed to be)"), gr.inputs.Slider(minimum=0, maximum=1000, step=1, default=0, label="init_scale (This enhances the effect of the init image)"), gr.inputs.Number(default=0, label="Seed"), gr.inputs.Image(type="file", label='image prompt (optional)', optional=True), gr.inputs.Slider(minimum=50, maximum=500, step=1, default=50, label="timestep respacing"),gr.inputs.Slider(minimum=1, maximum=64, step=1, default=32, label="cutn")], outputs=["image","video"], title=title, description=description, examples=[["little girl with cat on bed", "feifei.jpg", 0, 1000, 150, 50, 0, 0, "feifei.jpg", 90, 32]])
 iface.launch()
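In short: the commit disables the startup download of feifei.jpg from Google Drive, comments out the article credit block, and finishes the gr.Interface(...) call with description=description and a single examples= row, one value per input in order (prompt text, init image, skip_timesteps, clip guidance scale, tv_scale, range_scale, init_scale, seed, image prompt, timestep respacing, cutn). Since the Drive download is now commented out but the example row still references feifei.jpg, the image presumably has to be committed to the Space repository for the example to load.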
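One thing worth noting for anyone reusing this file: it targets Gradio's legacy 2.x component namespace (gr.inputs.Image, gr.inputs.Slider with default= and optional=), which later Gradio releases removed. Below is a minimal sketch of the same interface against the modern top-level API; the inference stub, the parameter names past the truncated hunk header, and the gr.Textbox/gr.Video component choices are assumptions for illustration, not code from this commit.

import gradio as gr

# Hypothetical stand-in so the sketch is self-contained; the real inference()
# in app.py runs CLIP-guided diffusion and returns an image plus a progress video.
def inference(text, init_image, skip_timesteps, clip_guidance_scale, tv_scale,
              range_scale, init_scale, seed, image_prompt, timestep_respacing, cutn):
    raise NotImplementedError

title = "CLIP Guided Diffusion Model"
description = ("Gradio demo for CLIP Guided Diffusion. To use it, simply add your text, "
               "or click one of the examples to load them. Read more at the links below.")

# Components moved to the top level in Gradio 3+: default= became value=,
# and optional=True went away (optional image inputs simply receive None).
iface = gr.Interface(
    fn=inference,
    inputs=[
        gr.Textbox(label="text"),
        gr.Image(type="filepath", label="initial image (optional)"),
        gr.Slider(0, 45, step=1, value=10, label="skip_timesteps"),
        gr.Slider(0, 3000, step=1, value=600, label="clip guidance scale"),
        gr.Slider(0, 1000, step=1, value=0, label="tv_scale"),
        gr.Slider(0, 1000, step=1, value=0, label="range_scale"),
        gr.Slider(0, 1000, step=1, value=0, label="init_scale"),
        gr.Number(value=0, label="Seed"),
        gr.Image(type="filepath", label="image prompt (optional)"),
        gr.Slider(50, 500, step=1, value=50, label="timestep respacing"),
        gr.Slider(1, 64, step=1, value=32, label="cutn"),
    ],
    outputs=[gr.Image(label="output"), gr.Video(label="progress")],
    title=title,
    description=description,
    examples=[["little girl with cat on bed", "feifei.jpg", 0, 1000,
               150, 50, 0, 0, "feifei.jpg", 90, 32]],
)

iface.launch()

As in the commit, the examples row supplies one value per input component, and feifei.jpg still has to sit next to app.py for Gradio to resolve the example paths.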