import gradio as gr
import torch
from model import Model, ModelType
from app_canny import create_demo as create_demo_canny
from app_pose import create_demo as create_demo_pose
from app_text_to_video import create_demo as create_demo_text_to_video
from app_pix2pix_video import create_demo as create_demo_pix2pix_video
from app_canny_db import create_demo as create_demo_canny_db
from app_depth import create_demo as create_demo_depth
import argparse
import os
# True when running inside the official PAIR Hugging Face Space.
on_huggingspace = os.environ.get("SPACE_AUTHOR_NAME") == "PAIR"

# Parse CLI options before loading the model so that `--help` and bad
# arguments exit immediately instead of after the expensive model load.
parser = argparse.ArgumentParser()
parser.add_argument('--public_access', action='store_true',
                    help="if enabled, the app can be accessed from a public url",
                    default=False)
args = parser.parse_args()

# Prefer GPU when available; float16 halves memory use on CUDA devices.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model(device=device, dtype=torch.float16)
# Assemble the Gradio UI: one tab per supported generation/editing mode.
# NOTE(review): the gr.HTML bodies are empty in this revision — presumably
# header/footer markup was stripped; confirm against the upstream Space.
with gr.Blocks(css='style.css') as demo:
    gr.HTML(
        """
        """)
    if on_huggingspace:
        # Extra notice shown only when hosted on the official Space.
        gr.HTML("""
        """)

    with gr.Tab('Zero-Shot Text2Video'):
        create_demo_text_to_video(model)
    with gr.Tab('Video Instruct Pix2Pix'):
        create_demo_pix2pix_video(model)
    with gr.Tab('Pose Conditional'):
        create_demo_pose(model)
    with gr.Tab('Edge Conditional'):
        create_demo_canny(model)
    with gr.Tab('Edge Conditional and Dreambooth Specialized'):
        create_demo_canny_db(model)
    with gr.Tab('Depth Conditional'):
        create_demo_depth(model)

    # Footer placeholder.
    gr.HTML(
        """
        """)
# Launch: on the hosted Space use a bounded queue; locally, optionally expose
# a public share link (see --public_access) and print it.
if not on_huggingspace:
    queued = demo.queue(api_open=False)
    _, _, share_url = queued.launch(
        file_directories=['temporal'],
        share=args.public_access,
    )
    print(share_url)
else:
    demo.queue(max_size=20)
    demo.launch(debug=True)