# genmoai / demos / gradio_ui_fixed.py
# Uploaded by versantus via huggingface_hub (commit d6c2737, verified)
#! /usr/bin/env python
import sys
import torch # Ensure PyTorch is imported to configure MPS backend
import click
import gradio as gr
# Select the compute device: prefer the Apple-Silicon MPS backend, else CPU.
_mps_available = torch.backends.mps.is_available()
device = torch.device("mps" if _mps_available else "cpu")
if _mps_available:
    print("Using MPS backend for Apple Silicon")
else:
    print("MPS backend not available. Using CPU.")
sys.path.append("..")
from cli import configure_model, generate_video
# Configuration for configure_model (imported from cli).
# NOTE(review): these are placeholder values — they must be replaced with real
# paths before the demo can generate anything.
model_dir_path_: str = "/path/to/model/dir"  # Directory containing the model weights. Replace with the actual path.
lora_path_: str = "/path/to/lora"  # Path to the LoRA weights. Replace with the actual path.
cpu_offload_: bool = False  # Whether to offload model layers to CPU (trades speed for memory). Set True or False based on your needs.
def generate_with_device(prompt, *args):
    """Generate a video for *prompt* on the selected device.

    The model is loaded lazily on the first call and cached on the function
    object, so subsequent clicks reuse the already-loaded model instead of
    re-reading the weights from disk and re-transferring them to the device
    on every request (the original implementation reloaded per call).

    Parameters:
        prompt: text prompt forwarded to generate_video.
        *args: any extra positional arguments forwarded to generate_video.

    Returns:
        Whatever generate_video returns (presumably a video file path or
        frames consumable by gr.Video — confirm against cli.generate_video).
    """
    if not hasattr(generate_with_device, "_model"):
        # One-time load: configure the model and move it to the chosen device.
        model = configure_model(model_dir_path_, lora_path_, cpu_offload_)
        model.to(device)
        generate_with_device._model = model
    return generate_video(prompt, generate_with_device._model, *args)
# Build the Gradio interface: one prompt textbox, a generate button, and a
# video player for the result. The button click runs generate_with_device.
with gr.Blocks() as demo:
    gr.Markdown("Video Generator")

    with gr.Row():
        prompt_input = gr.Textbox(
            label="Prompt",
            value="A hand with delicate fingers picks up a bright yellow lemon from a wooden bowl filled with lemons and sprigs of mint against a peach-colored background. The hand gently tosses the lemon up and catches it."
        )

    with gr.Row():
        run_button = gr.Button("Generate Video")
        video_output = gr.Video(label="Generated Video")

    run_button.click(
        generate_with_device,
        inputs=[prompt_input],
        outputs=[video_output],
    )

if __name__ == "__main__":
    # share=True publishes a temporary public URL for the demo.
    demo.launch(share=True)