# hwonder — commit b99fda7: Add 3D model (GLTF/GLB) support for Text to Image
"""
StackNet Demo - 1:1 Preview
A Gradio-based demo showcasing StackNet's AI-powered media generation capabilities.
"""
import gradio as gr
from src.ui.tabs import (
create_text_to_image_tab,
create_image_to_image_tab,
create_text_to_video_tab,
create_image_to_video_tab,
create_text_to_music_tab,
create_music_to_music_tab
)
from src.ui.handlers import Handlers
def create_demo():
    """Create and return the main Gradio demo application.

    Builds the settings accordion (API key input) and one tab per media
    capability, then wires every tab's action button to its handler in
    ``Handlers``. Returns the assembled ``gr.Blocks`` app.
    """
    with gr.Blocks(
        title="StackNet 1:1 Preview Demo",
        theme=gr.themes.Soft()
    ) as demo:
        gr.Markdown("# StackNet 1:1 Preview Demo")

        # API Key input at the top
        with gr.Accordion("Settings", open=False):
            api_key = gr.Textbox(
                label="StackNet Key",
                placeholder="Enter your key (e.g., sn_xxxxx)",
                type="password"
            )

        tabs = _create_tabs()
        _wire_events(tabs, api_key)

    return demo


def _create_tabs():
    """Build all capability tabs (Image/Video first, Music last) and return a dict of each tab's components keyed by tab name."""
    # (dict key, tab label, tab element id, component factory)
    tab_specs = [
        ("text_to_image", "Text to Image", "text-to-image", create_text_to_image_tab),
        ("image_to_image", "Image to Image", "image-to-image", create_image_to_image_tab),
        ("text_to_video", "Text to Video", "text-to-video", create_text_to_video_tab),
        ("image_to_video", "Image to Video", "image-to-video", create_image_to_video_tab),
        ("text_to_music", "Text to Music", "text-to-music", create_text_to_music_tab),
        ("music_to_music", "Music to Music", "music-to-music", create_music_to_music_tab),
    ]
    tabs = {}
    with gr.Tabs():
        for key, label, tab_id, factory in tab_specs:
            with gr.Tab(label, id=tab_id):
                tabs[key] = factory()
    return tabs


def _wire_events(tabs, api_key):
    """Connect each tab's button to its handler; ``api_key`` is appended as the last input of every event."""
    # (tab key, button component name, handler, input component names,
    #  output component names) — order matters: it fixes the positional
    # arguments each handler receives and the registration order.
    wiring = [
        ("text_to_image", "generate_btn", Handlers.generate_image,
         ["prompt", "format_type"],
         ["output_image", "output_model", "status"]),
        ("image_to_image", "edit_btn", Handlers.edit_image,
         ["input_image", "edit_prompt", "strength"],
         ["output_image", "status"]),
        ("text_to_video", "generate_btn", Handlers.generate_video,
         ["prompt", "duration", "style"],
         ["output_video", "status"]),
        ("image_to_video", "animate_btn", Handlers.animate_image,
         ["input_image", "motion_prompt", "duration"],
         ["output_video", "status"]),
        ("text_to_music", "generate_btn", Handlers.generate_music,
         ["prompt", "tags", "instrumental", "lyrics", "title"],
         ["output_audio", "status"]),
        ("music_to_music", "cover_btn", Handlers.create_cover,
         ["cover_audio_input", "cover_style_prompt", "cover_tags", "cover_title"],
         ["cover_output", "cover_status"]),
        ("music_to_music", "stems_btn", Handlers.extract_stems,
         ["stems_audio_input"],
         ["vocals_output", "drums_output", "bass_output", "other_output", "stems_status"]),
    ]
    for tab_key, btn_name, handler, input_names, output_names in wiring:
        components = tabs[tab_key]
        components[btn_name].click(
            fn=handler,
            inputs=[components[name] for name in input_names] + [api_key],
            outputs=[components[name] for name in output_names],
            # api_name=None keeps these endpoints out of the public API schema
            api_name=None
        )
# Build the app at import time so hosting platforms (e.g. Hugging Face
# Spaces) can discover the module-level `demo` object.
demo = create_demo()

if __name__ == "__main__":
    # Launch the local server only when run as a script — importing this
    # module (tests, tooling) must not start a server.
    demo.launch()