Spaces:
Running
Running
Commit ·
59e4b3b
1
Parent(s): f3e294e
Rewrite app with custom UI using InferenceClient
Browse files
gr.load() does not support the text-to-video pipeline type.
Use huggingface_hub.InferenceClient.text_to_video() directly
with a custom Gradio interface instead.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
app.py
CHANGED
|
@@ -1,20 +1,34 @@
|
|
| 1 |
import gradio as gr
|
|
|
|
|
|
|
| 2 |
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
Generate videos from text prompts using **Wan-AI/Wan2.2-T2V-A14B**.
|
| 7 |
-
Powered by the **fal-ai** inference provider. Sign in with your Hugging Face account to get started.
|
| 8 |
-
"""
|
| 9 |
|
| 10 |
EXAMPLES = [
|
| 11 |
"A golden retriever running through a sunlit meadow in slow motion, cinematic lighting",
|
| 12 |
"A futuristic city at night with flying cars and neon lights reflecting off wet streets",
|
| 13 |
"An astronaut floating in space with Earth in the background, gentle camera rotation",
|
| 14 |
-
"Ocean waves crashing on a rocky coastline at sunset, dramatic sky with
|
| 15 |
"A steaming cup of coffee on a wooden table with rain falling outside the window",
|
| 16 |
]
|
| 17 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
with gr.Blocks(fill_height=True, title="Wan 2.2 Text-to-Video") as demo:
|
| 19 |
|
| 20 |
with gr.Sidebar():
|
|
@@ -26,21 +40,29 @@ with gr.Blocks(fill_height=True, title="Wan 2.2 Text-to-Video") as demo:
|
|
| 26 |
button = gr.LoginButton("Sign in")
|
| 27 |
gr.Markdown("---")
|
| 28 |
gr.Markdown(
|
| 29 |
-
"**Model:** [
|
| 30 |
-
"**Provider:**
|
| 31 |
)
|
| 32 |
|
| 33 |
-
gr.Markdown(
|
| 34 |
-
|
| 35 |
-
"
|
| 36 |
-
|
| 37 |
-
provider="fal-ai",
|
| 38 |
-
)
|
| 39 |
-
gr.Markdown("### Example Prompts")
|
| 40 |
-
gr.Dataframe(
|
| 41 |
-
value=[[e] for e in EXAMPLES],
|
| 42 |
-
headers=["Prompt idea — copy and paste above"],
|
| 43 |
-
interactive=False,
|
| 44 |
)
|
| 45 |
|
| 46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from huggingface_hub import InferenceClient
|
| 3 |
+
import tempfile
|
| 4 |
|
| 5 |
+
MODEL_ID = "Wan-AI/Wan2.2-T2V-A14B"
|
| 6 |
+
PROVIDER = "fal-ai"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
|
| 8 |
EXAMPLES = [
|
| 9 |
"A golden retriever running through a sunlit meadow in slow motion, cinematic lighting",
|
| 10 |
"A futuristic city at night with flying cars and neon lights reflecting off wet streets",
|
| 11 |
"An astronaut floating in space with Earth in the background, gentle camera rotation",
|
| 12 |
+
"Ocean waves crashing on a rocky coastline at sunset, dramatic sky with purple clouds",
|
| 13 |
"A steaming cup of coffee on a wooden table with rain falling outside the window",
|
| 14 |
]
|
| 15 |
|
| 16 |
+
|
| 17 |
+
def generate_video(prompt, oauth_token: gr.OAuthToken | None = None):
|
| 18 |
+
if oauth_token is None:
|
| 19 |
+
raise gr.Error("Please sign in with your Hugging Face account first.")
|
| 20 |
+
if not prompt or not prompt.strip():
|
| 21 |
+
raise gr.Error("Please enter a text prompt.")
|
| 22 |
+
|
| 23 |
+
client = InferenceClient(provider=PROVIDER, token=oauth_token.token)
|
| 24 |
+
video_bytes = client.text_to_video(prompt, model=MODEL_ID)
|
| 25 |
+
|
| 26 |
+
tmp = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
|
| 27 |
+
tmp.write(video_bytes)
|
| 28 |
+
tmp.close()
|
| 29 |
+
return tmp.name
|
| 30 |
+
|
| 31 |
+
|
| 32 |
with gr.Blocks(fill_height=True, title="Wan 2.2 Text-to-Video") as demo:
|
| 33 |
|
| 34 |
with gr.Sidebar():
|
|
|
|
| 40 |
button = gr.LoginButton("Sign in")
|
| 41 |
gr.Markdown("---")
|
| 42 |
gr.Markdown(
|
| 43 |
+
f"**Model:** [{MODEL_ID}](https://huggingface.co/{MODEL_ID})\n\n"
|
| 44 |
+
f"**Provider:** {PROVIDER}"
|
| 45 |
)
|
| 46 |
|
| 47 |
+
gr.Markdown(
|
| 48 |
+
"# Wan 2.2 — Text to Video Generator\n\n"
|
| 49 |
+
f"Generate videos from text prompts using **{MODEL_ID}**. "
|
| 50 |
+
"Sign in with your Hugging Face account to get started."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
)
|
| 52 |
|
| 53 |
+
with gr.Row():
|
| 54 |
+
with gr.Column():
|
| 55 |
+
prompt = gr.Textbox(
|
| 56 |
+
label="Prompt",
|
| 57 |
+
placeholder="Describe the video you want to generate...",
|
| 58 |
+
lines=3,
|
| 59 |
+
)
|
| 60 |
+
generate_btn = gr.Button("Generate Video", variant="primary")
|
| 61 |
+
gr.Examples(examples=EXAMPLES, inputs=prompt)
|
| 62 |
+
with gr.Column():
|
| 63 |
+
video_output = gr.Video(label="Generated Video")
|
| 64 |
+
|
| 65 |
+
generate_btn.click(fn=generate_video, inputs=prompt, outputs=video_output)
|
| 66 |
+
prompt.submit(fn=generate_video, inputs=prompt, outputs=video_output)
|
| 67 |
+
|
| 68 |
+
demo.launch(ssr_mode=False)
|