# sora-2 / app.py — Hugging Face Space by akhaliq (commit 223a766)
# NOTE: the lines above this file's imports were web-page chrome from the HF
# "raw" view (author, commit hash, file size) and are not part of the program.
import gradio as gr
import os
import tempfile
import shutil
from typing import Optional, Union
from huggingface_hub import InferenceClient
from pathlib import Path
# ---------- Utilities ----------
def cleanup_temp_files():
    """Best-effort removal of stale ``*.mp4`` files from the system temp dir.

    Deletes generated videos older than 5 minutes so repeated runs do not
    accumulate disk usage. All errors are swallowed on purpose: cleanup must
    never prevent a generation from running.
    """
    import time  # local import, matching the original's function-scope import

    try:
        cutoff = time.time() - 300  # hoisted: was recomputed (and re-imported) per file
        for file_path in Path(tempfile.gettempdir()).glob("*.mp4"):
            try:
                if file_path.stat().st_mtime < cutoff:
                    file_path.unlink(missing_ok=True)
            except OSError:
                # File may vanish or be locked between stat() and unlink();
                # best-effort, so skip it.
                pass
    except Exception as e:
        print(f"Cleanup error: {e}")
def _client_from_token(token: Optional[str]) -> InferenceClient:
    """Build a fal-ai ``InferenceClient`` from the user's OAuth token.

    Raises:
        gr.Error: when no token is available (user has not signed in).
    """
    if token:
        return InferenceClient(
            provider="fal-ai",
            api_key=token,  # OAuth token from gr.LoginButton
            bill_to="huggingface",  # keep if you need consolidated billing; otherwise remove
        )
    raise gr.Error("Please sign in first. This app requires your Hugging Face login.")
# ---------- Inference wrappers (NO env fallback) ----------
def generate_video(
    prompt: str,
    token: gr.OAuthToken | None,
    duration: int = 8,
    size: str = "1280x720",
) -> Optional[str]:
    """Generate a video from a text prompt via the fal-ai provider.

    Args:
        prompt: Text description of the desired video; blank input is a no-op.
        token: OAuth token injected by ``gr.LoginButton``; required.
        duration: Currently unused by this wrapper; kept for interface
            compatibility and future provider kwargs.
        size: Currently unused; see ``duration``.

    Returns:
        Path to a temporary ``.mp4`` file, or ``None`` for empty input.

    Raises:
        gr.Error: when the user is not signed in or generation fails.
    """
    if token is None or not getattr(token, "token", None):
        raise gr.Error("Sign in with Hugging Face to continue. This app uses your inference credits.")
    if not prompt or not prompt.strip():
        return None
    cleanup_temp_files()
    try:
        client = _client_from_token(token.token)
        video_bytes = client.text_to_video(
            prompt,
            model="akhaliq/sora-2",  # served by fal-ai
            # You can pass provider-specific kwargs here if supported
        )
        # delete=False: Gradio needs the file to outlive this call; it is
        # reaped later by cleanup_temp_files().
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
            temp_file.write(video_bytes)
            return temp_file.name
    except gr.Error:
        # Fix: don't re-wrap our own errors (e.g. the sign-in message from
        # _client_from_token) into the generic "Generation failed" text.
        raise
    except Exception as e:
        # Chain the cause so server logs keep the provider's real error.
        raise gr.Error("Generation failed. If this keeps happening, check your provider quota or try again.") from e
def generate_video_from_image(
    image: Union[str, bytes, None],
    prompt: str,
    token: gr.OAuthToken | None,
) -> Optional[str]:
    """Generate a video from a source image plus a guiding prompt.

    Args:
        image: Filepath (str) or raw bytes of the source image; ``None`` or
            any other type is a no-op.
        prompt: Description of how the scene should evolve; blank is a no-op.
        token: OAuth token injected by ``gr.LoginButton``; required.

    Returns:
        Path to a temporary ``.mp4`` file, or ``None`` when inputs are missing.

    Raises:
        gr.Error: when the user is not signed in or generation fails.
    """
    if token is None or not getattr(token, "token", None):
        raise gr.Error("Sign in with Hugging Face to continue. This app uses your inference credits.")
    if not image or not prompt or not prompt.strip():
        return None
    cleanup_temp_files()
    try:
        # Normalize to raw bytes; gr.Image(type="filepath") hands us a path.
        if isinstance(image, str):
            with open(image, "rb") as f:
                input_image = f.read()
        elif isinstance(image, (bytes, bytearray)):
            input_image = image
        else:
            return None
        client = _client_from_token(token.token)
        video_bytes = client.image_to_video(
            input_image,
            prompt=prompt,
            model="akhaliq/sora-2-image-to-video",
        )
        # delete=False: Gradio needs the file to outlive this call; it is
        # reaped later by cleanup_temp_files().
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
            temp_file.write(video_bytes)
            return temp_file.name
    except gr.Error:
        # Fix: don't re-wrap our own errors (e.g. the sign-in message from
        # _client_from_token) into the generic "Generation failed" text.
        raise
    except Exception as e:
        # Chain the cause so server logs keep the provider's real error.
        raise gr.Error("Generation failed. If this keeps happening, check your provider quota or try again.") from e
# ---------- UI ----------
def create_ui():
    """Build and return the Gradio Blocks app.

    Layout: a required sign-in button, a text-to-video panel, an
    image-to-video panel, and a UI-only examples row. Both generate buttons
    receive ``login_btn`` as an input so the OAuth token reaches the
    callbacks as ``gr.OAuthToken``.
    """
    # Custom CSS: dark-mode logo swapping plus a gradient "paid app" notice box.
    css = '''
    .logo-dark{display: none}
    .dark .logo-dark{display: block !important}
    .dark .logo-light{display: none}
    #sub_title{margin-top: -20px !important}
    .notice {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 14px 16px;
        border-radius: 12px;
        margin: 18px auto 6px;
        max-width: 820px;
        text-align: center;
        font-size: 0.98rem;
    }
    '''
    with gr.Blocks(title="Sora-2 (paid via your provider credits)", theme=gr.themes.Soft(), css=css) as demo:
        # Header + billing notice (users pay with their own inference credits).
        gr.HTML("""
        <div style="text-align:center; max-width:860px; margin:0 auto;">
            <h1 style="font-size:2.2em; margin-bottom:6px;">🎬 Sora-2</h1>
            <p style="color:#777; margin:0 0 8px;">Generate videos via the Hugging Face Inference API (provider: fal-ai)</p>
            <div class="notice">
                <b>Heads up:</b> This app is <b>paid</b> and uses <b>your</b> inference provider credits when you run generations.
                Please sign in with your Hugging Face account to continue.
            </div>
            <p style="font-size: 0.9em; color: #999; margin-top: 10px;">
                Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color:#fff; text-decoration:underline;">anycoder</a>
            </p>
        </div>
        """)
        # Required login. Token is passed into all callbacks.
        login_btn = gr.LoginButton("Sign in with Hugging Face")

        # --- Text -> Video panel ---
        with gr.Row():
            with gr.Column(scale=1):
                prompt_input = gr.Textbox(
                    label="Enter your prompt",
                    placeholder="Describe the video you want to create…",
                    lines=4,
                    elem_id="prompt-text-input"
                )
                generate_btn = gr.Button("🎥 Generate Video", variant="primary")
            with gr.Column(scale=1):
                video_output = gr.Video(
                    label="Generated Video",
                    height=400,
                    interactive=False,
                    show_download_button=True,
                    elem_id="text-to-video"
                )
        # Note: we pass BOTH prompt and login_btn (token) to the function
        generate_btn.click(
            fn=generate_video,
            inputs=[prompt_input, login_btn],
            outputs=[video_output],
        )

        # --- Image -> Video panel ---
        gr.HTML("""
        <div style="text-align:center; margin: 34px 0 10px;">
            <h3 style="margin-bottom:6px;">🖼️ ➜ 🎬 Image → Video (beta)</h3>
            <p style="color:#666; margin:0;">Turn a single image into a short video with a guiding prompt.</p>
        </div>
        """)
        with gr.Row():
            with gr.Column(scale=1):
                img_prompt_input = gr.Textbox(
                    label="Describe how the scene should evolve",
                    placeholder="e.g., The cat starts to dance and spins playfully",
                    lines=3,
                    elem_id="img-prompt-text-input"
                )
                # type="filepath" means the callback receives a path string.
                image_input = gr.Image(label="Upload an image", type="filepath")
                generate_img_btn = gr.Button("🎥 Generate from Image", variant="primary")
            with gr.Column(scale=1):
                video_output_img = gr.Video(
                    label="Generated Video (from Image)",
                    height=400,
                    interactive=False,
                    show_download_button=True,
                    elem_id="image-to-video"
                )
        generate_img_btn.click(
            fn=generate_video_from_image,
            inputs=[image_input, img_prompt_input, login_btn],
            outputs=[video_output_img],
        )

        # Keep examples UI-only to avoid auto-charging on click
        gr.Examples(
            examples=[["A majestic golden eagle soaring through a vibrant sunset sky"]],
            inputs=prompt_input
        )
    return demo
if __name__ == "__main__":
    # Clear leftovers from previous runs before the server starts; cleanup
    # failures are logged but never fatal.
    try:
        cleanup_temp_files()
        cache_dir = "gradio_cached_examples"
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir, ignore_errors=True)
    except Exception as e:
        print(f"Initial cleanup error: {e}")

    demo_app = create_ui()
    demo_app.queue(status_update_rate="auto", api_open=False, default_concurrency_limit=None)
    demo_app.launch(show_api=False, enable_monitoring=False, quiet=True, ssr_mode=True)