#!/usr/bin/env python3
"""
Advanced Live Portrait Demo
Simplified version for Hugging Face Spaces
"""
import os
# Disable Gradio telemetry and silence TensorFlow C++ logging before any
# heavy libraries are imported (these env vars are only read at import time).
os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# IMPORTANT: Force early import of huggingface_hub with our patch
import sys
# Create mock HfFolder class BEFORE anything imports huggingface_hub
class MockHfFolder:
    """Minimal stand-in for ``huggingface_hub.HfFolder``.

    Stores the token in the ``HF_TOKEN`` environment variable instead of the
    on-disk token file, so the demo never touches the real HF cache.
    """

    @staticmethod
    def get_token():
        """Return the stored token, or '' when none has been saved."""
        return os.environ.get("HF_TOKEN", "")

    @staticmethod
    def save_token(token):
        """Persist *token* for later ``get_token`` calls (env-var backed)."""
        os.environ["HF_TOKEN"] = token
# Monkey patch at module level
import types
# Build a stub huggingface_hub module so gradio never imports the real one.
hf_hub_module = types.ModuleType('huggingface_hub')
hf_hub_module.HfFolder = MockHfFolder
# NOTE(review): the real whoami() accepts an optional token argument; this
# zero-arg lambda assumes no caller ever passes one — confirm against the
# gradio version pinned for this Space.
hf_hub_module.whoami = lambda: {"name": "demo_user"}
# Registering the stub in sys.modules makes every subsequent
# `import huggingface_hub` (including gradio's) resolve to it.
sys.modules['huggingface_hub'] = hf_hub_module
# Now import gradio - it will use our patched module
import gradio as gr
import numpy as np
from PIL import Image
import tempfile
import cv2
def create_demo_video(image):
    """Create a short demo MP4 (1s fade-in, then a slow zoom) from a still image.

    Args:
        image: uploaded image, or None when nothing was provided. Assumed to be
            a 3-channel RGB image as delivered by gradio's ``type="pil"`` —
            TODO confirm grayscale/RGBA uploads are converted upstream.

    Returns:
        tuple: ``(video_path, status_message)`` — path to the written .mp4
        (``None`` on failure) and a human-readable status string.
    """
    if image is None:
        return None, "Please upload an image first"
    try:
        # Convert to numpy array (RGB, H x W x C)
        img_array = np.array(image)
        # mkstemp + close instead of NamedTemporaryFile(...).name: the latter
        # leaves the Python file object open forever (fd leak, and sharing
        # problems on Windows). We only need the path; cv2 opens it itself.
        fd, output_path = tempfile.mkstemp(suffix='.mp4')
        os.close(fd)
        # Video parameters
        height, width = img_array.shape[:2]
        fps = 24
        duration = 2  # seconds
        # Initialize video writer
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
        try:
            # Create frames with simple animation
            for i in range(fps * duration):
                frame = img_array.copy()
                if i < fps:
                    # First second: linear fade-in from black.
                    alpha = i / fps
                    frame = (frame * alpha).astype(np.uint8)
                else:
                    # Remainder: gentle zoom, cropped back to the original
                    # size. At i == fps the scale is exactly 1.0 (no-op), so
                    # using `else` here matches the original `elif i > fps`.
                    scale = 1 + (i - fps) * 0.001
                    new_h, new_w = int(height * scale), int(width * scale)
                    if new_h > 0 and new_w > 0:
                        frame = cv2.resize(frame, (new_w, new_h))
                        # Center-crop back to the original dimensions.
                        y_start = (new_h - height) // 2
                        x_start = (new_w - width) // 2
                        frame = frame[y_start:y_start + height,
                                      x_start:x_start + width]
                # PIL/numpy frames are RGB; OpenCV writers expect BGR.
                frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                video_writer.write(frame_bgr)
        finally:
            # Release even if a frame fails, so the container is finalized
            # and the file handle is not leaked.
            video_writer.release()
        return output_path, "✅ Demo video created successfully!"
    except Exception as e:
        return None, f"❌ Error: {str(e)}"
# Create the interface
with gr.Blocks(title="Advanced Live Portrait Demo", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎬 Advanced Live Portrait - Demo
    *A preview of the portrait animation tool*
    """)
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(
                label="Upload a face image",
                type="pil",
                height=300
            )
            generate_btn = gr.Button(
                "Generate Demo Animation",
                variant="primary",
                size="lg"
            )
        with gr.Column():
            output_video = gr.Video(
                label="Generated Animation",
                height=300
            )
            status_text = gr.Textbox(
                label="Status",
                value="Ready to generate...",
                interactive=False
            )
    # Connect button
    generate_btn.click(
        fn=create_demo_video,
        inputs=[image_input],
        outputs=[output_video, status_text]
    )
    # Add examples
    gr.Examples(
        examples=[
            ["https://images.unsplash.com/photo-1494790108755-2616b786d4b9?w=512&h=512&fit=crop"],
            ["https://images.unsplash.com/photo-1534528741775-53994a69daeb?w=512&h=512&fit=crop"],
            ["https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?w=512&h=512&fit=crop"],
        ],
        inputs=[image_input],
        outputs=[output_video, status_text],
        fn=create_demo_video,
        # cache_examples=True would run create_demo_video on every example at
        # build time, downloading the remote Unsplash images — a common cause
        # of Spaces failing to start ("Runtime error"). Compute on demand.
        cache_examples=False,
        label="Try these examples:"
    )
    # Add info section
    with gr.Accordion("ℹ️ About this demo", open=False):
        gr.Markdown("""
        This is a **lightweight demo** of the Advanced Live Portrait tool.
        ### For the full version:
        1. **Clone locally:**
        ```bash
        git clone https://github.com/Ayeeee45/AdvancedLivePortrait-WebUI.git
        cd AdvancedLivePortrait-WebUI
        ```
        2. **Install dependencies:**
        ```bash
        pip install -r requirements.txt
        ```
        3. **Download models** (from the repository links)
        4. **Run:**
        ```bash
        python webui.py
        ```
        ### Requirements for full version:
        - 8GB+ VRAM GPU
        - 20GB+ disk space
        - Python 3.10
        """)
# Launch the app only when executed directly (not when imported).
if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",  # bind all interfaces — required inside the Spaces container
        server_port=7860,       # the port Hugging Face Spaces exposes
        share=False,            # no public tunnel; Spaces provides the URL
        debug=False
    )