Ayeeee45 committed on
Commit
1fe864e
·
verified ·
1 Parent(s): b26310c

Create App.py

Browse files
Files changed (1) hide show
  1. App.py +180 -0
App.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Advanced Live Portrait Demo
4
+ Simplified version for Hugging Face Spaces
5
+ """
6
+
7
+ import os
8
+ os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
9
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
10
+
11
+ # IMPORTANT: Force early import of huggingface_hub with our patch
12
+ import sys
13
+
14
+ # Create mock HfFolder class BEFORE anything imports huggingface_hub
15
+ class MockHfFolder:
16
+ @staticmethod
17
+ def get_token():
18
+ return os.environ.get("HF_TOKEN", "")
19
+ @staticmethod
20
+ def save_token(token):
21
+ os.environ["HF_TOKEN"] = token
22
+
23
+ # Monkey patch at module level
24
+ import types
25
+ hf_hub_module = types.ModuleType('huggingface_hub')
26
+ hf_hub_module.HfFolder = MockHfFolder
27
+ hf_hub_module.whoami = lambda: {"name": "demo_user"}
28
+ sys.modules['huggingface_hub'] = hf_hub_module
29
+
30
+ # Now import gradio - it will use our patched module
31
+ import gradio as gr
32
+ import numpy as np
33
+ from PIL import Image
34
+ import tempfile
35
+ import cv2
36
+
37
+ def create_demo_video(image):
38
+ """Create a simple demo video from image"""
39
+ if image is None:
40
+ return None, "Please upload an image first"
41
+
42
+ try:
43
+ # Convert to numpy array
44
+ img_array = np.array(image)
45
+
46
+ # Create output video path
47
+ output_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
48
+
49
+ # Video parameters
50
+ height, width = img_array.shape[:2]
51
+ fps = 24
52
+ duration = 2 # seconds
53
+
54
+ # Initialize video writer
55
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
56
+ video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
57
+
58
+ # Create frames with simple animation
59
+ for i in range(fps * duration):
60
+ frame = img_array.copy()
61
+
62
+ # Simple animation effect
63
+ if i < fps:
64
+ # Fade in
65
+ alpha = i / fps
66
+ frame = (frame * alpha).astype(np.uint8)
67
+ elif i > fps:
68
+ # Slight zoom
69
+ scale = 1 + (i - fps) * 0.001
70
+ new_h, new_w = int(height * scale), int(width * scale)
71
+ if new_h > 0 and new_w > 0:
72
+ frame = cv2.resize(frame, (new_w, new_h))
73
+ # Crop to original size
74
+ y_start = (new_h - height) // 2
75
+ x_start = (new_w - width) // 2
76
+ frame = frame[y_start:y_start+height, x_start:x_start+width]
77
+
78
+ # Convert back to BGR for OpenCV
79
+ frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
80
+ video_writer.write(frame_bgr)
81
+
82
+ video_writer.release()
83
+
84
+ return output_path, "✅ Demo video created successfully!"
85
+
86
+ except Exception as e:
87
+ return None, f"❌ Error: {str(e)}"
88
+
89
+ # Create the interface
90
+ with gr.Blocks(title="Advanced Live Portrait Demo", theme=gr.themes.Soft()) as demo:
91
+ gr.Markdown("""
92
+ # 🎬 Advanced Live Portrait - Demo
93
+ *A preview of the portrait animation tool*
94
+ """)
95
+
96
+ with gr.Row():
97
+ with gr.Column():
98
+ image_input = gr.Image(
99
+ label="Upload a face image",
100
+ type="pil",
101
+ height=300
102
+ )
103
+
104
+ generate_btn = gr.Button(
105
+ "Generate Demo Animation",
106
+ variant="primary",
107
+ size="lg"
108
+ )
109
+
110
+ with gr.Column():
111
+ output_video = gr.Video(
112
+ label="Generated Animation",
113
+ height=300
114
+ )
115
+
116
+ status_text = gr.Textbox(
117
+ label="Status",
118
+ value="Ready to generate...",
119
+ interactive=False
120
+ )
121
+
122
+ # Connect button
123
+ generate_btn.click(
124
+ fn=create_demo_video,
125
+ inputs=[image_input],
126
+ outputs=[output_video, status_text]
127
+ )
128
+
129
+ # Add examples
130
+ gr.Examples(
131
+ examples=[
132
+ ["https://images.unsplash.com/photo-1494790108755-2616b786d4b9?w=512&h=512&fit=crop"],
133
+ ["https://images.unsplash.com/photo-1534528741775-53994a69daeb?w=512&h=512&fit=crop"],
134
+ ["https://images.unsplash.com/photo-1507003211169-0a1dd7228f2d?w=512&h=512&fit=crop"],
135
+ ],
136
+ inputs=[image_input],
137
+ outputs=[output_video, status_text],
138
+ fn=create_demo_video,
139
+ cache_examples=True,
140
+ label="Try these examples:"
141
+ )
142
+
143
+ # Add info section
144
+ with gr.Accordion("ℹ️ About this demo", open=False):
145
+ gr.Markdown("""
146
+ This is a **lightweight demo** of the Advanced Live Portrait tool.
147
+
148
+ ### For the full version:
149
+ 1. **Clone locally:**
150
+ ```bash
151
+ git clone https://github.com/Ayeeee45/AdvancedLivePortrait-WebUI.git
152
+ cd AdvancedLivePortrait-WebUI
153
+ ```
154
+
155
+ 2. **Install dependencies:**
156
+ ```bash
157
+ pip install -r requirements.txt
158
+ ```
159
+
160
+ 3. **Download models** (from the repository links)
161
+
162
+ 4. **Run:**
163
+ ```bash
164
+ python webui.py
165
+ ```
166
+
167
+ ### Requirements for full version:
168
+ - 8GB+ VRAM GPU
169
+ - 20GB+ disk space
170
+ - Python 3.10
171
+ """)
172
+
173
+ # Launch the app
174
+ if __name__ == "__main__":
175
+ demo.launch(
176
+ server_name="0.0.0.0",
177
+ server_port=7860,
178
+ share=False,
179
+ debug=False
180
+ )