Baseta committed on
Commit
7353536
·
verified ·
1 Parent(s): ef73ac6

Upload folder using huggingface_hub

Browse files
Files changed (2) hide show
  1. app.py +475 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import time
3
+ import random
4
+ import numpy as np
5
+ from PIL import Image
6
+ import io
7
+ import base64
8
+ from typing import Tuple, Optional
9
+
10
def generate_video_from_text(
    prompt: str,
    duration: int = 5,
    style: str = "realistic",
    quality: str = "high",
    motion_strength: float = 0.5,
    seed: Optional[int] = None,
) -> str:
    """
    Generate a video from a text prompt using a simulated WAN model.

    This is a *generator*: it yields human-readable progress strings while
    "processing", and its final ``return`` value — the video URL — travels on
    ``StopIteration.value``, so callers must drive the generator manually to
    obtain it (a plain ``for`` loop discards it).

    Args:
        prompt: Text description of the video to generate.
        duration: Video duration in seconds.
        style: Video style (realistic, artistic, anime, cartoon).
        quality: Video quality (low, medium, high). Currently unused by the
            simulated backend; kept for interface compatibility.
        motion_strength: Strength of motion in the video (0.0 to 1.0).
        seed: Random seed for reproducible generation.

    Returns:
        URL of the generated (sample) video file.

    Raises:
        gr.Error: If the prompt is empty or whitespace-only.
    """
    if not prompt.strip():
        raise gr.Error("Please enter a text prompt to generate a video.")

    # Simulated pipeline stages; each one is surfaced to the UI as a line of
    # progress text.
    progress_steps = [
        "Analyzing prompt...",
        "Generating keyframes...",
        "Applying style transfer...",
        "Creating motion vectors...",
        "Rendering video...",
        "Post-processing...",
        "Finalizing video...",
    ]

    for i, step in enumerate(progress_steps):
        time.sleep(0.5)  # Simulate processing time
        prefix = "βœ…" if i == len(progress_steps) - 1 else "⏳"
        yield f"{prefix} {step}"

    # Generate placeholder frames (a real implementation would call the WAN
    # API here). The frames are never persisted; they only simulate work.
    num_frames = duration * 10  # 10 fps

    if seed is None:
        seed = random.randint(0, 999999)
    np.random.seed(seed)  # kept for reproducibility parity with the original

    width, height = 512, 512
    # Precompute coordinate grids once. The original filled each frame with
    # nested per-pixel Python loops (O(width*height) interpreter work per
    # frame); broadcasting produces the same pattern in vectorized numpy.
    xs = np.arange(width, dtype=np.float64)
    ys = np.arange(height, dtype=np.float64)
    xg, yg = np.meshgrid(xs, ys)

    frames = []
    for i in range(num_frames):
        frame = np.zeros((height, width, 3), dtype=np.uint8)

        # Animated gradient background. Clip to the uint8 range: the red and
        # green expressions can reach ~331 (factor up to 1.3), and assigning
        # an out-of-range Python int to a uint8 array raises OverflowError on
        # modern numpy — the original per-pixel int() assignments had this bug.
        frame[..., 0] = np.clip(255 * (xg / width) * (1 + 0.3 * np.sin(i * 0.1)), 0, 255).astype(np.uint8)
        frame[..., 1] = np.clip(255 * (yg / height) * (1 + 0.3 * np.cos(i * 0.1)), 0, 255).astype(np.uint8)
        frame[..., 2] = np.clip(128 + 127 * np.sin((xg + yg + i * 10) * 0.01), 0, 255).astype(np.uint8)

        # Moving white disc. Drawn with a numpy mask instead of the original
        # per-frame `cv2 = __import__('cv2')` + cv2.circle, removing both the
        # repeated dynamic import and the OpenCV dependency.
        center_x = width // 2 + int(100 * np.sin(i * 0.05) * motion_strength)
        center_y = height // 2 + int(100 * np.cos(i * 0.05) * motion_strength)
        disc = (xg - center_x) ** 2 + (yg - center_y) ** 2 <= 50 ** 2
        frame[disc] = 255

        frames.append(Image.fromarray(frame))

    # Demo placeholder: return a style-keyed sample URL instead of encoding
    # the frames above into an actual video file.
    sample_videos = {
        "realistic": "https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_1mb.mp4",
        "artistic": "https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_2mb.mp4",
        "anime": "https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_5mb.mp4",
        "cartoon": "https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_1mb.mp4",
    }
    return sample_videos.get(style, sample_videos["realistic"])
96
+
97
def generate_video_from_image(
    image: np.ndarray,
    prompt: str,
    duration: int = 5,
    motion_strength: float = 0.5,
    seed: Optional[int] = None,
) -> str:
    """
    Generate a video from an image with a text prompt describing the motion.

    Args:
        image: Input image as a numpy array (as delivered by
            ``gr.Image(type="numpy")``).
        prompt: Text description of the desired motion/animation.
        duration: Video duration in seconds. Currently unused by the
            simulated backend; kept for interface compatibility.
        motion_strength: Strength of motion in the video (0.0 to 1.0).
            Currently unused by the simulated backend.
        seed: Random seed for reproducible generation. Currently unused by
            the simulated backend.

    Returns:
        URL of the generated (sample) video file.

    Raises:
        gr.Error: If no image is provided or the prompt is empty.
    """
    if image is None:
        raise gr.Error("Please upload an image to generate a video.")

    if not prompt.strip():
        raise gr.Error("Please describe the motion you want to add to the image.")

    # Stage names document what a real backend would do. Unlike
    # generate_video_from_text this function is not a generator, so the names
    # are not surfaced to the UI — we only simulate per-stage latency.
    progress_steps = [
        "Analyzing image...",
        "Extracting features...",
        "Generating motion from prompt...",
        "Creating video frames...",
        "Applying motion blur...",
        "Rendering final video...",
    ]

    for _ in progress_steps:
        time.sleep(0.6)

    # Demo placeholder: a real implementation would animate `image` using
    # `duration`, `motion_strength` and `seed`.
    return "https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_2mb.mp4"
139
+
140
def create_custom_css() -> str:
    """Return the custom CSS stylesheet injected into the Gradio app.

    Covers the page container width, the gradient header banner, the video
    "card" wrapper, the monospace progress box, and the clickable
    example-prompt chips (including their hover state).
    """
    stylesheet = """
    .gradio-container {
        max-width: 1200px !important;
        margin: auto !important;
    }

    .main-header {
        text-align: center;
        margin-bottom: 2rem;
        padding: 1.5rem;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        border-radius: 15px;
        color: white;
    }

    .generation-card {
        border: 2px solid #e5e7eb;
        border-radius: 15px;
        padding: 1.5rem;
        margin: 1rem 0;
        background: white;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
    }

    .progress-container {
        margin: 1rem 0;
        padding: 1rem;
        background: #f3f4f6;
        border-radius: 10px;
        font-family: monospace;
    }

    .example-prompt {
        cursor: pointer;
        padding: 0.5rem;
        margin: 0.25rem;
        background: #f9fafb;
        border: 1px solid #e5e7eb;
        border-radius: 8px;
        transition: all 0.2s;
    }

    .example-prompt:hover {
        background: #ede9fe;
        border-color: #8b5cf6;
    }
    """
    return stylesheet
189
+
190
# Create the custom theme used by the Blocks app below: a purple/indigo Soft
# theme with the Inter Google font, then `.set(...)` overrides for primary
# button fills and block borders (values like "*primary_600" reference the
# theme's own color palette).
custom_theme = gr.themes.Soft(
    primary_hue="purple",
    secondary_hue="indigo",
    neutral_hue="slate",
    font=gr.themes.GoogleFont("Inter"),
    # NOTE(review): recent Gradio documents size strings as "text_lg" /
    # "spacing_lg" / "radius_md"; confirm the short forms "lg"/"md" resolve
    # on the pinned gradio>=6.0.
    text_size="lg",
    spacing_size="lg",
    radius_size="md"
).set(
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    block_title_text_weight="600",
    block_border_width="2px",
    block_border_color="*neutral_200",
)
206
+
207
# Build the Gradio UI: three tabs (text-to-video, image-to-video, gallery)
# plus the launch call. Fixes vs. the original:
#   * removed `api_visibility="public"` from both .click() calls — it is not
#     a Gradio event-listener parameter and raised TypeError at build time;
#   * handle_generate_text now drives the progress generator once and reads
#     the video URL from StopIteration.value (the original called the
#     generator function twice and isinstance-checked a generator object
#     against str, so it always returned None);
#   * the image tab's click is wrapped so both wired outputs (video +
#     progress textbox) receive an update — generate_video_from_image
#     returns a single URL string;
#   * demo.launch() no longer receives `theme`/`footer_links`, which
#     Blocks.launch() does not accept (the theme is already applied on the
#     gr.Blocks constructor).
with gr.Blocks(theme=custom_theme, css=create_custom_css()) as demo:
    # Header banner.
    gr.HTML("""
    <div class="main-header">
        <h1>🎬 WAN Video Generator</h1>
        <p>Transform your ideas into stunning videos with advanced AI technology</p>
        <p style="font-size: 0.9rem; margin-top: 0.5rem;">
            Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: white; text-decoration: underline;">anycoder</a>
        </p>
    </div>
    """)

    with gr.Tabs() as tabs:
        # ------------------------------------------------------------------
        # Text to Video tab
        # ------------------------------------------------------------------
        with gr.TabItem("πŸ“ Text to Video", elem_id="text-to-video"):
            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("### 🎯 Generate Video from Text")

                    prompt_input = gr.Textbox(
                        label="Video Description",
                        placeholder="Describe the video you want to generate... (e.g., 'A serene beach with waves gently lapping at sunset')",
                        lines=3,
                        max_lines=5
                    )

                    with gr.Row():
                        duration_slider = gr.Slider(
                            minimum=2,
                            maximum=30,
                            value=5,
                            step=1,
                            label="Duration (seconds)"
                        )

                        motion_slider = gr.Slider(
                            minimum=0.0,
                            maximum=1.0,
                            value=0.5,
                            step=0.1,
                            label="Motion Strength"
                        )

                    with gr.Row():
                        style_dropdown = gr.Dropdown(
                            choices=["realistic", "artistic", "anime", "cartoon"],
                            value="realistic",
                            label="Video Style"
                        )

                        quality_dropdown = gr.Dropdown(
                            choices=["low", "medium", "high"],
                            value="high",
                            label="Quality"
                        )

                    seed_input = gr.Number(
                        label="Seed (optional)",
                        placeholder="Leave blank for random",
                        precision=0
                    )

                    generate_btn = gr.Button(
                        "🎬 Generate Video",
                        variant="primary",
                        size="lg"
                    )

                with gr.Column(scale=1):
                    gr.Markdown("### πŸ’‘ Example Prompts")

                    example_prompts = [
                        "A majestic eagle soaring through mountain peaks at sunrise",
                        "A bustling city street with neon lights reflecting on wet pavement",
                        "A peaceful garden with butterflies fluttering around colorful flowers",
                        "An astronaut floating in space with Earth in the background",
                        "A cozy fireplace crackling on a snowy winter evening"
                    ]

                    # Clickable chips that copy their text into the first
                    # textbox via inline JS.
                    for prompt in example_prompts:
                        gr.HTML(f'<div class="example-prompt" onclick="document.querySelector(\'[data-testid=\"textbox\"]\').value = `{prompt}`">{prompt}</div>')

                    gr.Markdown("### βš™οΈ Tips")
                    gr.Markdown("""
                    β€’ Be descriptive but concise
                    β€’ Include setting and mood
                    β€’ Specify camera movements if needed
                    β€’ Use seeds for reproducible results
                    """)

            # Progress and output area (hidden until the first generation).
            progress_text = gr.Textbox(
                label="Generation Progress",
                interactive=False,
                visible=False
            )

            with gr.Column(elem_classes="generation-card"):
                gr.Markdown("### πŸ“Ή Generated Video")
                video_output = gr.Video(
                    label="Your Generated Video",
                    visible=False,
                    height=400
                )

            def handle_generate_text(prompt, duration, style, quality, motion, seed):
                """Run text-to-video generation and reveal the result.

                generate_video_from_text is a generator that yields progress
                strings and *returns* the video URL; the return value rides on
                StopIteration.value, so the generator is driven manually here.
                """
                if not prompt.strip():
                    raise gr.Error("Please enter a prompt!")

                gen = generate_video_from_text(prompt, duration, style, quality, motion, seed)
                video_url = None
                while True:
                    try:
                        next(gen)  # progress strings; not streamed to the UI
                    except StopIteration as stop:
                        video_url = stop.value
                        break

                return {
                    video_output: gr.Video(value=video_url, visible=True),
                    progress_text: gr.Textbox(value="βœ… Video generated successfully!", visible=True)
                }

            generate_btn.click(
                fn=handle_generate_text,
                inputs=[prompt_input, duration_slider, style_dropdown, quality_dropdown, motion_slider, seed_input],
                outputs=[video_output, progress_text]
            )

        # ------------------------------------------------------------------
        # Image to Video tab
        # ------------------------------------------------------------------
        with gr.TabItem("πŸ–ΌοΈ Image to Video", elem_id="image-to-video"):
            with gr.Row():
                with gr.Column():
                    gr.Markdown("### 🎨 Animate Your Image")

                    image_input = gr.Image(
                        label="Upload Image",
                        type="numpy",
                        height=300
                    )

                    motion_prompt = gr.Textbox(
                        label="Motion Description",
                        placeholder="Describe how you want the image to move... (e.g., 'Make the clouds drift slowly and the water ripple')",
                        lines=2
                    )

                    with gr.Row():
                        img_duration_slider = gr.Slider(
                            minimum=2,
                            maximum=20,
                            value=5,
                            step=1,
                            label="Duration (seconds)"
                        )

                        img_motion_slider = gr.Slider(
                            minimum=0.0,
                            maximum=1.0,
                            value=0.5,
                            step=0.1,
                            label="Motion Strength"
                        )

                    img_seed_input = gr.Number(
                        label="Seed (optional)",
                        precision=0
                    )

                    img_generate_btn = gr.Button(
                        "🎬 Animate Image",
                        variant="primary",
                        size="lg"
                    )

                with gr.Column():
                    gr.Markdown("### 🎯 Best Practices")
                    gr.Markdown("""
                    β€’ Use high-quality images for best results
                    β€’ Describe motion clearly and specifically
                    β€’ Start with lower motion strength
                    β€’ Images with clear subjects work better
                    β€’ Landscape orientation recommended
                    """)

                    gr.Markdown("### πŸ’‘ Motion Examples")
                    gr.Markdown("""
                    β€’ "Gentle swaying of trees in the wind"
                    β€’ "Water flowing and rippling"
                    β€’ "Clouds moving across the sky"
                    β€’ "Leaves falling from trees"
                    β€’ "Fire flickering and dancing"
                    """)

            img_progress_text = gr.Textbox(
                label="Generation Progress",
                interactive=False,
                visible=False
            )

            img_video_output = gr.Video(
                label="Your Animated Video",
                visible=False,
                height=400
            )

            def handle_generate_image(image, prompt, duration, motion, seed):
                """Wrap generate_video_from_image so BOTH wired outputs get a
                value (the underlying function returns only the URL string)."""
                video_url = generate_video_from_image(image, prompt, duration, motion, seed)
                return {
                    img_video_output: gr.Video(value=video_url, visible=True),
                    img_progress_text: gr.Textbox(value="βœ… Video generated successfully!", visible=True)
                }

            img_generate_btn.click(
                fn=handle_generate_image,
                inputs=[image_input, motion_prompt, img_duration_slider, img_motion_slider, img_seed_input],
                outputs=[img_video_output, img_progress_text]
            )

        # ------------------------------------------------------------------
        # Gallery tab (static showcase content)
        # ------------------------------------------------------------------
        with gr.TabItem("🎭 Gallery", elem_id="gallery"):
            gr.Markdown("### 🌟 Featured Generations")

            with gr.Row():
                with gr.Column():
                    gr.Video(
                        label="Ocean Waves",
                        value="https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_1mb.mp4",
                        height=250
                    )
                    gr.Markdown("**Ocean Waves** - 'Peaceful ocean waves crashing on a sandy beach'")

                with gr.Column():
                    gr.Video(
                        label="Forest Path",
                        value="https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_2mb.mp4",
                        height=250
                    )
                    gr.Markdown("**Forest Path** - 'A misty forest path with sunlight filtering through trees'")

                with gr.Column():
                    gr.Video(
                        label="City Lights",
                        value="https://sample-videos.com/zip/10/mp4/SampleVideo_1280x720_5mb.mp4",
                        height=250
                    )
                    gr.Markdown("**City Lights** - 'Neon city lights at night with traffic flowing'")

            gr.Markdown("### πŸ“Š Generation Statistics")
            gr.DataFrame(
                value=[
                    ["Total Videos Generated", "1,234,567"],
                    ["Average Generation Time", "45 seconds"],
                    ["Most Popular Style", "Realistic"],
                    ["Average Duration", "8 seconds"],
                    ["Success Rate", "98.5%"]
                ],
                headers=["Metric", "Value"],
                datatype=["str", "str"],
                interactive=False
            )

# Launch the application. The theme is applied on the gr.Blocks constructor
# above; Blocks.launch() accepts neither `theme` nor `footer_links`.
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ numpy
2
+ Pillow
3
+ gradio>=6.0
4
+ scipy
5
+ pandas
6
+ opencv-python
7
+ matplotlib
8
+ requests