garyuzair committed on
Commit 8bdae3e · verified · 1 Parent(s): 05c631c

Create app.py

Files changed (1)
  1. app.py +126 -0
app.py ADDED
@@ -0,0 +1,126 @@
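+ # Gradio app: caption each uploaded clip with BLIP, expand the caption into
+ # a sound-effect prompt, synthesize matching audio with MusicGen-small,
+ # overlay optional top/bottom text, and concatenate everything into one
+ # POV-style video. Assumes MoviePy 1.x, which still ships moviepy.editor.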
+ import gradio as gr
+ import tempfile
+ import os
+ import numpy as np
+ from PIL import Image
+ from moviepy.editor import (
+     VideoFileClip, concatenate_videoclips, CompositeVideoClip,
+     TextClip, AudioFileClip, afx
+ )
+ from transformers import AutoProcessor, BlipForConditionalGeneration, MusicgenForConditionalGeneration
+ import torch
+ import soundfile as sf
+
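+ # enhance_prompt scans the BLIP caption for action, object, and environment
+ # keywords and appends a matching sound phrase for each match.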
+ def enhance_prompt(base_description):
+     base = base_description.lower().strip()
+     actions = {
+         "walk": "crisp footsteps on a wooden floor",
+         "run": "rapid footsteps and heavy breathing",
+         "drive": "engine roar and tires screeching",
+         "talk": "soft voices and background murmur",
+         "crash": "loud crash and debris scattering",
+         "fall": "thud of impact and rustling debris"
+     }
+     objects = {
+         "person": "human activity with subtle breathing",
+         "dog": "playful barks and pawsteps",
+         "car": "mechanical hum and tire friction",
+         "tree": "rustling leaves in a breeze",
+         "forest": "gentle wind and distant bird calls"
+     }
+     environments = {
+         "room": "echoing footsteps and muffled sounds",
+         "street": "distant traffic and urban hum",
+         "forest": "wind through trees and twigs snapping",
+         "outside": "open air with faint wind"
+     }
+
+     sound_description = next((sound for k, sound in actions.items() if k in base), "subtle ambient hum")
+     sound_description += next((f" and {sound}" for k, sound in objects.items() if k in base), "")
+     sound_description += next((f" in a {env} with {sound}" for env, sound in environments.items() if env in base), "")
+
+     return f"{base} with {sound_description}"
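+ # e.g. the caption "a dog running in the forest" becomes the prompt
+ # "a dog running in the forest with rapid footsteps and heavy breathing and
+ # playful barks and pawsteps in a forest with wind through trees and twigs snapping"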
+
+ @torch.inference_mode()
+ def generate_caption(frame, processor, model):
+     inputs = processor(images=frame, return_tensors="pt")
+     if torch.cuda.is_available():
+         # Keep the model in float32: BLIP's pixel inputs arrive as float32,
+         # and a .half() model would hit a dtype mismatch in the vision tower.
+         inputs = {k: v.to("cuda") for k, v in inputs.items()}
+         model = model.to("cuda")
+     out = model.generate(**inputs)
+     return processor.decode(out[0], skip_special_tokens=True)
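+
+ # Text-to-audio step: MusicGen-small renders the enhanced prompt into a
+ # short effects/ambience track and writes it to a temporary WAV file.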
+ def generate_audio(prompt, musicgen_processor, musicgen_model, duration):
+     inputs = musicgen_processor(text=[prompt], padding=True, return_tensors="pt")
+     if torch.cuda.is_available():
+         inputs = {k: v.to("cuda") for k, v in inputs.items()}
+         musicgen_model = musicgen_model.to("cuda").half()
+     # MusicGen emits roughly 50 audio tokens per second, so size the token
+     # budget to the clip length (capped near the model's ~30 s maximum).
+     max_new_tokens = min(int(duration * 50) + 50, 1500)
+     audio_values = musicgen_model.generate(
+         **inputs, max_new_tokens=max_new_tokens, do_sample=True,
+         guidance_scale=3.0, top_k=50, top_p=0.95
+     )
+     audio_array = audio_values[0].cpu().float().numpy().flatten()
+     peak = np.max(np.abs(audio_array))
+     if peak > 0:  # guard against division by zero on silent output
+         audio_array = audio_array / peak * 0.9
+     audio_array = np.clip(audio_array, -1.0, 1.0)
+     temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
+     # 32 kHz is musicgen-small's output rate (config.audio_encoder.sampling_rate)
+     sf.write(temp_audio.name, audio_array, 32000)
+     return temp_audio.name
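+
+ # NOTE: the BLIP and MusicGen models are re-instantiated on every call;
+ # loading them once at module level would make repeated runs much faster.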
+ def process_clips(video_files, top_texts, bottom_texts):
+     processor_blip = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+     model_blip = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+     processor_music = AutoProcessor.from_pretrained("facebook/musicgen-small")
+     model_music = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
+
+     clips = []
+     temp_audio_paths = []
+     for i, file in enumerate(video_files):
+         clip = VideoFileClip(file.name)
+         # Caption the first frame; reading it from the already-open clip
+         # avoids decoding the file a second time through imageio.
+         frame = Image.fromarray(clip.get_frame(0))
+         caption = generate_caption(frame, processor_blip, model_blip)
+         prompt = enhance_prompt(caption)
+
+         # Generate audio and loop/trim it to exactly the clip's duration.
+         audio_path = generate_audio(prompt, processor_music, model_music, clip.duration)
+         temp_audio_paths.append(audio_path)
+         audio_clip = AudioFileClip(audio_path).fx(afx.audio_loop, duration=clip.duration)
+         clip = clip.set_audio(audio_clip)
+
+         # Add text overlays
+         overlays = [clip]
+         if top_texts[i]:
+             top_txt = TextClip(top_texts[i], fontsize=30, color='white').set_position("top").set_duration(clip.duration)
+             overlays.append(top_txt)
+         if bottom_texts[i]:
+             btm_txt = TextClip(bottom_texts[i], fontsize=30, color='white').set_position("bottom").set_duration(clip.duration)
+             overlays.append(btm_txt)
+         clip = CompositeVideoClip(overlays).fadein(0.5).fadeout(0.5)
+         clips.append(clip)
+
+     final_video = concatenate_videoclips(clips, method="compose")
+     output_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
+     final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")
+     for path in temp_audio_paths:
+         os.unlink(path)  # clean up the intermediate WAV files
+     return output_path
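+ # TextClip in MoviePy 1.x renders text via ImageMagick, which must be
+ # installed on the host or the overlays above will fail at runtime.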
+
+ def launch_interface():
+     with gr.Blocks() as demo:
+         gr.Markdown("# POV Video Generator with AI Sound Effects")
+         video_files = gr.File(file_types=[".mp4"], file_count="multiple", label="Upload your video clips")
+         top_texts = gr.Textbox(label="Top Texts (comma-separated, one per clip)")
+         bottom_texts = gr.Textbox(label="Bottom Texts (comma-separated, one per clip)")
+         generate_btn = gr.Button("Generate Final POV Video")
+         output_video = gr.Video()
+
+         def run(files, tops, bottoms):
+             if not files:
+                 raise gr.Error("Please upload at least one video clip.")
+             top_list = [t.strip() for t in tops.split(",")] if tops else ["" for _ in files]
+             bottom_list = [b.strip() for b in bottoms.split(",")] if bottoms else ["" for _ in files]
+             # Pad both lists so every clip has a (possibly empty) caption.
+             while len(top_list) < len(files):
+                 top_list.append("")
+             while len(bottom_list) < len(files):
+                 bottom_list.append("")
+             return process_clips(files, top_list, bottom_list)
+
+         generate_btn.click(fn=run, inputs=[video_files, top_texts, bottom_texts], outputs=output_video)
+
+     demo.launch()
+
+ if __name__ == "__main__":
+     launch_interface()