import gradio as gr
import torch
import cv2
from PIL import Image
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import warnings

warnings.filterwarnings("ignore")

# Global variables
model = None
tokenizer = None
device = "cuda" if torch.cuda.is_available() else "cpu"
model_loaded = False

# Request a longer ZeroGPU window (in seconds): loading a 7B checkpoint in
# 4-bit can exceed the default allocation
@spaces.GPU(duration=300)
def load_videollama3_model():
    """Load VideoLLaMA3 model with proper configuration"""
    global model, tokenizer, model_loaded
    
    try:
        print("πŸ”„ Loading VideoLLaMA3-7B model...")
        
        model_name = "DAMO-NLP-SG/VideoLLaMA3-7B"
        
        # Configure quantization to fit in GPU memory
        quantization_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4"
        )
        
        # Load tokenizer
        print("Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
            use_fast=False
        )
        
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        
        # Load model
        print("Loading VideoLLaMA3 model (this may take several minutes)...")
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            quantization_config=quantization_config,
            device_map="auto",
            torch_dtype=torch.float16,
            trust_remote_code=True,
            low_cpu_mem_usage=True,
            # flash_attention_2 requires the flash-attn package to be installed;
            # switch to "sdpa" if it is unavailable in this environment
            attn_implementation="flash_attention_2"
        )
        
        model_loaded = True
        success_msg = "βœ… VideoLLaMA3-7B model loaded successfully! You can now analyze videos with AI."
        print(success_msg)
        return success_msg
        
    except Exception as e:
        model_loaded = False
        error_msg = f"❌ Failed to load VideoLLaMA3: {str(e)}"
        print(error_msg)
        return error_msg
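
# The VideoLLaMA3 checkpoint also ships a multimodal processor, which the
# loader above does not fetch. A minimal sketch of loading it, assuming the
# AutoProcessor support shown on the DAMO-NLP-SG/VideoLLaMA3-7B model card
# (verify against the current README before relying on it):
def load_videollama3_processor(model_name="DAMO-NLP-SG/VideoLLaMA3-7B"):
    """Load the multimodal processor that pairs with the VideoLLaMA3 model."""
    from transformers import AutoProcessor
    # trust_remote_code is needed because the processor class lives in the
    # checkpoint repository, not in the transformers library itself
    return AutoProcessor.from_pretrained(model_name, trust_remote_code=True)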

def extract_video_frames(video_path, max_frames=16, target_fps=1):
    """Extract frames from video for VideoLLaMA3 processing"""
    try:
        cap = cv2.VideoCapture(video_path)
        original_fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = total_frames / original_fps if original_fps > 0 else 0
        
        if total_frames == 0:
            return [], None
        
        # Sample roughly target_fps frames per second, capped at max_frames
        # (e.g. a 30 fps source at target_fps=1 keeps every 30th frame)
        frame_interval = max(1, int(original_fps / target_fps))
        frame_indices = list(range(0, total_frames, frame_interval))[:max_frames]
        
        frames = []
        
        for idx in frame_indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
            ret, frame = cap.read()
            if ret:
                # Convert BGR (OpenCV's default) to RGB for PIL
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                # Downscale so the longest side is at most 720 px
                height, width = frame_rgb.shape[:2]
                if max(height, width) > 720:
                    scale = 720 / max(height, width)
                    new_height, new_width = int(height * scale), int(width * scale)
                    frame_rgb = cv2.resize(frame_rgb, (new_width, new_height))
                
                frames.append(Image.fromarray(frame_rgb))
        
        # Read the source resolution from the container so it stays defined
        # even when no frame could be decoded
        src_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        src_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        cap.release()
        
        if not frames:
            return [], None
        
        video_info = {
            "duration": duration,
            "original_fps": original_fps,
            "total_frames": total_frames,
            "extracted_frames": len(frames),
            "resolution": f"{src_width}x{src_height}"
        }
        
        return frames, video_info
        
    except Exception as e:
        print(f"Error extracting frames: {e}")
        return [], None
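
# Quick sanity check for the sampler above (hypothetical local file path):
#
#   frames, info = extract_video_frames("sample.mp4", max_frames=8)
#   if info:
#       print(f"{info['extracted_frames']} frames at {info['resolution']}")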

@spaces.GPU
def analyze_video_with_ai(video_file, question, progress=gr.Progress()):
    """Analyze video using VideoLLaMA3 model"""
    
    if video_file is None:
        return "❌ Please upload a video file first."
    
    if not question.strip():
        return "❌ Please enter a question about the video."
    
    if not model_loaded:
        return "❌ VideoLLaMA3 model is not loaded. Please click 'Load VideoLLaMA3 Model' first and wait for it to complete."
    
    try:
        progress(0.1, desc="Extracting video frames...")
        
        # Extract frames from video
        frames, video_info = extract_video_frames(video_file, max_frames=16)
        
        if not frames or video_info is None:
            return "❌ Could not process video. Please check the video format and try again."
        
        progress(0.3, desc="Preparing AI input...")
        
        # NOTE: this code path only feeds the model a text prompt built from
        # the video metadata; the extracted frames are never passed to the
        # model, so the answer cannot be grounded in the actual visuals. See
        # the hedged multimodal sketch after this function for the vision path.
        # Create a detailed prompt describing the video for the text-only model
        system_prompt = "You are VideoLLaMA3, an advanced AI assistant specialized in video understanding. Analyze the video frames and provide detailed, accurate responses about the video content."
        
        user_prompt = f"""I have a video with the following specifications:
- Duration: {video_info['duration']:.1f} seconds
- Original FPS: {video_info['original_fps']:.1f}
- Total frames: {video_info['total_frames']}
- Analyzed frames: {video_info['extracted_frames']}
- Resolution: {video_info['resolution']}

Question: {question}

Please analyze the video content and provide a comprehensive answer based on what you observe in the video frames."""

        progress(0.5, desc="Processing with VideoLLaMA3...")
        
        # Prepare conversation format
        conversation = f"System: {system_prompt}\n\nHuman: {user_prompt}\n\nAssistant:"
        
        # Tokenize input
        inputs = tokenizer(
            conversation,
            return_tensors="pt",
            max_length=2048,
            truncation=True,
            padding=True
        ).to(device)
        
        progress(0.7, desc="Generating AI response...")
        
        # Generate response
        with torch.no_grad():
            output_ids = model.generate(
                **inputs,
                max_new_tokens=512,
                temperature=0.7,
                do_sample=True,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id
            )
        
        # Decode response
        full_response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        
        # Extract just the assistant's response
        if "Assistant:" in full_response:
            ai_response = full_response.split("Assistant:")[-1].strip()
        else:
            ai_response = full_response.split(conversation)[-1].strip()
        
        progress(0.9, desc="Formatting results...")
        
        # Format the final response
        formatted_response = f"""🎥 **VideoLLaMA3 AI Video Analysis**

❓ **Your Question:**
{question}

🤖 **AI Analysis:**
{ai_response}

📊 **Video Information:**
• Duration: {video_info['duration']:.1f} seconds
• Frame Rate: {video_info['original_fps']:.1f} FPS
• Total Frames: {video_info['total_frames']:,}
• Analyzed Frames: {video_info['extracted_frames']}
• Resolution: {video_info['resolution']}

⚡ **Powered by:** VideoLLaMA3-7B (Multimodal AI)
"""
        
        progress(1.0, desc="Analysis complete!")
        
        return formatted_response
        
    except torch.cuda.OutOfMemoryError:
        torch.cuda.empty_cache()
        return "❌ GPU memory error. Please try with a shorter video or restart the space."
    except Exception as e:
        error_msg = f"❌ Error during video analysis: {str(e)}"
        print(error_msg)
        return error_msg
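
# --- Hedged sketch: vision-grounded inference --------------------------------
# analyze_video_with_ai() above is text-only, so its answers cannot reflect
# what is actually in the frames. Based on the usage documented on the
# DAMO-NLP-SG/VideoLLaMA3-7B model card, multimodal inference goes through the
# checkpoint's processor, which decodes and samples frames itself. The
# conversation schema and pixel_values handling below follow that model card
# and should be verified against its current README; `processor` is the object
# returned by load_videollama3_processor() above.
def analyze_video_multimodal_sketch(video_path, question, processor):
    """Sketch of multimodal VideoLLaMA3 inference (assumes model is loaded)."""
    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {
            "role": "user",
            "content": [
                # The processor handles video decoding and frame sampling
                {"type": "video", "video": {"video_path": video_path, "fps": 1, "max_frames": 16}},
                {"type": "text", "text": question},
            ],
        },
    ]
    inputs = processor(conversation=conversation, return_tensors="pt")
    inputs = {k: v.to(device) if isinstance(v, torch.Tensor) else v for k, v in inputs.items()}
    if "pixel_values" in inputs:
        # Match the dtype used when the model was loaded
        inputs["pixel_values"] = inputs["pixel_values"].to(torch.float16)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=512)
    return processor.batch_decode(output_ids, skip_special_tokens=True)[0].strip()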

def create_interface():
    """Create the Gradio interface"""
    
    with gr.Blocks(title="VideoLLaMA3 AI Analyzer", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# πŸŽ₯ VideoLLaMA3 AI Video Analysis Tool")
        gr.Markdown("Upload videos and get detailed AI-powered analysis using VideoLLaMA3-7B!")
        
        # Model loading section
        with gr.Row():
            with gr.Column(scale=3):
                model_status = gr.Textbox(
                    label="πŸ€– Model Status", 
                    value="Model not loaded - Click the button to load VideoLLaMA3-7B β†’",
                    interactive=False,
                    lines=2
                )
            with gr.Column(scale=1):
                load_btn = gr.Button("🚀 Load VideoLLaMA3 Model", variant="primary", size="lg")
        
        load_btn.click(load_videollama3_model, outputs=model_status)
        
        gr.Markdown("---")
        
        # Main interface
        with gr.Row():
            with gr.Column(scale=1):
                video_input = gr.Video(
                    label="πŸ“Ή Upload Video (MP4, AVI, MOV, WebM)",
                    height=350
                )
                question_input = gr.Textbox(
                    label="❓ Ask about the video",
                    placeholder="What is happening in this video? Describe it in detail.",
                    lines=3,
                    max_lines=5
                )
                analyze_btn = gr.Button("🔍 Analyze Video with AI", variant="primary", size="lg")
                
            with gr.Column(scale=1):
                output = gr.Textbox(
                    label="🎯 AI Analysis Results",
                    lines=25,
                    max_lines=30,
                    show_copy_button=True
                )
        
        # Example questions
        gr.Markdown("### πŸ’‘ Example Questions (click to use):")
        
        example_questions = [
            "What is happening in this video? Describe the scene in detail.",
            "Who are the people in this video and what are they doing?",
            "Describe the setting, location, and environment shown.",
            "What objects, animals, or items can you see in the video?",
            "What is the mood, atmosphere, or emotion conveyed?",
            "Summarize the key events that occur chronologically."
        ]
        
        with gr.Row():
            for i in range(0, len(example_questions), 2):
                with gr.Column():
                    # Bind each question via a default argument so every lambda
                    # captures its own string rather than the loop variable
                    btn1 = gr.Button(example_questions[i], size="sm")
                    btn1.click(lambda x=example_questions[i]: x, outputs=question_input)
                    if i + 1 < len(example_questions):
                        btn2 = gr.Button(example_questions[i+1], size="sm")
                        btn2.click(lambda x=example_questions[i+1]: x, outputs=question_input)
        
        # Connect the analyze button
        analyze_btn.click(
            analyze_video_with_ai,
            inputs=[video_input, question_input],
            outputs=output,
            show_progress=True
        )
        
        gr.Markdown("---")
        gr.Markdown("""
        ### πŸ“‹ Instructions:
        1. **First:** Click "Load VideoLLaMA3 Model" and wait for it to complete (~5-10 minutes)
        2. **Then:** Upload your video file (keep it under 2 minutes for best results)  
        3. **Ask:** Type your question about the video content
        4. **Analyze:** Click "Analyze Video with AI" to get detailed insights
        
        πŸ’‘ **Tips:** 
        - Shorter videos (30s-2min) work best
        - Ask specific questions for better results
        - Try different question styles to explore the AI's capabilities
        """)
    
    return demo

if __name__ == "__main__":
    demo = create_interface()
    demo.launch()