import gradio as gr
import torch
import cv2
import numpy as np
from PIL import Image
import spaces
from transformers import BlipProcessor, BlipForConditionalGeneration, AutoTokenizer, AutoModelForCausalLM
import warnings
warnings.filterwarnings("ignore")

# Global variables
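# Model handles are populated once by load_models() and reused across requests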
vision_model = None
vision_processor = None
text_model = None
text_tokenizer = None
device = "cuda" if torch.cuda.is_available() else "cpu"
model_loaded = False

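# @spaces.GPU requests GPU time per call on Hugging Face ZeroGPU Spaces;
# it is a no-op when the Space runs on dedicated (or CPU-only) hardware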
@spaces.GPU
def load_models():
    """Load BLIP for vision and a language model for analysis"""
    global vision_model, vision_processor, text_model, text_tokenizer, model_loaded
    
    try:
        print("πŸ”„ Loading AI models for video analysis...")
        
        # Load BLIP for image understanding
        print("Loading BLIP vision model...")
        vision_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
        vision_model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-large",
            torch_dtype=torch.float16,
            device_map="auto"
        )
        
        # Load a conversational model (note: the synthesis step below is
        # rule-based and does not currently call this model)
        print("Loading language model...")
        text_tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
        text_model = AutoModelForCausalLM.from_pretrained(
            "microsoft/DialoGPT-medium",
            torch_dtype=torch.float16,
            device_map="auto"
        )
        
        # Add padding token if needed
        if text_tokenizer.pad_token is None:
            text_tokenizer.pad_token = text_tokenizer.eos_token
        
        model_loaded = True
        success_msg = "βœ… AI models loaded successfully! You can now analyze videos."
        print(success_msg)
        return success_msg
        
    except Exception as e:
        model_loaded = False
        error_msg = f"❌ Failed to load models: {str(e)}"
        print(error_msg)
        return error_msg

def extract_key_frames(video_path, max_frames=8):
    """Extract key frames from video"""
    try:
        cap = cv2.VideoCapture(video_path)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        duration = total_frames / fps if fps > 0 else 0
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        
        if total_frames == 0:
            cap.release()
            return [], None, []
        
        # Get evenly spaced frames
        frame_indices = np.linspace(0, total_frames-1, min(max_frames, total_frames), dtype=int)
        frames = []
        timestamps = []
        
        for frame_idx in frame_indices:
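            # Seek directly to the target frame before reading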
            cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
            ret, frame = cap.read()
            if ret:
                # Convert BGR to RGB
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                
                # Resize if too large
                if max(width, height) > 512:
                    scale = 512 / max(width, height)
                    new_width = int(width * scale)
                    new_height = int(height * scale)
                    frame_rgb = cv2.resize(frame_rgb, (new_width, new_height))
                
                frames.append(Image.fromarray(frame_rgb))
                timestamp = frame_idx / fps if fps > 0 else frame_idx
                timestamps.append(timestamp)
        
        cap.release()
        
        video_info = {
            "duration": duration,
            "fps": fps,
            "total_frames": total_frames,
            "resolution": f"{width}x{height}",
            "extracted_frames": len(frames)
        }
        
        return frames, video_info, timestamps
        
    except Exception as e:
        print(f"Error extracting frames: {e}")
        return [], None, []

@spaces.GPU
def analyze_frame_with_blip(frame, custom_question=None):
    """Analyze a single frame with BLIP"""
    try:
        if custom_question:
            # Prompted captioning: this BLIP checkpoint is a captioning model,
            # not a true VQA model; the text prompt steers the generated caption
            inputs = vision_processor(frame, custom_question, return_tensors="pt").to(device)
        else:
            # Plain image captioning
            inputs = vision_processor(frame, return_tensors="pt").to(device)

        # Match the model's half-precision weights to avoid a float32/float16
        # mismatch between the processor's pixel values and the fp16 model
        if "pixel_values" in inputs:
            inputs["pixel_values"] = inputs["pixel_values"].to(vision_model.dtype)
        
        with torch.no_grad():
            if custom_question:
                output_ids = vision_model.generate(**inputs, max_new_tokens=100)
            else:
                output_ids = vision_model.generate(**inputs, max_new_tokens=50)
        
        caption = vision_processor.decode(output_ids[0], skip_special_tokens=True)
        return caption
        
    except Exception as e:
        return f"Error analyzing frame: {str(e)}"

def synthesize_video_analysis(frame_descriptions, timestamps, question, video_info):
    """Create comprehensive video analysis from frame descriptions"""
    
    # Combine all frame descriptions
    all_descriptions = " ".join(frame_descriptions)
    
    # Create analysis based on question type
    question_lower = question.lower()
    
    analysis = f"""πŸŽ₯ **AI Video Analysis**

❓ **Your Question:** {question}

πŸ€– **Detailed Analysis:**

"""
    
    if any(word in question_lower for word in ['what', 'happening', 'describe', 'see']):
        analysis += f"Based on my analysis of {len(frame_descriptions)} key frames from the video:\n\n"
        
        # Use the actual extraction timestamps rather than an even-spacing estimate
        for desc, timestamp in zip(frame_descriptions, timestamps):
            analysis += f"• **At {timestamp:.1f}s:** {desc}\n"
        
        analysis += f"\n**Overall Summary:** This {video_info['duration']:.1f}-second video shows {all_descriptions.lower()}. "
        
        # Add contextual insights
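        # Heuristic: if fewer than ~30% of the captions are unique, the frames
        # likely depict the same scene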
        if len(set(frame_descriptions)) < len(frame_descriptions) * 0.3:
            analysis += "The scene appears relatively static with consistent elements throughout."
        else:
            analysis += "The video shows dynamic content with changing scenes and activities."
    
    elif any(word in question_lower for word in ['people', 'person', 'human', 'who']):
        people_mentions = [desc for desc in frame_descriptions if any(word in desc.lower() for word in ['person', 'people', 'man', 'woman', 'child', 'human'])]
        if people_mentions:
            analysis += f"**People in the video:** {' '.join(people_mentions)}\n\n"
        else:
            analysis += "**People analysis:** No clear human figures were detected in the analyzed frames.\n\n"
    
    elif any(word in question_lower for word in ['object', 'item', 'thing']):
        analysis += "**Objects and items visible:**\n"
        for desc in frame_descriptions:
            analysis += f"β€’ {desc}\n"
    
    elif any(word in question_lower for word in ['setting', 'location', 'place', 'where']):
        analysis += "**Setting and location analysis:**\n"
        analysis += f"Based on the visual elements: {all_descriptions}\n\n"
    
    elif any(word in question_lower for word in ['mood', 'emotion', 'feeling', 'atmosphere']):
        analysis += "**Mood and atmosphere:**\n"
        analysis += f"The visual elements suggest: {all_descriptions}\n\n"
    
    else:
        # General analysis
        analysis += "**Frame-by-frame analysis:**\n"
        for i, desc in enumerate(frame_descriptions):
            analysis += f"{i+1}. {desc}\n"
    
    return analysis

@spaces.GPU
def analyze_video_with_ai(video_file, question, progress=gr.Progress()):
    """Main video analysis function"""
    
    if video_file is None:
        return "❌ Please upload a video file first."
    
    if not question.strip():
        return "❌ Please enter a question about the video."
    
    if not model_loaded:
        return "❌ AI models are not loaded. Please click 'Load AI Models' first and wait for completion."
    
    try:
        progress(0.1, desc="Extracting video frames...")
        
        # Extract frames
        frames, video_info, timestamps = extract_key_frames(video_file, max_frames=8)
        
        if not frames or video_info is None:
            return "❌ Could not process video. Please check the video format."
        
        progress(0.3, desc="Analyzing frames with AI...")
        
        # Analyze each frame
        frame_descriptions = []
        for i, frame in enumerate(frames):
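            # Map per-frame progress into the 30-80% band of the overall bar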
            progress(0.3 + (i / len(frames)) * 0.5, desc=f"Analyzing frame {i+1}/{len(frames)}...")
            
            # Create frame-specific question if relevant
            if any(word in question.lower() for word in ['what', 'describe', 'see', 'happening']):
                frame_question = f"What do you see in this image? {question}"
                description = analyze_frame_with_blip(frame, frame_question)
            else:
                description = analyze_frame_with_blip(frame)
            
            frame_descriptions.append(description)
        
        progress(0.8, desc="Synthesizing analysis...")
        
        # Create comprehensive analysis
        analysis = synthesize_video_analysis(frame_descriptions, timestamps, question, video_info)
        
        # Add technical information
        analysis += f"""

πŸ“Š **Technical Information:**
β€’ Duration: {video_info['duration']:.1f} seconds
β€’ Frame Rate: {video_info['fps']:.1f} FPS
β€’ Total Frames: {video_info['total_frames']:,}
β€’ Analyzed Frames: {video_info['extracted_frames']}
β€’ Resolution: {video_info['resolution']}

⚑ **Powered by:** BLIP Vision AI + Advanced Analysis
"""
        
        progress(1.0, desc="Analysis complete!")
        
        return analysis
        
    except Exception as e:
        error_msg = f"❌ Error during analysis: {str(e)}"
        print(error_msg)
        return error_msg

def create_interface():
    """Create the Gradio interface"""
    
    with gr.Blocks(title="AI Video Analyzer", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# πŸŽ₯ AI Video Analysis Tool")
        gr.Markdown("Upload videos and get detailed AI-powered analysis using advanced computer vision!")
        
        # Model loading section
        with gr.Row():
            with gr.Column(scale=3):
                model_status = gr.Textbox(
                    label="πŸ€– Model Status", 
                    value="Models not loaded - Click the button to load AI models β†’",
                    interactive=False,
                    lines=2
                )
            with gr.Column(scale=1):
                load_btn = gr.Button("πŸš€ Load AI Models", variant="primary", size="lg")
        
        load_btn.click(load_models, outputs=model_status)
        
        gr.Markdown("---")
        
        # Main interface
        with gr.Row():
            with gr.Column(scale=1):
                video_input = gr.Video(
                    label="πŸ“Ή Upload Video (MP4, AVI, MOV, WebM)",
                    height=350
                )
                question_input = gr.Textbox(
                    label="❓ Ask about the video",
                    placeholder="What is happening in this video? Describe it in detail.",
                    lines=3,
                    max_lines=5
                )
                analyze_btn = gr.Button("πŸ” Analyze Video with AI", variant="primary", size="lg")
                
            with gr.Column(scale=1):
                output = gr.Textbox(
                    label="🎯 AI Analysis Results",
                    lines=25,
                    max_lines=30,
                    show_copy_button=True
                )
        
        # Example questions
        gr.Markdown("### πŸ’‘ Example Questions (click to use):")
        
        example_questions = [
            "What is happening in this video? Describe the scene in detail.",
            "Who are the people in this video and what are they doing?",
            "Describe the setting, location, and environment shown.",
            "What objects, animals, or items can you see in the video?",
            "What is the mood, atmosphere, or emotion conveyed?",
            "Summarize the key events that occur chronologically."
        ]
        
        with gr.Row():
            # Two questions per column; binding each string as a lambda default
            # argument captures it by value rather than by the loop variable
            for i in range(0, len(example_questions), 2):
                with gr.Column():
                    btn1 = gr.Button(example_questions[i], size="sm")
                    btn1.click(lambda q=example_questions[i]: q, outputs=question_input)
                    if i + 1 < len(example_questions):
                        btn2 = gr.Button(example_questions[i + 1], size="sm")
                        btn2.click(lambda q=example_questions[i + 1]: q, outputs=question_input)
        
        # Connect the analyze button
        analyze_btn.click(
            analyze_video_with_ai,
            inputs=[video_input, question_input],
            outputs=output,
            show_progress="full"
        )
        
        gr.Markdown("---")
        gr.Markdown("""
        ### πŸ“‹ Instructions:
        1. **First:** Click "Load AI Models" and wait for it to complete (~3-5 minutes)
        2. **Then:** Upload your video file (works with most formats)  
        3. **Ask:** Type your question about the video content
        4. **Analyze:** Click "Analyze Video with AI" to get detailed insights
        
        πŸ’‘ **How it works:** 
        - Extracts key frames from your video
        - Analyzes each frame with BLIP vision AI
        - Synthesizes comprehensive analysis based on your question
        - Works reliably with standard video formats
        """)
    
    return demo

if __name__ == "__main__":
    demo = create_interface()
    demo.launch()