Spaces:
Running
Running
File size: 5,472 Bytes
8a74c03 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 |
#!/usr/bin/env python3
"""
Test video processing with local AI models
"""
# Standard library
import os
import sys
import tempfile
from io import BytesIO

# Third-party
from PIL import Image

# Make sibling modules (app.py, local_models.py) importable when this
# script is run from anywhere.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

try:
    from app import extract_frames_from_video, process_image_locally
    from local_models import get_local_model_manager
except ImportError as e:
    print(f"- Import error: {e}")
    sys.exit(1)
else:
    print("+ Successfully imported app components")
def _caption_frames(model_name, frames, prompt, local_manager):
    """Run one model over the sampled frames.

    Returns a list of dicts with keys 'frame', 'timestamp', 'caption' —
    one entry per frame that produced a caption without error.
    """
    print(f"\n🤖 Testing {model_name}")
    print("-" * 30)
    model_results = []
    for i, frame_data in enumerate(frames):
        print(f"Processing frame {i+1}/{len(frames)} (t={frame_data['timestamp']:.1f}s)...")
        try:
            result = process_image_locally(
                frame_data['frame'],
                prompt,
                model_name,
                local_manager
            )
            if 'error' in result:
                print(f" - Error: {result['error']}")
            else:
                caption = result.get('generated_text', 'No caption')
                print(f" + Result: {caption}")
                model_results.append({
                    'frame': i,
                    'timestamp': frame_data['timestamp'],
                    'caption': caption
                })
        except Exception as e:
            # Best-effort: a failure on one frame must not abort the run.
            print(f" - Exception: {e}")
    return model_results


def _print_summary(results):
    """Print a per-model summary of captioning results."""
    print("\n" + "=" * 50)
    print("PROCESSING SUMMARY")
    print("=" * 50)
    for model_name, model_results in results.items():
        print(f"\n{model_name}:")
        if model_results:
            print(f" + Successfully processed {len(model_results)} frames")
            for result in model_results:
                print(f" Frame {result['frame']} ({result['timestamp']:.1f}s): {result['caption'][:60]}...")
        else:
            print(" - No successful results")


def test_video_processing_with_ai():
    """Test video processing with local AI models.

    Finds an .mp4 in the current directory, extracts a few frames, and
    captions them with every available local model.

    Returns:
        True when at least one model produced at least one caption,
        False on any setup failure (no video, model init error, no frames).
    """
    print("Testing Video Processing with Local AI Models")
    print("=" * 50)

    # Find video file
    video_files = [f for f in os.listdir('.') if f.endswith('.mp4')]
    if not video_files:
        print("- No MP4 files found")
        return False
    video_path = video_files[0]
    print(f"+ Using video: {video_path}")

    # Initialize local model manager
    print("\nInitializing AI models...")
    try:
        local_manager = get_local_model_manager()
        available_models = local_manager.get_available_models()
        print(f"+ Available models: {available_models}")
    except Exception as e:
        print(f"- Error initializing models: {e}")
        return False

    # Load video and extract frames
    print("\nExtracting frames from video...")
    try:
        with open(video_path, 'rb') as f:
            video_data = f.read()
        video_file = BytesIO(video_data)
        frames = extract_frames_from_video(video_file, fps=0.2)  # 1 frame every 5 seconds
        if not frames:
            print("- No frames extracted")
            return False
        print(f"+ Extracted {len(frames)} frames")
        # Test with first 3 frames max to avoid long processing
        test_frames = frames[:3]
    except Exception as e:
        print(f"- Error extracting frames: {e}")
        return False

    # Test every available model over the same prompt and frames.
    test_prompt = "Describe what you see in this image"
    results = {}
    for model_name in available_models:
        results[model_name] = _caption_frames(
            model_name, test_frames, test_prompt, local_manager
        )

    _print_summary(results)
    # Success iff at least one model captioned at least one frame
    # (empty dict and all-empty lists are both falsy).
    return any(results.values())
def test_model_info():
    """Display description, strengths, and size for every local model.

    Returns:
        True when all model metadata printed successfully, False on any error.
    """
    print("\n📋 Model Information")
    print("=" * 30)
    try:
        manager = get_local_model_manager()
        for name, details in manager.get_model_info().items():
            print(f"\n{name}:")
            print(f" Description: {details['description']}")
            print(f" Strengths: {details['strengths']}")
            print(f" Size: {details['size']}")
    except Exception as exc:
        print(f"- Error: {exc}")
        return False
    return True
def main():
    """Interactive driver: show model info, then optionally run the full
    video-processing test after user confirmation (models may need a ~3GB
    download on first run)."""
    print("🧪 Video + AI Models Test Suite")
    print("This will test both CNN and Transformer models with your video")
    print("Note: First run will download AI models (~3GB total)")
    print()
    # Test model info first
    info_ok = test_model_info()
    if info_ok:
        print("\nProceed with video processing test?")
        print("This will download AI models if not cached (~3GB)")
        response = input("Continue? (y/n): ")
        if response.lower().startswith('y'):
            success = test_video_processing_with_ai()
            if success:
                print("\n+ Video processing with local AI models SUCCESSFUL!")
                print("+ Your setup is ready to use!")
            else:
                print("\n- Some issues encountered during processing")
        else:
            print("Skipping video processing test.")
    # NOTE(review): printed unconditionally — assumed to run even when
    # model-info display fails; confirm against original intent.
    print("\n+ Test complete! Check the Streamlit app at: http://localhost:8502")


if __name__ == "__main__":
    main()