File size: 4,780 Bytes
4b35e49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
#!/usr/bin/env python3
"""
Test script for the vision sentiment analysis model.
This script verifies that the ResNet-50 model can be loaded and run inference.
"""

import os
import sys
import torch
import torch.nn as nn
from torchvision import transforms, models
from PIL import Image
import numpy as np


def get_sentiment_mapping(num_classes):
    """Return a mapping from class index to human-readable label.

    Known layouts: 3-class sentiment, 4-class emotion, and the FER2013
    7-class emotion set. Any other class count gets generic "Class_i" names.
    """
    known_labels = {
        3: ("Negative", "Neutral", "Positive"),
        # Common 4-class emotion mapping
        4: ("Angry", "Sad", "Happy", "Neutral"),
        # FER2013 7-class emotion mapping
        7: ("Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"),
    }
    labels = known_labels.get(num_classes)
    if labels is None:
        # Generic mapping for unknown number of classes
        return {i: f"Class_{i}" for i in range(num_classes)}
    return dict(enumerate(labels))


def test_vision_model():
    """Test the vision sentiment analysis model.

    Loads the ResNet-50 checkpoint from ``models/resnet50_model.pth``, infers
    the number of output classes from the checkpoint's final-layer weights,
    rebuilds a matching ResNet-50 head, and runs one inference on a random
    dummy image.

    Returns:
        bool: True if loading and inference both succeed, False otherwise.
    """

    print("🧠 Testing Vision Sentiment Analysis Model")
    print("=" * 50)

    # Check if model file exists (relative to the current working directory)
    model_path = "models/resnet50_model.pth"
    if not os.path.exists(model_path):
        print(f"❌ Model file not found: {model_path}")
        print("Please ensure the model file exists in the models/ directory")
        return False

    print(f"βœ… Model file found: {model_path}")

    try:
        # Load the model weights first to check the architecture
        print("πŸ“₯ Loading model checkpoint...")
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE(review): torch.load unpickles arbitrary Python objects — only
        # load trusted checkpoint files (consider weights_only=True on
        # torch >= 1.13 for untrusted sources).
        checkpoint = torch.load(model_path, map_location=device)

        # Many training scripts wrap the state dict (e.g. under 'state_dict'
        # or 'model_state_dict'); unwrap common layouts so the 'fc.weight'
        # probe and load_state_dict below work either way.
        for wrapper_key in ("state_dict", "model_state_dict"):
            if (
                isinstance(checkpoint, dict)
                and wrapper_key in checkpoint
                and isinstance(checkpoint[wrapper_key], dict)
            ):
                checkpoint = checkpoint[wrapper_key]
                break

        # Check the number of classes from the checkpoint: the final fc
        # layer's weight is (num_classes, in_features).
        if 'fc.weight' in checkpoint:
            num_classes = checkpoint['fc.weight'].shape[0]
            print(f"πŸ“Š Model checkpoint has {num_classes} output classes")
        else:
            # Fallback: assume the common 3-class sentiment head
            num_classes = 3
            print("⚠️ Could not determine number of classes from checkpoint, assuming 3")

        # Initialize ResNet-50 with a freshly-sized classification head
        print("πŸ”§ Initializing ResNet-50 model...")
        model = models.resnet50(weights=None)  # Don't load ImageNet weights
        num_ftrs = model.fc.in_features
        model.fc = nn.Linear(num_ftrs, num_classes)  # Use actual number of classes

        print(f"πŸ“₯ Loading trained weights for {num_classes} classes...")
        model.load_state_dict(checkpoint)
        model.to(device)
        model.eval()  # disable dropout/batchnorm updates for inference

        print(f"βœ… Model loaded successfully with {num_classes} classes!")
        print(f"πŸ–₯️  Using device: {device}")

        # Test with a dummy image
        print("πŸ§ͺ Testing inference with dummy image...")

        # Create a dummy image (224x224 RGB). np.random.randint's upper
        # bound is exclusive, so use 256 to cover the full 0-255 pixel range
        # (the original 255 silently excluded fully-bright pixels).
        dummy_image = Image.fromarray(
            np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
        )

        # Standard ImageNet preprocessing (matches ResNet-50 training stats)
        transform = transforms.Compose(
            [
                transforms.Resize(224),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
                ),
            ]
        )

        # Add a batch dimension and move to the model's device
        image_tensor = transform(dummy_image).unsqueeze(0).to(device)

        # Run inference without building the autograd graph
        with torch.no_grad():
            outputs = model(image_tensor)
            print(f"πŸ” Model output shape: {outputs.shape}")

            probabilities = torch.nn.functional.softmax(outputs, dim=1)
            confidence, predicted = torch.max(probabilities, 1)

            # Get sentiment mapping based on number of classes
            sentiment_map = get_sentiment_mapping(num_classes)
            sentiment = sentiment_map[predicted.item()]
            confidence_score = confidence.item()

        print(f"🎯 Test prediction: {sentiment} (confidence: {confidence_score:.3f})")
        print(f"πŸ“‹ Available classes: {list(sentiment_map.values())}")
        print("βœ… Inference test passed!")

        return True

    except Exception as e:
        # Broad catch is deliberate here: this is a top-level smoke test that
        # should report any failure mode rather than crash.
        print(f"❌ Error testing model: {str(e)}")
        import traceback
        traceback.print_exc()
        return False


def main():
    """Entry point: run the vision-model smoke test and report the outcome.

    Exits with status 1 when the test fails so CI/scripts can detect it.
    """
    if test_vision_model():
        print("\nπŸŽ‰ All tests passed! The vision model is ready to use.")
        print("You can now run the Streamlit app with: streamlit run app.py")
        return
    print("\nπŸ’₯ Tests failed. Please check the error messages above.")
    sys.exit(1)


# Run the smoke test only when executed as a script, not on import.
if __name__ == "__main__":
    main()