File size: 8,566 Bytes
c8df794
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
"""
Memory usage monitoring and optimization script
Helps monitor and optimize RAM usage for the disease detection system
"""

import psutil
import torch
import gc
import os
import time
from pathlib import Path

class MemoryMonitor:
    """Monitor process/system memory usage and force allocator cleanup.

    Tracks the resident set size (RSS) of the current process from the
    moment of construction, records the peak observed value, and reports
    whether usage stays under a configurable soft limit (512 MB by
    default, matching the deployment target of the detection system).
    """

    def __init__(self, limit_mb: float = 512):
        """
        Args:
            limit_mb: Soft RSS limit in MB used by check_memory_status()
                to classify usage as "OK" vs "HIGH". Defaults to 512 for
                backward compatibility with existing callers.
        """
        self.limit_mb = limit_mb
        self.process = psutil.Process(os.getpid())
        self.initial_memory = self.get_memory_usage()
        self.peak_memory = self.initial_memory

    def get_memory_usage(self) -> float:
        """Return the current process RSS in MB."""
        return self.process.memory_info().rss / 1024 / 1024

    def get_memory_percent(self) -> float:
        """Return process memory usage as a percentage of total system memory."""
        return self.process.memory_percent()

    def get_system_memory_info(self) -> dict:
        """Return system-wide memory statistics (GB figures plus used percent)."""
        memory = psutil.virtual_memory()
        return {
            "total_gb": memory.total / 1024 / 1024 / 1024,
            "available_gb": memory.available / 1024 / 1024 / 1024,
            "used_percent": memory.percent,
            "free_gb": memory.free / 1024 / 1024 / 1024
        }

    def optimize_memory(self) -> None:
        """Force a GC pass and release cached CUDA blocks (if a GPU is present)."""
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

    def check_memory_status(self) -> dict:
        """Snapshot current usage, update the running peak, and classify it.

        Returns:
            dict with current/peak/increase values in MB, the process's
            share of system memory, and an "OK"/"HIGH" status relative to
            ``limit_mb``. The ``within_512mb`` key name is kept for
            backward compatibility; it reflects ``limit_mb``.
        """
        current = self.get_memory_usage()
        self.peak_memory = max(self.peak_memory, current)

        # Evaluate the limit once so the flag and status can never disagree.
        within_limit = current < self.limit_mb
        status = {
            "current_mb": current,
            "peak_mb": self.peak_memory,
            "increase_mb": current - self.initial_memory,
            "within_512mb": within_limit,
            "system_percent": self.get_memory_percent(),
            "status": "OK" if within_limit else "HIGH"
        }

        return status

    def print_memory_report(self) -> dict:
        """Print a formatted process/system/GPU memory report; return the status dict."""
        status = self.check_memory_status()
        system_info = self.get_system_memory_info()

        print("\n" + "="*50)
        print("MEMORY USAGE REPORT")
        print("="*50)
        print(f"Process Memory Usage:")
        print(f"  Current: {status['current_mb']:.1f} MB")
        print(f"  Peak: {status['peak_mb']:.1f} MB")
        print(f"  Increase: {status['increase_mb']:.1f} MB")
        print(f"  Status: {status['status']}")
        print(f"  Within 512MB limit: {'βœ…' if status['within_512mb'] else '❌'}")

        print(f"\nSystem Memory:")
        print(f"  Total: {system_info['total_gb']:.1f} GB")
        print(f"  Available: {system_info['available_gb']:.1f} GB")
        print(f"  Used: {system_info['used_percent']:.1f}%")

        if torch.cuda.is_available():
            gpu_memory = torch.cuda.memory_allocated() / 1024 / 1024
            gpu_cached = torch.cuda.memory_reserved() / 1024 / 1024
            print(f"\nGPU Memory:")
            print(f"  Allocated: {gpu_memory:.1f} MB")
            print(f"  Cached: {gpu_cached:.1f} MB")

        print("="*50)

        return status

def test_model_memory_usage():
    """Exercise the lite and tiny disease models while tracking RSS.

    Imports the project models, instantiates each one, runs a single
    no-grad forward pass on a dummy 1x3x224x224 tensor, and prints a
    memory checkpoint after every step. Returns the MemoryMonitor so the
    caller can inspect the collected statistics.
    """
    monitor = MemoryMonitor()

    print("Testing memory usage of disease detection models...")
    monitor.print_memory_report()

    try:
        # Step 1: verify the project modules import at all.
        print("\n1. Testing module imports...")

        import sys
        sys.path.append('src')

        from src.model_lite import CropDiseaseResNet50Lite, TinyDiseaseClassifier
        monitor.optimize_memory()

        print(f"After imports: {monitor.check_memory_status()['current_mb']:.1f} MB")

        # Step 2: the ResNet50-based lite model.
        print("\n2. Testing CropDiseaseResNet50Lite...")
        lite_model = CropDiseaseResNet50Lite(15, pretrained=False)

        print(f"After lite model creation: {monitor.check_memory_status()['current_mb']:.1f} MB")
        print(f"Model size: {lite_model.get_model_size():.1f} MB")

        # One forward pass on a synthetic image.
        sample = torch.randn(1, 3, 224, 224)
        with torch.no_grad():
            prediction = lite_model(sample)

        print(f"After inference: {monitor.check_memory_status()['current_mb']:.1f} MB")

        del lite_model, sample, prediction
        monitor.optimize_memory()

        # Step 3: the compact classifier.
        print("\n3. Testing TinyDiseaseClassifier...")
        tiny_model = TinyDiseaseClassifier(15)

        print(f"After tiny model creation: {monitor.check_memory_status()['current_mb']:.1f} MB")
        print(f"Model size: {tiny_model.get_model_size():.1f} MB")

        sample = torch.randn(1, 3, 224, 224)
        with torch.no_grad():
            prediction = tiny_model(sample)

        print(f"After inference: {monitor.check_memory_status()['current_mb']:.1f} MB")

        del tiny_model, sample, prediction
        monitor.optimize_memory()

    except Exception as e:
        print(f"Error during testing: {e}")

    print("\n4. Final memory report:")
    monitor.print_memory_report()

    return monitor

def benchmark_memory_usage():
    """Simulate API startup plus a burst of predictions, printing RSS throughout.

    Loads the lite model and its explainer (mirroring API initialization),
    then runs five no-grad forward passes with cleanup between them so the
    per-prediction memory footprint is visible. A report is printed at the
    end — also on failure, so partial results are not lost.
    """
    monitor = MemoryMonitor()

    print("Starting memory benchmark...")

    try:
        # Mirror what the API does on startup: import and build everything.
        print("Simulating API startup...")

        import sys
        sys.path.append('src')

        from src.model_lite import CropDiseaseResNet50Lite
        from src.explain_lite import CropDiseaseExplainerLite

        net = CropDiseaseResNet50Lite(15, pretrained=False)
        net.eval()

        # The explainer is constructed (as the API would) purely so its
        # memory cost is included in the measurement.
        explainer = CropDiseaseExplainerLite(net, ["class1", "class2"], 'cpu')

        print(f"After model loading: {monitor.check_memory_status()['current_mb']:.1f} MB")

        # Fire a small burst of predictions, cleaning up after each one.
        print("Simulating predictions...")
        for attempt in range(5):
            sample = torch.randn(1, 3, 224, 224)

            with torch.no_grad():
                prediction = net(sample)

            del sample, prediction
            monitor.optimize_memory()

            print(f"Prediction {attempt + 1}: {monitor.check_memory_status()['current_mb']:.1f} MB")

        print("\nFinal benchmark results:")
        monitor.print_memory_report()

    except Exception as e:
        print(f"Benchmark error: {e}")
        monitor.print_memory_report()

def get_optimization_recommendations():
    """Print and return memory-tuning advice based on installed system RAM.

    Tiers: under 2 GB (red — tiny model only), under 4 GB (yellow — lite
    model with limits), otherwise green. A warning is appended whenever
    system memory usage currently exceeds 80%.
    """
    system_info = MemoryMonitor().get_system_memory_info()
    total_gb = system_info["total_gb"]

    if total_gb < 2:
        recommendations = [
            "πŸ”΄ Very low system memory. Use TinyDiseaseClassifier instead of ResNet50",
            "πŸ”΄ Disable explanations completely",
            "πŸ”΄ Set max_workers=1 for API",
        ]
    elif total_gb < 4:
        recommendations = [
            "🟑 Low system memory. Use CropDiseaseResNet50Lite",
            "🟑 Enable memory efficient mode",
            "🟑 Limit concurrent requests to 2",
        ]
    else:
        recommendations = [
            "🟒 Sufficient system memory for standard operation",
            "🟒 Can use full model with optimizations",
        ]

    if system_info["used_percent"] > 80:
        recommendations.append("⚠️ High system memory usage. Close other applications")

    print("\nMEMORY OPTIMIZATION RECOMMENDATIONS:")
    print("="*50)
    for rec in recommendations:
        print(rec)
    print("="*50)

    return recommendations

def _main():
    """Run the full monitoring suite: model tests, benchmark, recommendations."""
    print("Disease Detection Memory Monitor")
    print("="*50)

    print("1. Testing model memory usage...")
    test_model_memory_usage()

    print("\n2. Running memory benchmark...")
    benchmark_memory_usage()

    print("\n3. Getting optimization recommendations...")
    get_optimization_recommendations()

    print("\nMemory monitoring complete!")

if __name__ == "__main__":
    _main()