# sentinel-api / vram_check.py
# Author: Mustafa Öztürk
# Commit: Deploy Sentinel API to HF Space (857d4f5)
import torch
# Reserved VRAM at or above this many MB is treated as critical.
CRITICAL_RESERVED_MB = 3500


def check_vram_usage(device: int = 0) -> None:
    """Print a VRAM usage report for a CUDA device.

    Reports total capacity, currently allocated and reserved memory, and an
    estimated free amount (all in MB), then flags whether reserved memory has
    crossed ``CRITICAL_RESERVED_MB``. If CUDA is unavailable, prints a notice
    and returns without measuring.

    Args:
        device: CUDA device index to inspect (defaults to device 0, which
            matches the original hard-coded behavior).
    """
    if not torch.cuda.is_available():
        print("❌ CUDA aktif değil, VRAM ölçülemez.")
        return

    mb = 1024 ** 2  # bytes per mebibyte, hoisted out of each conversion
    allocated = torch.cuda.memory_allocated(device) / mb
    reserved = torch.cuda.memory_reserved(device) / mb
    total_capacity = torch.cuda.get_device_properties(device).total_memory / mb
    # The caching allocator holds "reserved" memory, so total - reserved is
    # only an estimate of what remains available for new allocations.
    free_estimate = total_capacity - reserved

    print("=" * 40)
    print(f"📟 GPU: {torch.cuda.get_device_name(device)}")
    print(f"📊 Toplam VRAM: {total_capacity:.2f} MB")
    print(f"🔥 Şu An Ayrılan (Allocated): {allocated:.2f} MB")
    print(f"🛡️ Rezerve Edilen (Reserved): {reserved:.2f} MB")
    print(f"🆓 Boş Alan (Tahmini): {free_estimate:.2f} MB")
    if reserved >= CRITICAL_RESERVED_MB:
        print(f"⚠️ Kritik Eşik Aşıldı: Reserved >= {CRITICAL_RESERVED_MB} MB")
    else:
        print(f"✅ Güvenli Bölge: Reserved < {CRITICAL_RESERVED_MB} MB")
    print("=" * 40)
# Run the VRAM report only when executed as a script, not on import.
if __name__ == "__main__":
    check_vram_usage()