"""
Device Detection Utility

Auto-detects and configures the device (CPU/GPU) for ML models.
"""

import os

import torch


def get_device() -> str:
    """
    Detect the available device (CPU or CUDA GPU).

    Returns:
        str: 'cuda' if a GPU is available, 'cpu' otherwise.
    """
    # Allow an explicit override via the DEVICE environment variable.
    device_override = os.getenv("DEVICE", "").lower()
    if device_override in ["cpu", "cuda"]:
        print(f"🔧 Device override from env: {device_override}")
        return device_override

    if torch.cuda.is_available():
        device = "cuda"
        gpu_name = torch.cuda.get_device_name(0)
        gpu_memory = torch.cuda.get_device_properties(0).total_memory / 1024**3
        print(f"🎮 GPU detected: {gpu_name} ({gpu_memory:.1f}GB)")
    else:
        device = "cpu"
        print("💻 No GPU detected, using CPU")

    return device


def get_device_info() -> dict:
    """
    Get detailed device information

    Returns:
        dict: Device information
    """
    device = get_device()

    info = {
        "device": device,
        "cuda_available": torch.cuda.is_available(),
    }

    if device == "cuda":
        info.update({
            "gpu_name": torch.cuda.get_device_name(0),
            "gpu_memory_gb": round(torch.cuda.get_device_properties(0).total_memory / 1024**3, 2),
            "cuda_version": torch.version.cuda,
            "gpu_count": torch.cuda.device_count(),
        })
    else:
        info.update({
            "cpu_count": os.cpu_count(),
            "torch_threads": torch.get_num_threads(),
        })

    return info


def optimize_for_device(device: str):
    """
    Optimize PyTorch settings based on device

    Args:
        device: 'cpu' or 'cuda'
    """
    if device == "cpu":
        # Cap thread count to avoid oversubscribing small machines.
        cpu_count = os.cpu_count() or 1
        torch.set_num_threads(min(cpu_count, 4))
        print(f"⚙️ PyTorch threads: {torch.get_num_threads()}")

    elif device == "cuda":
        # Enable the cuDNN autotuner and TF32 matmuls for faster GPU math.
        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = True
        print("⚡ GPU optimizations enabled")


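# Example usage: a minimal sketch of how these helpers fit together when the
# module is run directly, which detects the device, applies the matching
# optimizations, and prints the collected device info.
if __name__ == "__main__":
    detected = get_device()
    optimize_for_device(detected)
    for key, value in get_device_info().items():
        print(f"{key}: {value}")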