# check_gpu.py
import sys
import torch
def check_gpu_environment():
    """Print diagnostic details about the local Python / PyTorch GPU setup.

    Reports the interpreter version, the installed PyTorch version, CUDA
    availability, per-device GPU properties (name, compute capability,
    total memory), and cuDNN status. Purely informational: everything is
    written to stdout and the function returns None.
    """
    print("--- System and Python Information ---")
    print(f"Python Version: {sys.version}")
    print("\n--- PyTorch and CUDA Information ---")
    try:
        print(f"PyTorch Version: {torch.__version__}")
        has_cuda = torch.cuda.is_available()
        print(f"CUDA Available: {has_cuda}")
        if not has_cuda:
            # Without a CUDA-enabled build there is nothing more to report.
            print("\nWARNING: PyTorch was not built with CUDA support. GPU will not be used.")
            return
        device_total = torch.cuda.device_count()
        print(f"Number of GPUs Available: {device_total}")
        # One detail section per visible CUDA device.
        for idx in range(device_total):
            print(f"\n--- GPU Details (Device {idx}) ---")
            print(f"  GPU Name: {torch.cuda.get_device_name(idx)}")
            major, minor = torch.cuda.get_device_capability(idx)
            print(f"  Compute Capability: {major}.{minor}")
            # total_memory is reported in bytes; display it in GB.
            mem_gb = torch.cuda.get_device_properties(idx).total_memory / (1024**3)
            print(f"  Total Memory: {mem_gb:.2f} GB")
        cudnn_ok = torch.backends.cudnn.is_available()
        print("\n--- cuDNN Information ---")
        print(f"cuDNN Available: {cudnn_ok}")
        if cudnn_ok:
            print(f"cuDNN Version: {torch.backends.cudnn.version()}")
        else:
            print("\nWARNING: cuDNN is not available. Training will be significantly slower.")
    except Exception as e:
        # Boundary handler for a stand-alone diagnostic: report and exit cleanly.
        print(f"\nAn error occurred: {e}")
        print("Please ensure PyTorch is installed correctly.")
# Run the environment diagnostics when executed directly as a script.
if __name__ == "__main__":
    check_gpu_environment()