# Environment diagnostics: report Python, OS, Torch, and Transformers versions.
import os
import platform
import sys

# Keep TensorFlow's C++ logging quiet in case TF is imported later in the run.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# Interpreter and host details, printed as "label: value" lines.
for label, value in (
    ("Python version", sys.version),
    ("OS platform", platform.platform()),
    ("OS architecture", platform.machine()),
):
    print(f"{label}: {value}")
|
# Torch / CUDA diagnostics. Import is optional: if torch is not installed we
# print a None placeholder instead of crashing.
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    if torch.cuda.is_available():
        # Report every visible GPU (the original hard-coded device 0 even
        # though it printed device_count above), and round the memory figure
        # to two decimals instead of full float precision.
        for idx in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(idx)
            total_memory = props.total_memory / (1024**3)  # bytes -> GiB
            print(f"GPU {idx} ({props.name}) CUDA memory: {total_memory:.2f} GB")
except ImportError:
    print("Torch version:", None)
|
# Transformers diagnostics; optional dependency, so a failed import is reported
# rather than raised.
try:
    import transformers
except ImportError:
    print("transformers version:", None)
else:
    print("transformers version:", transformers.__version__)