anfastech's picture
Fix: Added protobuf>=3.20.0 to requirements.txt in the Core ML section.
3d66487
raw
history blame contribute delete
373 Bytes
"""Smoke test: confirm PyTorch imports and a CPU forward pass succeeds."""
import torch
import torch.nn as nn

# Report the installed PyTorch build and whether a CUDA device is visible.
print(f"PyTorch: {torch.__version__}")
print(f"CUDA: {torch.cuda.is_available()}")

# Name the device we would prefer, falling back to CPU when CUDA is absent.
device_name = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Device: {torch.device(device_name)}")

# Exercise a minimal model: one linear layer over a small random batch.
model = nn.Linear(10, 5)
x = torch.randn(3, 10)
y = model(x)

# Tensors are created without an explicit device, so both live on CPU.
print(f"Input on: {x.device}")
print(f"Output on: {y.device}")
print("✓ CPU inference works!")