feat: implement fairseq compatibility shim and working audio inference pipeline
0f96bb5
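The compatibility shim itself is not shown in this snippet. As a rough sketch of one common approach, a shim can pre-register stub modules so that torch.load() can unpickle fairseq-era checkpoints without the real fairseq package installed; the function name install_fairseq_shim, the module list, and the Dictionary class below are illustrative assumptions, not the commit's actual code.

import sys
import types

def install_fairseq_shim():
    # Hypothetical sketch: register stub modules so torch.load() can
    # unpickle checkpoints that reference fairseq internals even when
    # fairseq itself is not installed. Module names are assumptions.
    for name in ('fairseq', 'fairseq.data', 'fairseq.data.dictionary'):
        sys.modules.setdefault(name, types.ModuleType(name))

    # Unpickling resolves classes as module attributes, so stub the
    # class the checkpoint refers to (class name assumed here).
    class _StubDictionary:
        pass

    sys.modules['fairseq.data.dictionary'].Dictionary = _StubDictionary

The sanity-check script below verifies the environment before running the pipeline: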
import torch

# Report the installed PyTorch build and whether a usable CUDA device exists.
print('PyTorch version:', torch.__version__)
print('CUDA available:', torch.cuda.is_available())
if torch.cuda.is_available():
    print('CUDA device:', torch.cuda.get_device_name(0))
    print('CUDA version:', torch.version.cuda)
    print('VRAM:', round(torch.cuda.get_device_properties(0).total_memory / 1024**3, 2), 'GB')
    # Test actual GPU computation: is_available() can report True even when
    # kernel launches fail (e.g. driver/toolkit mismatch), so run a real matmul.
    try:
        x = torch.randn(100, 100, device='cuda')
        y = torch.randn(100, 100, device='cuda')
        z = torch.mm(x, y)
        print('GPU computation test: PASSED')
        print('Result shape:', z.shape, 'sum:', z.sum().item())
    except Exception as e:
        print('GPU computation test: FAILED -', e)
        print('Falling back to CPU mode')
else:
    print('WARNING: CUDA not available, will use CPU')
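Downstream, the inference pipeline needs a device choice that mirrors this fallback logic. A minimal sketch (the helper name select_device is an assumption, not part of the commit):

import torch

def select_device():
    # Hypothetical helper mirroring the check above: probe a real kernel
    # launch rather than trusting torch.cuda.is_available() alone, since
    # that flag can be True on systems where computation still fails.
    if torch.cuda.is_available():
        try:
            torch.randn(1, device='cuda').sum().item()
            return torch.device('cuda')
        except Exception:
            pass
    return torch.device('cpu')

device = select_device()
print('Running inference on:', device)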