"""Diagnostic script: report CUDA availability for torch and CTranslate2,
then check which faster-whisper compute types can load a model on CPU."""

import torch
import ctranslate2


def main() -> None:
    """Print CUDA diagnostics and attempt CPU loads of the tiny Whisper model.

    Side effects: prints status lines to stdout; downloads/loads the "tiny"
    faster-whisper model if the package is installed. Never raises — all
    expected failures are caught and reported.
    """
    print(f"Torch CUDA available: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        print(f"Device count: {torch.cuda.device_count()}")
        print(f"Device name: {torch.cuda.get_device_name(0)}")

    print(f"CTranslate2 CUDA available: {ctranslate2.get_cuda_device_count() > 0}")

    # faster_whisper is optional: bail out gracefully when it is absent.
    try:
        from faster_whisper import WhisperModel
    except ImportError:
        print("faster_whisper not installed")
        return

    # float16 is generally unsupported by CPU backends, so this load is
    # expected to raise; success would indicate an unusual build.
    print("Testing WhisperModel load on CPU with float16 (expect failure if CPU)...")
    try:
        WhisperModel("tiny", device="cpu", compute_type="float16")
        print("Success loading float16 on CPU (unexpected)")
    except Exception as e:
        print(f"Caught expected error on CPU float16: {e}")

    # int8 is the portable CPU compute type and should load everywhere.
    print("Testing WhisperModel load on CPU with int8...")
    try:
        WhisperModel("tiny", device="cpu", compute_type="int8")
        print("Success loading int8 on CPU")
    except Exception as e:
        print(f"Failed loading int8 on CPU: {e}")


if __name__ == "__main__":
    main()