Update app.py
Browse files
app.py
CHANGED
@@ -22,12 +22,12 @@ print(f"Using device: {device}")
 # Load the Whisper model and processor
 whisper_model_name = "openai/whisper-small"
 whisper_processor = WhisperProcessor.from_pretrained(whisper_model_name)
-whisper_model = WhisperForConditionalGeneration.from_pretrained(whisper_model_name).to(device)
+whisper_model = WhisperForConditionalGeneration.from_pretrained(whisper_model_name, torch_dtype=torch.float16).to(device)
 
 # Load the Qwen model and tokenizer
 qwen_model_name = "Qwen/Qwen2.5-3B-Instruct"
 qwen_tokenizer = AutoTokenizer.from_pretrained(qwen_model_name)
-qwen_model = AutoModelForCausalLM.from_pretrained(qwen_model_name).to(device)
+qwen_model = AutoModelForCausalLM.from_pretrained(qwen_model_name, torch_dtype=torch.float16).to(device)
 
 def download_audio_from_url(url):
     try: