Update custom model files, README, and requirements
Browse files — asr_modeling.py (+6 lines, −1 line)
asr_modeling.py
CHANGED
|
@@ -622,13 +622,18 @@ class ASRModel(PreTrainedModel, GenerationMixin):
|
|
| 622 |
user_content += " " + self.TRANSCRIBE_PROMPT
|
| 623 |
messages.append({"role": "user", "content": user_content})
|
| 624 |
|
|
|
|
|
|
|
| 625 |
chat_result = self.tokenizer.apply_chat_template(
|
| 626 |
messages,
|
| 627 |
tokenize=True,
|
| 628 |
add_generation_prompt=True,
|
| 629 |
return_tensors="pt",
|
| 630 |
-
enable_thinking=
|
| 631 |
)
|
|
|
|
|
|
|
|
|
|
| 632 |
input_ids = chat_result.input_ids.to(device)
|
| 633 |
|
| 634 |
if input_ids.dim() == 1:
|
|
|
|
| 622 |
user_content += " " + self.TRANSCRIBE_PROMPT
|
| 623 |
messages.append({"role": "user", "content": user_content})
|
| 624 |
|
| 625 |
+
enable_thinking_val = getattr(self.config, "enable_thinking", False)
|
| 626 |
+
print(f"[DEBUG generate] enable_thinking={enable_thinking_val}, system_prompt={system_prompt[:100] if system_prompt else None}...")
|
| 627 |
chat_result = self.tokenizer.apply_chat_template(
|
| 628 |
messages,
|
| 629 |
tokenize=True,
|
| 630 |
add_generation_prompt=True,
|
| 631 |
return_tensors="pt",
|
| 632 |
+
enable_thinking=enable_thinking_val,
|
| 633 |
)
|
| 634 |
+
# Debug: show the formatted prompt
|
| 635 |
+
prompt_text = self.tokenizer.decode(chat_result.input_ids[0] if chat_result.input_ids.dim() > 1 else chat_result.input_ids)
|
| 636 |
+
print(f"[DEBUG generate] Formatted prompt: {prompt_text[:500]}...")
|
| 637 |
input_ids = chat_result.input_ids.to(device)
|
| 638 |
|
| 639 |
if input_ids.dim() == 1:
|