Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -172,12 +172,16 @@ def process_translate(language: str, audio_path: str) -> str:
|
|
| 172 |
],
|
| 173 |
}
|
| 174 |
]
|
| 175 |
-
|
| 176 |
inputs = processor.apply_chat_template(conversation)
|
|
|
|
| 177 |
inputs = inputs.to(device, dtype=torch.bfloat16)
|
| 178 |
-
|
| 179 |
outputs = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
|
|
|
|
|
|
|
| 180 |
decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)
|
|
|
|
| 181 |
result = decoded_outputs[0]
|
| 182 |
status = "✅ **Success!** Translation done."
|
| 183 |
except Exception as e:
|
|
|
|
| 172 |
],
|
| 173 |
}
|
| 174 |
]
|
| 175 |
+
print("processor.apply_chat_template(conversation) ...")
|
| 176 |
inputs = processor.apply_chat_template(conversation)
|
| 177 |
+
print("OK")
|
| 178 |
inputs = inputs.to(device, dtype=torch.bfloat16)
|
| 179 |
+
print("model.generate(**inputs, max_new_tokens=MAX_TOKENS) ...")
|
| 180 |
outputs = model.generate(**inputs, max_new_tokens=MAX_TOKENS)
|
| 181 |
+
print("OK")
|
| 182 |
+
print("processor.batch_decode(")
|
| 183 |
decoded_outputs = processor.batch_decode(outputs[:, inputs.input_ids.shape[1]:], skip_special_tokens=True)
|
| 184 |
+
print("OK")
|
| 185 |
result = decoded_outputs[0]
|
| 186 |
status = "✅ **Success!** Translation done."
|
| 187 |
except Exception as e:
|