Spaces:
Sleeping
Sleeping
TEMPORARY: Show full model response for debugging clipping issue
Browse files- gradio_app.py +3 -1
gradio_app.py
CHANGED
|
@@ -129,7 +129,9 @@ def chat_with_model(message, history, temperature):
|
|
| 129 |
if assistant_start in full_text:
|
| 130 |
# Find the position after the assistant header
|
| 131 |
response_start = full_text.find(assistant_start) + len(assistant_start)
|
| 132 |
-
response = full_text[response_start:].strip()
|
|
|
|
|
|
|
| 133 |
logger.info(f"Extracted response length: {len(response)}")
|
| 134 |
else:
|
| 135 |
# Fallback: try to remove the original prompt
|
|
|
|
| 129 |
if assistant_start in full_text:
|
| 130 |
# Find the position after the assistant header
|
| 131 |
response_start = full_text.find(assistant_start) + len(assistant_start)
|
| 132 |
+
# TEMPORARY: Show full response for debugging
|
| 133 |
+
response = f"=== FULL RESPONSE ===\n{full_text}\n=== END ==="
|
| 134 |
+
# Original line: response = full_text[response_start:].strip()
|
| 135 |
logger.info(f"Extracted response length: {len(response)}")
|
| 136 |
else:
|
| 137 |
# Fallback: try to remove the original prompt
|