Update app.py
Browse files
app.py
CHANGED
|
@@ -76,17 +76,21 @@ def chat_completions_proxy():
|
|
| 76 |
event_type = gpt_oss_data.get('type')
|
| 77 |
update_type = gpt_oss_data.get('update', {}).get('type')
|
| 78 |
|
|
|
|
| 79 |
if event_type == 'thread.item_updated' and update_type == 'cot.entry_added':
|
| 80 |
thought = gpt_oss_data['update']['entry']['content']
|
| 81 |
-
|
|
|
|
| 82 |
|
| 83 |
-
#
|
| 84 |
-
thought_chunk = create_openai_chunk(
|
| 85 |
yield f"data: {json.dumps(thought_chunk)}\n\n"
|
| 86 |
-
|
|
|
|
| 87 |
newline_chunk = create_openai_chunk("\n\n")
|
| 88 |
yield f"data: {json.dumps(newline_chunk)}\n\n"
|
| 89 |
-
|
|
|
|
| 90 |
if event_type == 'thread.item_updated' and update_type == 'assistant_message.content_part.text_delta':
|
| 91 |
delta_content = gpt_oss_data['update'].get('delta', '')
|
| 92 |
text_chunk = create_openai_chunk(delta_content)
|
|
@@ -98,7 +102,7 @@ def chat_completions_proxy():
|
|
| 98 |
|
| 99 |
return Response(stream_formatter(), mimetype='text/event-stream')
|
| 100 |
|
| 101 |
-
else:
|
| 102 |
try:
|
| 103 |
full_response_content = ""
|
| 104 |
for gpt_oss_data in _internal_proxy_stream():
|
|
|
|
| 76 |
event_type = gpt_oss_data.get('type')
|
| 77 |
update_type = gpt_oss_data.get('update', {}).get('type')
|
| 78 |
|
| 79 |
+
# Key logic: as soon as each thought is received, format it and send it immediately
|
| 80 |
if event_type == 'thread.item_updated' and update_type == 'cot.entry_added':
|
| 81 |
thought = gpt_oss_data['update']['entry']['content']
|
| 82 |
+
# Wrap the single thought in its own Markdown code block
|
| 83 |
+
formatted_thought_block = f"```markdown\n[思考中] {thought}\n```"
|
| 84 |
|
| 85 |
+
# 1. Send the chunk containing the code block
|
| 86 |
+
thought_chunk = create_openai_chunk(formatted_thought_block)
|
| 87 |
yield f"data: {json.dumps(thought_chunk)}\n\n"
|
| 88 |
+
|
| 89 |
+
# 2. Send a chunk containing newlines to visually separate the different thought blocks
|
| 90 |
newline_chunk = create_openai_chunk("\n\n")
|
| 91 |
yield f"data: {json.dumps(newline_chunk)}\n\n"
|
| 92 |
+
|
| 93 |
+
# Stream the final answer as normal
|
| 94 |
if event_type == 'thread.item_updated' and update_type == 'assistant_message.content_part.text_delta':
|
| 95 |
delta_content = gpt_oss_data['update'].get('delta', '')
|
| 96 |
text_chunk = create_openai_chunk(delta_content)
|
|
|
|
| 102 |
|
| 103 |
return Response(stream_formatter(), mimetype='text/event-stream')
|
| 104 |
|
| 105 |
+
else: # Non-streaming request logic remains unchanged
|
| 106 |
try:
|
| 107 |
full_response_content = ""
|
| 108 |
for gpt_oss_data in _internal_proxy_stream():
|