feed encrypted reasoning traces back to model
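This change makes the app stateless with respect to OpenAI's servers while still letting reasoning models reuse their chain of thought across turns: every request is sent with `store=False` and `include=["reasoning.encrypted_content"]`, all output items of a completed response (visible messages, tool calls, and reasoning items carrying encrypted traces) are appended to a per-session `history_openai_format` state, and that list is replayed as the next request's `input`. A minimal sketch of the round-trip, assuming the official `openai` Python SDK (model name and prompts are placeholders):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
history = [{"role": "user", "content": "Work out the answer step by step."}]

r1 = client.responses.create(
    model="o4-mini",                           # any reasoning-capable model
    input=history,
    store=False,                               # nothing is retained server-side
    include=["reasoning.encrypted_content"],   # reasoning items come back encrypted
)
history.extend(r1.output)                      # traces now travel with the history

history.append({"role": "user", "content": "Continue from your reasoning above."})
r2 = client.responses.create(model="o4-mini", input=history, store=False,
                             include=["reasoning.encrypted_content"])
print(r2.output_text)                          # the server decrypts and reuses the traces
```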
- app.py +128 -78
- chat_export.py +69 -2
app.py
CHANGED
@@ -23,7 +23,6 @@ dump_controls = False
 log_to_console = False
 
 mcp_servers = load_registry()
-pending_mcp_request = None
 
 def encode_image(image_data):
     """Generates a prefix for image base64 data in the required format for the
@@ -146,6 +145,43 @@ def undo(history):
     history.pop()
     return history
 
+def clear_both_histories():
+    """Clear both the chatbot display history and the OpenAI-format history"""
+    return [], []
+
+def undo_both_histories(chatbot_history, openai_history):
+    """Remove the last exchange from both histories"""
+
+    # remove Gradio messages until the last user message
+    while chatbot_history and chatbot_history[-1]["role"] != "user":
+        chatbot_history.pop()
+    if chatbot_history and chatbot_history[-1]["role"] == "user":
+        chatbot_history.pop()
+
+    # remove OpenAI-format messages until the last user message
+    while openai_history and not (isinstance(openai_history[-1], dict) and openai_history[-1].get("role") == "user"):
+        openai_history.pop()
+    if openai_history and isinstance(openai_history[-1], dict) and openai_history[-1].get("role") == "user":
+        openai_history.pop()
+
+    return chatbot_history, openai_history
+
+def retry_last_message(chatbot_history, openai_history):
+    """Remove the last assistant message from both histories so it can be regenerated"""
+    if chatbot_history:
+        last_msg = chatbot_history[-1]
+        # drop the last message only if it came from the assistant
+        is_assistant = (hasattr(last_msg, 'role') and last_msg.role == "assistant") or \
+                       (isinstance(last_msg, dict) and last_msg.get('role') == "assistant")
+        if is_assistant:
+            new_chatbot = chatbot_history[:-1]
+            new_openai = openai_history[:-1] if openai_history else []
+            return new_chatbot, new_openai
+
+    return chatbot_history, openai_history
+
 def dump(history):
     return str(history)
 
@@ -173,8 +209,7 @@ def process_values_js():
     }
     """
 
-async def bot(message, history, oai_key, system_prompt, temperature, max_tokens, model, python_use, web_search, *mcp_selected):
-    global pending_mcp_request
+async def bot(message, history, history_openai_format, oai_key, system_prompt, temperature, max_tokens, model, python_use, web_search, *mcp_selected):
     try:
         client = OpenAI(
             api_key=oai_key
@@ -241,21 +276,21 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
         )
     else:
         approval_items = []
+        if history_openai_format:
+            last_msg = history_openai_format[-1]
+            if getattr(last_msg, "type", None) == "mcp_approval_request":
+                flag = message["text"][:1].lower()
+                if flag == 'y':
+                    approve = True
+                elif flag == 'n':
+                    approve = False
+                else:
+                    raise gr.Error("MCP tool call awaiting confirmation. Start your reply with 'y' or 'n'.")
+                history_openai_format.append({
+                    "type": "mcp_approval_response",
+                    "approval_request_id": last_msg.id,  # the pending request is the last history item
+                    "approve": approve,
+                })
 
     tools = []
     if python_use:
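With the `pending_mcp_request` global gone, the pending approval request now lives at the tail of `history_openai_format`, and the user's 'y'/'n' reply is turned into an `mcp_approval_response` input item. Its shape, as used in the hunk above (the request id value is illustrative):

```python
# The Responses API pairs this input item with the earlier mcp_approval_request
# output item via its id; "mcpr_0123" is a placeholder.
history_openai_format.append({
    "type": "mcp_approval_response",
    "approval_request_id": "mcpr_0123",
    "approve": True,  # False sends a denial instead
})
```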
@@ -294,46 +329,23 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
     if log_to_console:
         print(f"bot history: {str(history)}")
 
+    instructions = None
     user_msg_parts = []
 
     if system_prompt:
-        if not model.startswith("o"):
-            role = "system"
-        else:
-            role = "developer"
-
         if not system_prompt.startswith("Formatting re-enabled"):
+            instructions = "Formatting re-enabled\n" + system_prompt
+        else:
+            instructions = system_prompt
 
-    for msg in history:
-        role = msg.role if hasattr(msg, "role") else msg["role"]
-        content = msg.content if hasattr(msg, "content") else msg["content"]
-
-        if role == "user":
-            user_msg_parts.extend(normalize_user_content(content))
-
-        if role == "assistant":
-            if user_msg_parts:
-                history_openai_format.append({"role": "user", "content": user_msg_parts})
-                user_msg_parts = []
-
-            history_openai_format.append({"role": "assistant", "content": str(content)})
-
-    if message["files"]:
-        for file in message["files"]:
-            user_msg_parts.extend(encode_file(file))
-        history_openai_format.append({"role": "user", "content": user_msg_parts})
-        user_msg_parts = []
+    # handle chatbot input
+    if message["text"]:
+        user_msg_parts.append({"type": "input_text", "text": message["text"]})
+    if message["files"]:
+        for file in message["files"]:
+            user_msg_parts.extend(encode_file(file))
+    history_openai_format.append({"role": "user", "content": user_msg_parts})
+    user_msg_parts = []
 
     if log_to_console:
         print(f"br_prompt: {str(history_openai_format)}")
@@ -375,15 +387,19 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
         request_params = {
             "model": model,
             "input": history_openai_format,
-            "store": False
+            "store": False,
+            "instructions": instructions
         }
         if reasoner:
+            reasoning_dict = {"summary": "auto"}
             if high:
+                reasoning_dict["effort"] = "high"
             elif low:
+                reasoning_dict["effort"] = "low"
             else:
+                reasoning_dict["effort"] = "medium"
+            request_params["reasoning"] = reasoning_dict
+            # ask the API to return reasoning items with encrypted content so they
+            # can be fed back as input on the next turn (needed with store=False)
+            request_params["include"] = ["reasoning.encrypted_content"]
         else:
             request_params["temperature"] = temperature
         if tools:
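The hunks below rework the streaming handler, so it helps to recall the event shapes involved. A sketch of the event types consumed, assuming the official SDK's streaming mode (model and prompt are placeholders):

```python
from openai import OpenAI

client = OpenAI()
stream = client.responses.create(
    model="o4-mini", input=[{"role": "user", "content": "Hi"}],
    store=False, include=["reasoning.encrypted_content"], stream=True,
)
for event in stream:
    if event.type == "response.output_text.delta":
        print(event.delta, end="")           # incremental assistant text
    elif event.type == "response.output_item.added":
        pass                                 # new output item; reasoning summaries arrive here
    elif event.type == "response.completed":
        final_items = event.response.output  # includes encrypted reasoning items
```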
@@ -410,7 +426,20 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                     assistant_msgs.append(final_msg)
                 whole_response += event.delta
                 final_msg.content = whole_response
-                yield assistant_msgs
+                yield assistant_msgs, history_openai_format
+            elif event.type == "response.output_item.added" and event.item.type == "reasoning":
+                summary = ""
+                for summary_part in event.item.summary:  # named to avoid shadowing the builtin str
+                    if summary_part.type == "summary_text":
+                        summary += summary_part.text
+                if summary:
+                    rs_msg = gr.ChatMessage(
+                        role="assistant",
+                        content=summary,
+                        metadata={"title": "Reasoning", "id": event.item.id, "status": "done"},
+                    )
+                    assistant_msgs.append(rs_msg)
+                    yield assistant_msgs, history_openai_format
             elif event.type in (
                 "response.mcp_list_tools.in_progress",
                 "response.mcp_call.in_progress",
@@ -425,7 +454,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                     },
                 )
                 assistant_msgs.append(mcp_event_msg)
-                yield assistant_msgs
+                yield assistant_msgs, history_openai_format
             elif event.type in (
                 "response.mcp_list_tools.completed",
                 "response.mcp_list_tools.failed",
@@ -434,11 +463,13 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
             ):
                 if mcp_event_msg is not None:
                     mcp_event_msg.metadata["status"] = "done"
-                yield assistant_msgs
+                yield assistant_msgs, history_openai_format
             elif event.type == "response.completed":
                 response = event.response
                 outputs = response.output
 
+                # keep every output item, including encrypted reasoning traces,
+                # so the next request can replay them verbatim
+                history_openai_format.extend(outputs)
+
                 for output in outputs:
                     if output.type == "message":
                         for part in output.content:
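The key line in the hunk above is `history_openai_format.extend(outputs)`: a completed response's output list contains the reasoning items next to the visible message, and with `include=["reasoning.encrypted_content"]` each one carries an opaque payload that the next request must replay unmodified. A small illustration (attribute names per the SDK's typed models):

```python
# Inside the "response.completed" branch above: response = event.response.
for item in response.output:
    if item.type == "reasoning":
        assert item.encrypted_content is not None  # opaque blob, decryptable only by OpenAI
history_openai_format.extend(response.output)      # replayed as `input` next turn
```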
@@ -449,7 +480,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                 assistant_msgs.append(final_msg)
                             whole_response += part.text
                             final_msg.content = whole_response
-                            yield assistant_msgs
+                            yield assistant_msgs, history_openai_format
 
                             anns = part.annotations
                             if anns:
@@ -466,8 +497,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                 final_msg = gr.ChatMessage(role="assistant", content="")
                                 assistant_msgs.append(final_msg)
                             final_msg.content = whole_response
-                            yield assistant_msgs
-
+                            yield assistant_msgs, history_openai_format
                     elif output.type == "function_call":
                         # Check if this is a local MCP tool call
                         function_name = output.name
@@ -510,7 +540,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                 metadata={"title": "request", "parent_id": call_id},
                             )
                         )
-                        yield assistant_msgs
+                        yield assistant_msgs, history_openai_format
 
                         # Call the MCP tool (async)
                         try:
@@ -531,7 +561,6 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                 )
                             )
                             parent_msg.metadata["status"] = "done"
-                            yield assistant_msgs
                             # Add result to history
                             history_openai_format.append(
                                 {
@@ -540,6 +569,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                     "output": result_text,
                                 }
                             )
+                            yield assistant_msgs, history_openai_format
                         except Exception as e:
                             error_message = str(e)
                             history_openai_format.append({
@@ -555,7 +585,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                 )
                             )
                             parent_msg.metadata["status"] = "done"
-                            yield assistant_msgs
+                            yield assistant_msgs, history_openai_format
 
                         # Need to continue the loop to process the function output
                         loop_tool_calling = True
@@ -601,7 +631,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                                 metadata={"title": "request", "parent_id": call_id},
                             )
                         )
-                        yield assistant_msgs
+                        yield assistant_msgs, history_openai_format
 
                         tool_result = eval_script(tool_script)
                         result_text = (
@@ -618,7 +648,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                             )
                         )
                         parent_msg.metadata["status"] = "done"
-                        yield assistant_msgs
+                        yield assistant_msgs, history_openai_format
 
                         history_openai_format.append(
                             {
@@ -647,13 +677,13 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                             )
                         )
                         parent_msg.metadata["status"] = "done"
-                        yield assistant_msgs
+                        yield assistant_msgs, history_openai_format
                     else:
                         history_openai_format.append(outputs)
 
                     loop_tool_calling = True
                 elif output.type == "mcp_approval_request":
+                    history_openai_format.append(output)
                     assistant_msgs.append(
                         gr.ChatMessage(
                             role="assistant",
@@ -663,7 +693,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                             options=[{"value": "y", "label": "Yes"}, {"value": "n", "label": "No"}],
                         )
                     )
-                    yield assistant_msgs
+                    yield assistant_msgs, history_openai_format
                     return
                 elif output.type == "mcp_call":
                     history_openai_format.append(_event_to_dict(output))
@@ -675,7 +705,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
                             metadata={"title": "response"},
                         )
                     )
-                    yield assistant_msgs
+                    yield assistant_msgs, history_openai_format
 
                 if log_to_console:
                     print(f"usage: {event.usage}")
@@ -685,7 +715,7 @@ async def bot(message, history, oai_key, system_prompt, temperature, max_tokens,
             final_msg = gr.ChatMessage(role="assistant", content="")
             assistant_msgs.append(final_msg)
         final_msg.content = whole_response
-        yield assistant_msgs
+        yield assistant_msgs, history_openai_format
 
         if log_to_console:
             print(f"br_result: {str(history)}")
@@ -702,9 +732,9 @@ def import_history_guarded(oai_key, history, file):
        raise gr.Error(f"OpenAI login error: {str(e)}")
 
     # actual import
-    chat_history, system_prompt_value = import_history(history, file)
-
-    return chat_history, system_prompt_value, chat_history
+    chat_history, system_prompt_value, history_openai_format = import_history(history, file)
+
+    return chat_history, system_prompt_value, chat_history, history_openai_format
 
 with gr.Blocks(delete_cache=(86400, 86400)) as demo:
     gr.Markdown("# OpenAI™️ Chat (Nils' Version™️)")
@@ -763,10 +793,12 @@ with gr.Blocks(delete_cache=(86400, 86400)) as demo:
     dl_settings_button.click(None, controls, js=generate_download_settings_js("oai_chat_settings.bin", control_ids))
     ul_settings_button.click(None, None, None, js=generate_upload_settings_js(control_ids))
 
+    history_openai_format = gr.State([])
    chat = gr.ChatInterface(
        fn=bot,
        multimodal=True,
-        additional_inputs=controls,
+        additional_inputs=[history_openai_format] + controls,
+        additional_outputs=[history_openai_format],
        autofocus=False,
        type="messages",
        chatbot=gr.Chatbot(elem_id="chatbot", type="messages"),
@@ -779,6 +811,24 @@ with gr.Blocks(delete_cache=(86400, 86400)) as demo:
     chatbot.show_copy_button = True
     chatbot.height = 450
 
+    # Sync chatbot clear/undo/retry with the history_openai_format state
+    chatbot.clear(
+        fn=clear_both_histories,
+        outputs=[chatbot, history_openai_format]
+    )
+
+    chatbot.undo(
+        fn=undo_both_histories,
+        inputs=[chatbot, history_openai_format],
+        outputs=[chatbot, history_openai_format]
+    )
+
+    chatbot.retry(
+        fn=retry_last_message,
+        inputs=[chatbot, history_openai_format],
+        outputs=[chatbot, history_openai_format]
+    )
+
     if dump_controls:
         with gr.Row():
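The UI side relies on Gradio's `gr.State` round-trip: `ChatInterface` feeds the state in through `additional_inputs` and writes back whatever the generator yields for `additional_outputs`. A minimal standalone sketch, assuming Gradio 5 (the echo function is illustrative, not from app.py):

```python
import gradio as gr

def bot_fn(message, history, api_history):
    api_history.append({"role": "user", "content": message["text"]})
    # the second yielded value is mapped onto additional_outputs
    yield f"echo: {message['text']}", api_history

with gr.Blocks() as demo:
    api_history = gr.State([])  # per-session, survives across turns
    gr.ChatInterface(
        fn=bot_fn,
        multimodal=True,
        type="messages",
        additional_inputs=[api_history],
        additional_outputs=[api_history],
    )

demo.launch()
```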
@@ -846,6 +896,6 @@ with gr.Blocks(delete_cache=(86400, 86400)) as demo:
     """)
     import_button.upload(import_history_guarded,
                          inputs=[oai_key, chatbot, import_button],
-                         outputs=[chatbot, system_prompt, chat.chatbot_state])
+                         outputs=[chatbot, system_prompt, chat.chatbot_state, history_openai_format])
 
 demo.queue(default_concurrency_limit = None).launch()
chat_export.py
CHANGED
@@ -4,6 +4,7 @@ import os, io
 from PIL import Image
 import gradio as gr
 from gradio import processing_utils, utils
+import openai
 
 def import_history(history, file):
     if os.path.getsize(file.name) > 100e6:
@@ -20,6 +21,7 @@ def import_history(history, file):
     messages = import_data['messages']
     system_prompt_value = ''
     chat_history = []
+    openai_history = []
 
     msg_num = 1
     for msg in messages:
@@ -40,6 +42,14 @@ def import_history(history, file):
                         "role": msg['role'],
                         "content": {"path": cache_path}
                     })
+                    openai_history.append({
+                        "role": "user",
+                        "content": [{
+                            "type": "input_image",
+                            "detail": "high",
+                            "image_url": data_uri
+                        }]
+                    })
                 elif item.get('type', '') == 'file':
                     fname = os.path.basename(item['file'].get('name', f'download{msg_num}'))
                     file_data = base64.b64decode(item['file']['url'].split(',')[1])
@@ -51,13 +61,36 @@ def import_history(history, file):
                         "role": msg['role'],
                         "content": {"path": cache_path}
                     })
+                    openai_history.append({
+                        "role": "user",
+                        "content": [{
+                            "type": "input_text",
+                            "text": file_data  # "text" is the field input_text parts expect
+                        }]
+                    })
                 else:
+                    # untested - does not happen?
+                    chat_history.append(item["content"])
+                    openai_history.append({
+                        "role": "user",
+                        "content": item["content"]
+                    })
             else:
                 chat_history.append(msg)
+                openai_history.append({
+                    "role": "user",
+                    "content": msg['content']
+                })
 
         elif msg['role'] == 'assistant':
             chat_history.append(msg)
+            openai_history.append(openai.types.responses.ResponseOutputMessage(
+                id=f"msg_{msg_num}",
+                role="assistant",
+                content=[openai.types.responses.ResponseOutputText(type="output_text", text=msg['content'], annotations=[])],
+                status="completed",
+                type="message"
+            ))
 
         msg_num = msg_num + 1
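Imported assistant turns are rebuilt as the SDK's typed `ResponseOutputMessage` objects so they can sit in the same list as items returned by the API. A quick construction check, assuming the `openai` package (text and id are placeholders):

```python
from openai.types.responses import ResponseOutputMessage, ResponseOutputText

msg = ResponseOutputMessage(
    id="msg_1",
    role="assistant",
    content=[ResponseOutputText(type="output_text", text="hello", annotations=[])],
    status="completed",
    type="message",
)
print(msg.type, msg.content[0].text)  # -> message hello
```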
@@ -71,6 +104,8 @@ def import_history(history, file):
         system_prompt_value = ''
 
         chat_history = []
+        openai_history = []
+        msg_num = 1
         # Convert tuple/pair format to messages format
         for pair in legacy_history:
             if pair[0]: # User message
@@ -88,6 +123,14 @@ def import_history(history, file):
                         "role": "user",
                         "content": {"path": cache_path}
                     })
+                    openai_history.append({
+                        "role": "user",
+                        "content": [{
+                            "type": "input_image",
+                            "detail": "high",
+                            "image_url": file_data
+                        }]
+                    })
                 else:
                     fname = pair[0]['file'].get('name', 'download')
                     file_bytes = base64.b64decode(file_data.split(',')[1])
@@ -96,25 +139,49 @@ def import_history(history, file):
                         "role": "user",
                         "content": {"path": cache_path}
                     })
+                    openai_history.append({
+                        "role": "user",
+                        "content": [{
+                            "type": "input_text",
+                            "text": file_bytes  # "text" is the field input_text parts expect
+                        }]
+                    })
             else:
                 # Keep as-is but convert to message format
                 chat_history.append({
                     "role": "user",
                     "content": pair[0]
                 })
+                openai_history.append({
+                    "role": "user",
+                    "content": pair[0]
+                })
         else:
             chat_history.append({
                 "role": "user",
                 "content": pair[0]
             })
+            openai_history.append({
+                "role": "user",
+                "content": pair[0]
+            })
 
         if pair[1]: # Assistant message
             chat_history.append({
                 "role": "assistant",
                 "content": pair[1]
             })
+            openai_history.append(openai.types.responses.ResponseOutputMessage(
+                id=f"msg_{msg_num}",
+                role="assistant",
+                content=[openai.types.responses.ResponseOutputText(type="output_text", text=pair[1], annotations=[])],
+                status="completed",
+                type="message"
+            ))
+
+        msg_num = msg_num + 1
 
-    return chat_history, system_prompt_value
+    return chat_history, system_prompt_value, openai_history
 
 def get_export_js():
     return """