Update app.py
Browse files
app.py
CHANGED
|
@@ -673,9 +673,13 @@ TEXT_EXTENSIONS = {
|
|
| 673 |
import zipfile
|
| 674 |
import shutil
|
| 675 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 676 |
def process_uploaded_file(file) -> str:
|
| 677 |
"""
|
| 678 |
-
Read an uploaded file.
|
| 679 |
1. Handles Gradio list inputs (fixes the crash).
|
| 680 |
2. Unzips archives so the agent can see inside (fixes the blindness).
|
| 681 |
3. Reads text files into context.
|
|
@@ -688,7 +692,6 @@ def process_uploaded_file(file) -> str:
|
|
| 688 |
if len(file) == 0: return ""
|
| 689 |
file = file[0]
|
| 690 |
|
| 691 |
-
# Handle both file objects (which have .name) and direct string paths
|
| 692 |
file_path = file.name if hasattr(file, 'name') else str(file)
|
| 693 |
file_name = os.path.basename(file_path)
|
| 694 |
suffix = os.path.splitext(file_name)[1].lower()
|
|
@@ -696,9 +699,7 @@ def process_uploaded_file(file) -> str:
|
|
| 696 |
# FIX 2: Handle ZIP files (The "Unpacking Protocol")
|
| 697 |
if suffix == '.zip':
|
| 698 |
try:
|
| 699 |
-
# Create a clean directory for this upload
|
| 700 |
extract_to = Path(REPO_PATH) / "uploaded_assets" / file_name.replace(".zip", "")
|
| 701 |
-
# Clean up if it exists from a previous run to avoid clutter
|
| 702 |
if extract_to.exists():
|
| 703 |
shutil.rmtree(extract_to)
|
| 704 |
extract_to.mkdir(parents=True, exist_ok=True)
|
|
@@ -706,11 +707,9 @@ def process_uploaded_file(file) -> str:
|
|
| 706 |
with zipfile.ZipFile(file_path, 'r') as zip_ref:
|
| 707 |
zip_ref.extractall(extract_to)
|
| 708 |
|
| 709 |
-
# Get a preview of contents to show the Agent
|
| 710 |
file_list = [f.name for f in extract_to.glob('*')]
|
| 711 |
preview = ", ".join(file_list[:10])
|
| 712 |
-
if len(file_list) > 10:
|
| 713 |
-
preview += f", ... (+{len(file_list)-10} more)"
|
| 714 |
|
| 715 |
return (f"📦 **Unzipped: {file_name}**\n"
|
| 716 |
f"Location: `{extract_to}`\n"
|
|
@@ -719,7 +718,7 @@ def process_uploaded_file(file) -> str:
|
|
| 719 |
except Exception as e:
|
| 720 |
return f"⚠️ Failed to unzip {file_name}: {e}"
|
| 721 |
|
| 722 |
-
#
|
| 723 |
if suffix in TEXT_EXTENSIONS or suffix == '':
|
| 724 |
try:
|
| 725 |
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
|
@@ -730,7 +729,7 @@ def process_uploaded_file(file) -> str:
|
|
| 730 |
except Exception as e:
|
| 731 |
return f"📎 **Uploaded: {file_name}** (error reading: {e})"
|
| 732 |
|
| 733 |
-
# Fallback for
|
| 734 |
else:
|
| 735 |
try:
|
| 736 |
size = os.path.getsize(file_path)
|
|
@@ -739,134 +738,72 @@ def process_uploaded_file(file) -> str:
|
|
| 739 |
return f"📎 **Uploaded: {file_name}** (error getting size: {e})"
|
| 740 |
|
| 741 |
|
| 742 |
-
# die Standard file handling remains the same...
|
| 743 |
-
|
| 744 |
-
# =============================================================================
|
| 745 |
-
# AGENTIC LOOP
|
| 746 |
-
# =============================================================================
|
| 747 |
-
# CHANGELOG [2026-02-01 - Claude/Opus]
|
| 748 |
-
# The core conversation loop. For each user message:
|
| 749 |
-
# 1. Build messages array with system prompt + history + new message
|
| 750 |
-
# 2. Send to Kimi K2.5 via HF Inference API
|
| 751 |
-
# 3. Parse response for tool calls
|
| 752 |
-
# 4. If READ tool calls: execute immediately, inject results, loop back to Kimi
|
| 753 |
-
# 5. If WRITE tool calls: stage in approval queue, notify user
|
| 754 |
-
# 6. If no tool calls: return conversational response
|
| 755 |
-
# 7. Save the turn to ChromaDB for persistent memory
|
| 756 |
-
#
|
| 757 |
-
# The loop runs up to MAX_ITERATIONS times to handle multi-step tool use.
|
| 758 |
-
# Each iteration either executes tools and loops, or returns the final text.
|
| 759 |
-
#
|
| 760 |
-
# IMPORTANT: Gradio 5.0+ chatbot with type="messages" expects history as a
|
| 761 |
-
# list of {"role": str, "content": str} dicts. We maintain that format
|
| 762 |
-
# throughout.
|
| 763 |
-
# =============================================================================
|
| 764 |
-
|
| 765 |
-
MAX_ITERATIONS = 5
|
| 766 |
-
|
| 767 |
-
|
| 768 |
def call_model_with_retry(messages, model_id, max_retries=4):
|
| 769 |
-
"""
|
| 770 |
-
Tries to call the API. If it hits a 504 (Busy/Timeout), it waits and retries.
|
| 771 |
-
"""
|
| 772 |
for attempt in range(max_retries):
|
| 773 |
try:
|
| 774 |
-
# Attempt the call
|
| 775 |
return client.chat_completion(
|
| 776 |
-
model=model_id,
|
| 777 |
-
messages=messages,
|
| 778 |
-
max_tokens=2048,
|
| 779 |
-
temperature=0.7
|
| 780 |
)
|
| 781 |
except Exception as e:
|
| 782 |
-
# Check if it is a 'busy' error (504 Gateway Timeout or 503 Service Unavailable)
|
| 783 |
error_str = str(e)
|
| 784 |
if "504" in error_str or "503" in error_str or "timeout" in error_str.lower():
|
| 785 |
-
|
| 786 |
-
|
| 787 |
-
print(f"❌ Final API attempt failed: {e}")
|
| 788 |
-
raise e
|
| 789 |
-
|
| 790 |
-
# The Backoff: Wait 2 seconds, then 4, then 8...
|
| 791 |
-
wait_time = 2 * (2 ** attempt)
|
| 792 |
print(f"⚠️ API Busy (Attempt {attempt+1}/{max_retries}). Retrying in {wait_time}s...")
|
| 793 |
time.sleep(wait_time)
|
| 794 |
else:
|
| 795 |
-
# If it's a different error (like 401 Unauthorized), don't retry.
|
| 796 |
raise e
|
| 797 |
|
| 798 |
|
| 799 |
-
|
| 800 |
-
|
|
|
|
| 801 |
|
| 802 |
-
|
| 803 |
-
message: User's text input
|
| 804 |
-
history: Chat history as list of {"role": ..., "content": ...} dicts
|
| 805 |
-
pending_proposals: Current list of staged write proposals (gr.State)
|
| 806 |
-
uploaded_file: Optional uploaded file from the file input widget
|
| 807 |
|
| 808 |
-
|
| 809 |
-
|
| 810 |
-
updated_gate_choices, updated_stats_files, updated_stats_convos)
|
| 811 |
-
"""
|
| 812 |
if not message.strip() and uploaded_file is None:
|
| 813 |
-
|
| 814 |
-
return (
|
| 815 |
-
history, "", pending_proposals,
|
| 816 |
-
_format_gate_choices(pending_proposals),
|
| 817 |
-
_stats_label_files(), _stats_label_convos()
|
| 818 |
-
)
|
| 819 |
|
| 820 |
-
# Inject
|
| 821 |
full_message = message.strip()
|
| 822 |
if uploaded_file is not None:
|
| 823 |
-
|
| 824 |
-
|
| 825 |
-
|
|
|
|
|
|
|
|
|
|
| 826 |
|
| 827 |
if not full_message:
|
| 828 |
-
return (
|
| 829 |
-
history, "", pending_proposals,
|
| 830 |
-
_format_gate_choices(pending_proposals),
|
| 831 |
-
_stats_label_files(), _stats_label_convos()
|
| 832 |
-
)
|
| 833 |
|
| 834 |
-
#
|
| 835 |
history = history + [{"role": "user", "content": full_message}]
|
| 836 |
|
| 837 |
-
#
|
| 838 |
system_prompt = build_system_prompt()
|
| 839 |
api_messages = [{"role": "system", "content": system_prompt}]
|
| 840 |
-
|
| 841 |
-
# Include recent history (cap to avoid token overflow)
|
| 842 |
-
# Keep last 20 turns to stay within Kimi's context window
|
| 843 |
-
recent_history = history[-40:] # 40 entries = ~20 turns (user+assistant pairs)
|
| 844 |
for h in recent_history:
|
| 845 |
api_messages.append({"role": h["role"], "content": h["content"]})
|
| 846 |
|
| 847 |
-
# Agentic loop: tool calls → execution → re-prompt → repeat
|
| 848 |
accumulated_text = ""
|
| 849 |
staged_this_turn = []
|
| 850 |
|
|
|
|
| 851 |
for iteration in range(MAX_ITERATIONS):
|
| 852 |
try:
|
| 853 |
-
|
| 854 |
-
|
| 855 |
-
messages=api_messages,
|
| 856 |
-
max_tokens=2048,
|
| 857 |
-
temperature=0.7
|
| 858 |
-
)
|
| 859 |
content = response.choices[0].message.content or ""
|
| 860 |
except Exception as e:
|
| 861 |
error_msg = f"⚠️ API Error: {e}"
|
| 862 |
history = history + [{"role": "assistant", "content": error_msg}]
|
| 863 |
-
return (
|
| 864 |
-
history, "", pending_proposals,
|
| 865 |
-
_format_gate_choices(pending_proposals),
|
| 866 |
-
_stats_label_files(), _stats_label_convos()
|
| 867 |
-
)
|
| 868 |
|
| 869 |
-
# Parse for tool calls
|
| 870 |
tool_calls = parse_tool_calls(content)
|
| 871 |
conversational_text = extract_conversational_text(content)
|
| 872 |
|
|
@@ -874,81 +811,60 @@ def agent_loop(message: str, history: list, pending_proposals: list, uploaded_fi
|
|
| 874 |
accumulated_text += ("\n\n" if accumulated_text else "") + conversational_text
|
| 875 |
|
| 876 |
if not tool_calls:
|
| 877 |
-
# No tools — this is the final response
|
| 878 |
break
|
| 879 |
|
| 880 |
-
# Process each tool call
|
| 881 |
tool_results_for_context = []
|
| 882 |
for tool_name, args in tool_calls:
|
| 883 |
result = execute_tool(tool_name, args)
|
| 884 |
-
|
| 885 |
if result["status"] == "executed":
|
| 886 |
-
|
| 887 |
-
tool_results_for_context.append(
|
| 888 |
-
f"[Tool Result: {tool_name}]\n{result['result']}"
|
| 889 |
-
)
|
| 890 |
elif result["status"] == "staged":
|
| 891 |
-
# WRITE tool — staged for approval
|
| 892 |
proposal = {
|
| 893 |
"id": f"proposal_{int(time.time())}_{tool_name}",
|
| 894 |
-
"tool": tool_name,
|
| 895 |
-
"
|
| 896 |
-
"description": result["description"],
|
| 897 |
-
"timestamp": time.strftime("%H:%M:%S")
|
| 898 |
}
|
| 899 |
staged_this_turn.append(proposal)
|
| 900 |
-
tool_results_for_context.append(
|
| 901 |
-
f"[Tool {tool_name}: STAGED for human approval. "
|
| 902 |
-
f"Josh will review this in the Build Approval Gate.]"
|
| 903 |
-
)
|
| 904 |
elif result["status"] == "error":
|
| 905 |
-
tool_results_for_context.append(
|
| 906 |
-
f"[Tool Error: {tool_name}]\n{result['result']}"
|
| 907 |
-
)
|
| 908 |
|
| 909 |
-
# If we only had staged tools and no reads, break the loop
|
| 910 |
if tool_results_for_context:
|
| 911 |
-
# Feed tool results back as a system message for the next iteration
|
| 912 |
combined_results = "\n\n".join(tool_results_for_context)
|
| 913 |
api_messages.append({"role": "assistant", "content": content})
|
| 914 |
api_messages.append({"role": "user", "content": f"[Tool Results]\n{combined_results}"})
|
| 915 |
else:
|
| 916 |
break
|
| 917 |
|
| 918 |
-
#
|
| 919 |
final_response = accumulated_text
|
| 920 |
-
|
| 921 |
-
# Append staging notifications if any writes were staged
|
| 922 |
if staged_this_turn:
|
| 923 |
staging_notice = "\n\n---\n🛡️ **Staged for your approval** (see Build Approval Gate tab):\n"
|
| 924 |
for proposal in staged_this_turn:
|
| 925 |
staging_notice += f"- {proposal['description']}\n"
|
| 926 |
final_response += staging_notice
|
| 927 |
-
# Add to persistent queue
|
| 928 |
pending_proposals = pending_proposals + staged_this_turn
|
| 929 |
|
| 930 |
if not final_response:
|
| 931 |
-
final_response = "🤔 I processed your request but didn't generate a text response.
|
| 932 |
|
| 933 |
-
# Add assistant response to history
|
| 934 |
history = history + [{"role": "assistant", "content": final_response}]
|
| 935 |
|
| 936 |
-
# Save conversation turn for persistent memory
|
| 937 |
try:
|
| 938 |
-
|
| 939 |
-
|
| 940 |
-
|
| 941 |
-
|
| 942 |
-
|
| 943 |
-
|
| 944 |
-
|
| 945 |
-
|
| 946 |
-
|
| 947 |
-
|
| 948 |
-
|
| 949 |
-
)
|
| 950 |
-
|
| 951 |
-
|
| 952 |
|
| 953 |
|
| 954 |
# =============================================================================
|
|
|
|
| 673 |
import zipfile
|
| 674 |
import shutil
|
| 675 |
|
| 676 |
+
# =============================================================================
|
| 677 |
+
# ROBUST FILE & API HANDLERS (The "Box Cutter" & "Persistent Dialer")
|
| 678 |
+
# =============================================================================
|
| 679 |
+
|
| 680 |
def process_uploaded_file(file) -> str:
|
| 681 |
"""
|
| 682 |
+
Read an uploaded file.
|
| 683 |
1. Handles Gradio list inputs (fixes the crash).
|
| 684 |
2. Unzips archives so the agent can see inside (fixes the blindness).
|
| 685 |
3. Reads text files into context.
|
|
|
|
| 692 |
if len(file) == 0: return ""
|
| 693 |
file = file[0]
|
| 694 |
|
|
|
|
| 695 |
file_path = file.name if hasattr(file, 'name') else str(file)
|
| 696 |
file_name = os.path.basename(file_path)
|
| 697 |
suffix = os.path.splitext(file_name)[1].lower()
|
|
|
|
| 699 |
# FIX 2: Handle ZIP files (The "Unpacking Protocol")
|
| 700 |
if suffix == '.zip':
|
| 701 |
try:
|
|
|
|
| 702 |
extract_to = Path(REPO_PATH) / "uploaded_assets" / file_name.replace(".zip", "")
|
|
|
|
| 703 |
if extract_to.exists():
|
| 704 |
shutil.rmtree(extract_to)
|
| 705 |
extract_to.mkdir(parents=True, exist_ok=True)
|
|
|
|
| 707 |
with zipfile.ZipFile(file_path, 'r') as zip_ref:
|
| 708 |
zip_ref.extractall(extract_to)
|
| 709 |
|
|
|
|
| 710 |
file_list = [f.name for f in extract_to.glob('*')]
|
| 711 |
preview = ", ".join(file_list[:10])
|
| 712 |
+
if len(file_list) > 10: preview += f", ... (+{len(file_list)-10} more)"
|
|
|
|
| 713 |
|
| 714 |
return (f"📦 **Unzipped: {file_name}**\n"
|
| 715 |
f"Location: `{extract_to}`\n"
|
|
|
|
| 718 |
except Exception as e:
|
| 719 |
return f"⚠️ Failed to unzip {file_name}: {e}"
|
| 720 |
|
| 721 |
+
# Handle Text files
|
| 722 |
if suffix in TEXT_EXTENSIONS or suffix == '':
|
| 723 |
try:
|
| 724 |
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
|
|
|
|
| 729 |
except Exception as e:
|
| 730 |
return f"📎 **Uploaded: {file_name}** (error reading: {e})"
|
| 731 |
|
| 732 |
+
# Fallback for binary
|
| 733 |
else:
|
| 734 |
try:
|
| 735 |
size = os.path.getsize(file_path)
|
|
|
|
| 738 |
return f"📎 **Uploaded: {file_name}** (error getting size: {e})"
|
| 739 |
|
| 740 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 741 |
def call_model_with_retry(messages, model_id, max_retries=4):
    """Call the chat-completion API, retrying on transient "busy" errors.

    Retries with exponential backoff (2s, 4s, 8s, ...) when the error text
    indicates a 504/503 gateway failure or a timeout; any other error
    (e.g. 401 Unauthorized) is re-raised immediately.

    Args:
        messages: List of ``{"role": ..., "content": ...}`` dicts to send.
        model_id: Identifier of the model to query.
        max_retries: Total number of attempts before giving up.

    Returns:
        The response object from ``client.chat_completion``.

    Raises:
        Exception: Re-raised from the API client — immediately for
            non-transient errors, or after ``max_retries`` busy failures.
    """
    for attempt in range(max_retries):
        try:
            return client.chat_completion(
                model=model_id, messages=messages, max_tokens=2048, temperature=0.7
            )
        except Exception as e:
            # Only retry on "busy" signals: 504/503 gateway errors or timeouts.
            error_str = str(e)
            if "504" in error_str or "503" in error_str or "timeout" in error_str.lower():
                if attempt == max_retries - 1:
                    # Out of attempts — log before giving up so the failure
                    # is visible in the server output.
                    print(f"❌ Final API attempt failed: {e}")
                    raise  # bare raise preserves the original traceback
                # Exponential backoff: 2s, then 4s, then 8s...
                wait_time = 2 * (2 ** attempt)
                print(f"⚠️ API Busy (Attempt {attempt+1}/{max_retries}). Retrying in {wait_time}s...")
                time.sleep(wait_time)
            else:
                # Non-transient errors fail fast — retrying won't help.
                raise
|
| 757 |
|
| 758 |
|
| 759 |
+
# =============================================================================
# AGENTIC LOOP
# =============================================================================

# Upper bound on model → tool → model round-trips per user turn; the loop
# feeds tool results back to the model and re-prompts, so this cap keeps a
# tool-happy response from cycling forever.
MAX_ITERATIONS = 5
|
|
|
|
|
|
|
|
|
|
|
|
|
| 764 |
|
| 765 |
+
def agent_loop(message: str, history: list, pending_proposals: list, uploaded_file) -> tuple:
|
| 766 |
+
# 1. Handle empty inputs
|
|
|
|
|
|
|
| 767 |
if not message.strip() and uploaded_file is None:
|
| 768 |
+
return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 769 |
|
| 770 |
+
# 2. Inject File Content
|
| 771 |
full_message = message.strip()
|
| 772 |
if uploaded_file is not None:
|
| 773 |
+
try:
|
| 774 |
+
file_context = process_uploaded_file(uploaded_file)
|
| 775 |
+
if file_context:
|
| 776 |
+
full_message = f"{file_context}\n\n{full_message}" if full_message else file_context
|
| 777 |
+
except Exception as e:
|
| 778 |
+
full_message += f"\n[System Error processing file: {e}]"
|
| 779 |
|
| 780 |
if not full_message:
|
| 781 |
+
return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())
|
|
|
|
|
|
|
|
|
|
|
|
|
| 782 |
|
| 783 |
+
# 3. Update History
|
| 784 |
history = history + [{"role": "user", "content": full_message}]
|
| 785 |
|
| 786 |
+
# 4. Prepare Context
|
| 787 |
system_prompt = build_system_prompt()
|
| 788 |
api_messages = [{"role": "system", "content": system_prompt}]
|
| 789 |
+
recent_history = history[-40:]
|
|
|
|
|
|
|
|
|
|
| 790 |
for h in recent_history:
|
| 791 |
api_messages.append({"role": h["role"], "content": h["content"]})
|
| 792 |
|
|
|
|
| 793 |
accumulated_text = ""
|
| 794 |
staged_this_turn = []
|
| 795 |
|
| 796 |
+
# 5. The Thinking Loop
|
| 797 |
for iteration in range(MAX_ITERATIONS):
|
| 798 |
try:
|
| 799 |
+
# USE RETRY LOGIC HERE
|
| 800 |
+
response = call_model_with_retry(api_messages, MODEL_ID)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 801 |
content = response.choices[0].message.content or ""
|
| 802 |
except Exception as e:
|
| 803 |
error_msg = f"⚠️ API Error: {e}"
|
| 804 |
history = history + [{"role": "assistant", "content": error_msg}]
|
| 805 |
+
return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())
|
|
|
|
|
|
|
|
|
|
|
|
|
| 806 |
|
|
|
|
| 807 |
tool_calls = parse_tool_calls(content)
|
| 808 |
conversational_text = extract_conversational_text(content)
|
| 809 |
|
|
|
|
| 811 |
accumulated_text += ("\n\n" if accumulated_text else "") + conversational_text
|
| 812 |
|
| 813 |
if not tool_calls:
|
|
|
|
| 814 |
break
|
| 815 |
|
|
|
|
| 816 |
tool_results_for_context = []
|
| 817 |
for tool_name, args in tool_calls:
|
| 818 |
result = execute_tool(tool_name, args)
|
|
|
|
| 819 |
if result["status"] == "executed":
|
| 820 |
+
tool_results_for_context.append(f"[Tool Result: {tool_name}]\n{result['result']}")
|
|
|
|
|
|
|
|
|
|
| 821 |
elif result["status"] == "staged":
|
|
|
|
| 822 |
proposal = {
|
| 823 |
"id": f"proposal_{int(time.time())}_{tool_name}",
|
| 824 |
+
"tool": tool_name, "args": result["args"],
|
| 825 |
+
"description": result["description"], "timestamp": time.strftime("%H:%M:%S")
|
|
|
|
|
|
|
| 826 |
}
|
| 827 |
staged_this_turn.append(proposal)
|
| 828 |
+
tool_results_for_context.append(f"[Tool {tool_name}: STAGED for human approval.]")
|
|
|
|
|
|
|
|
|
|
| 829 |
elif result["status"] == "error":
|
| 830 |
+
tool_results_for_context.append(f"[Tool Error: {tool_name}]\n{result['result']}")
|
|
|
|
|
|
|
| 831 |
|
|
|
|
| 832 |
if tool_results_for_context:
|
|
|
|
| 833 |
combined_results = "\n\n".join(tool_results_for_context)
|
| 834 |
api_messages.append({"role": "assistant", "content": content})
|
| 835 |
api_messages.append({"role": "user", "content": f"[Tool Results]\n{combined_results}"})
|
| 836 |
else:
|
| 837 |
break
|
| 838 |
|
| 839 |
+
# 6. Finalize Response
|
| 840 |
final_response = accumulated_text
|
|
|
|
|
|
|
| 841 |
if staged_this_turn:
|
| 842 |
staging_notice = "\n\n---\n🛡️ **Staged for your approval** (see Build Approval Gate tab):\n"
|
| 843 |
for proposal in staged_this_turn:
|
| 844 |
staging_notice += f"- {proposal['description']}\n"
|
| 845 |
final_response += staging_notice
|
|
|
|
| 846 |
pending_proposals = pending_proposals + staged_this_turn
|
| 847 |
|
| 848 |
if not final_response:
|
| 849 |
+
final_response = "🤔 I processed your request but didn't generate a text response."
|
| 850 |
|
|
|
|
| 851 |
history = history + [{"role": "assistant", "content": final_response}]
|
| 852 |
|
|
|
|
| 853 |
try:
|
| 854 |
+
turn_count = len([h for h in history if h["role"] == "user"])
|
| 855 |
+
ctx.save_conversation_turn(full_message, final_response, turn_count)
|
| 856 |
+
except Exception:
|
| 857 |
+
pass
|
| 858 |
+
|
| 859 |
+
# 7. THE CRITICAL RETURN (Aligned with the Function Definition)
|
| 860 |
+
return (
|
| 861 |
+
history,
|
| 862 |
+
"", # Clear the textbox
|
| 863 |
+
pending_proposals,
|
| 864 |
+
_format_gate_choices(pending_proposals),
|
| 865 |
+
_stats_label_files(),
|
| 866 |
+
_stats_label_convos()
|
| 867 |
+
)
|
| 868 |
|
| 869 |
|
| 870 |
# =============================================================================
|