Executor-Tyrant-Framework committed on
Commit
e965918
·
verified ·
1 Parent(s): 3dc8fac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +240 -50
app.py CHANGED
@@ -679,65 +679,255 @@ import shutil
679
  # ROBUST FILE & API HANDLERS (The "Box Cutter" & "Persistent Dialer")
680
  # =============================================================================
681
 
682
- def process_uploaded_file(file) -> str:
683
- """
684
- Read an uploaded file.
685
- 1. Handles Gradio list inputs (fixes the crash).
686
- 2. Unzips archives so the agent can see inside (fixes the blindness).
687
- 3. Reads text files into context.
688
- """
689
- if file is None:
690
- return ""
 
 
691
 
692
- # FIX 1: Gradio often passes a list, even for single files. Unwrap it.
693
- if isinstance(file, list):
694
- if len(file) == 0: return ""
695
- file = file[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
696
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
697
  file_path = file.name if hasattr(file, 'name') else str(file)
698
  file_name = os.path.basename(file_path)
699
- suffix = os.path.splitext(file_name)[1].lower()
700
-
701
- # FIX 2: Handle ZIP files (The "Unpacking Protocol")
702
- if suffix == '.zip':
 
 
 
 
 
703
  try:
704
- extract_to = Path(REPO_PATH) / "uploaded_assets" / file_name.replace(".zip", "")
705
- if extract_to.exists():
706
- shutil.rmtree(extract_to)
707
- extract_to.mkdir(parents=True, exist_ok=True)
708
-
709
- with zipfile.ZipFile(file_path, 'r') as zip_ref:
710
- zip_ref.extractall(extract_to)
711
-
712
- file_list = [f.name for f in extract_to.glob('*')]
713
- preview = ", ".join(file_list[:10])
714
- if len(file_list) > 10: preview += f", ... (+{len(file_list)-10} more)"
715
-
716
- return (f"πŸ“¦ **Unzipped: {file_name}**\n"
717
- f"Location: `{extract_to}`\n"
718
- f"Contents: {preview}\n"
719
- f"SYSTEM NOTE: The files are extracted. Use list_files('{extract_to}') to explore them.")
720
- except Exception as e:
721
- return f"⚠️ Failed to unzip {file_name}: {e}"
722
 
723
- # Handle Text files
724
- if suffix in TEXT_EXTENSIONS or suffix == '':
 
 
 
 
 
 
 
 
725
  try:
726
- with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
727
- content = f.read()
728
- if len(content) > 50000:
729
- content = content[:50000] + f"\n\n... (truncated, {len(content)} total chars)"
730
- return f"πŸ“Ž **Uploaded: {file_name}**\n```\n{content}\n```"
731
  except Exception as e:
732
- return f"πŸ“Ž **Uploaded: {file_name}** (error reading: {e})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
733
 
734
- # Fallback for binary
735
- else:
736
- try:
737
- size = os.path.getsize(file_path)
738
- return f"πŸ“Ž **Uploaded: {file_name}** (binary file, {size:,} bytes)"
739
- except Exception as e:
740
- return f"πŸ“Ž **Uploaded: {file_name}** (error getting size: {e})"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
741
 
742
 
743
  def call_model_with_retry(messages, model_id, max_retries=4):
 
679
  # ROBUST FILE & API HANDLERS (The "Box Cutter" & "Persistent Dialer")
680
  # =============================================================================
681
 
682
+ import gradio as gr
683
+ from huggingface_hub import InferenceClient
684
+ from recursive_context import RecursiveContextManager
685
+ from pathlib import Path
686
+ import os
687
+ import json
688
+ import re
689
+ import time
690
+ import traceback
691
+ import zipfile
692
+ import shutil
693
 
694
+ # ... [Standard Initialization & Sync Logic remains identical to your baseline] ...
695
+
696
# OpenAI-compatible HF router endpoint; HF_TOKEN supplies auth.
client = InferenceClient("https://router.huggingface.co/v1", token=os.getenv("HF_TOKEN"))
# Optional Space id to mirror locally before indexing ("" = skip sync).
ET_SYSTEMS_SPACE = os.getenv("ET_SYSTEMS_SPACE", "")
# Local checkout that the agent's file tools operate on.
REPO_PATH = os.getenv("REPO_PATH", "/workspace/e-t-systems")
699
+
700
+ # ... [sync_from_space & _resolve_repo_path functions remain identical] ...
701
+
702
def sync_from_space(space_id: str, local_path: Path):
    """Mirror a Hugging Face Space's files into a local directory.

    Best-effort by design: missing credentials, network errors or
    unreadable entries leave whatever already synced in place and return
    silently, so app startup never fails on sync problems.

    Args:
        space_id: Space identifier, e.g. "user/space-name".
        local_path: Destination directory; created if missing.
    """
    token = os.getenv("HF_TOKEN") or os.getenv("HUGGING_FACE_HUB_TOKEN")
    if not token:
        return  # no credentials -> nothing we can fetch
    try:
        from huggingface_hub import HfFileSystem
        fs = HfFileSystem(token=token)
        space_path = f"spaces/{space_id}"
        all_files = fs.glob(f"{space_path}/**")
        local_path.mkdir(parents=True, exist_ok=True)
        for file_path in all_files:
            rel = file_path.replace(f"{space_path}/", "", 1)
            # Skip hidden paths (.git, .cache, ...) and bytecode caches.
            if any(p.startswith('.') for p in rel.split('/')) or '__pycache__' in rel:
                continue
            try:
                if fs.info(file_path)['type'] == 'directory':
                    continue
            except Exception:
                # FIX: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                continue
            dest = local_path / rel
            dest.parent.mkdir(parents=True, exist_ok=True)
            with fs.open(file_path, "rb") as f:
                dest.write_bytes(f.read())
    except Exception:
        # Deliberate best-effort: a failed sync must not break startup.
        pass
721
+
722
def _resolve_repo_path() -> str:
    """Return the directory the context manager should index.

    Syncs the configured Space first (when set), then prefers REPO_PATH
    if it exists and is non-empty, falling back to this file's folder.
    """
    candidate = Path(REPO_PATH)
    if ET_SYSTEMS_SPACE:
        sync_from_space(ET_SYSTEMS_SPACE, candidate)
    if candidate.exists() and any(candidate.iterdir()):
        return str(candidate)
    return os.path.dirname(os.path.abspath(__file__))
727
+
728
# Project-wide context/tool backend, rooted at the (possibly synced) repo.
ctx = RecursiveContextManager(_resolve_repo_path())
# Chat model requested from the HF router.
MODEL_ID = "moonshotai/Kimi-K2.5"
730
+
731
+ # ... [Tool Definitions & Prompts remain identical] ...
732
+
733
# Markdown tool catalogue injected verbatim into the system prompt.
# Actual call extraction from model output happens in parse_tool_calls.
TOOL_DEFINITIONS = """
## Available Tools
- **search_code(query, n=5)**
- **read_file(path, start_line, end_line)**
- **list_files(path, max_depth)**
- **write_file(path, content)**
- **shell_execute(command)**
- **create_shadow_branch()**
"""
742
+
743
def build_system_prompt() -> str:
    """Assemble the system prompt: identity, live repo stats, tool list."""
    snapshot = ctx.get_stats()
    file_count = snapshot.get('total_files', 0)
    convo_count = snapshot.get('conversations', 0)
    return (
        f"You are Clawdbot 🦞.\n"
        f"\n"
        f"## System Stats\n"
        f"- πŸ“‚ Files: {file_count}\n"
        f"- πŸ’Ύ Conversations: {convo_count}\n"
        f"\n"
        f"{TOOL_DEFINITIONS}\n"
    )
753
+
754
def parse_tool_calls(content: str) -> list:
    """Extract tool invocations from a model response.

    Supports two wire formats:
      1. Token-delimited: `<|tool_call_begin|>functions.NAME:IDX\\n<json><|tool_call_end|>`.
      2. XML-ish: `<function_calls>/<invoke>/<parameter>` blocks.

    Returns:
        List of (tool_name, args_dict) tuples in order of appearance.
        Payloads that fail JSON parsing are kept raw instead of dropped.
    """
    calls = []
    # Format 1: JSON body between the special tokens.
    for match in re.finditer(r'<\|tool_call_begin\|>\s*functions\.(\w+):\d+\s*\n(.*?)<\|tool_call_end\|>', content, re.DOTALL):
        payload = match.group(2).strip()
        try:
            calls.append((match.group(1), json.loads(payload)))
        except ValueError:
            # FIX: was a bare `except:`; json.loads failures raise
            # ValueError (JSONDecodeError). Preserve raw text for debugging.
            calls.append((match.group(1), {"raw": payload}))
    # Format 2: per-parameter values are parsed as JSON when possible.
    for block in re.finditer(r'<function_calls>(.*?)</function_calls>', content, re.DOTALL):
        for invoke in re.finditer(r'<invoke\s+name="(\w+)">(.*?)</invoke>', block.group(1), re.DOTALL):
            args = {}
            for p in re.finditer(r'<parameter\s+name="(\w+)">(.*?)</parameter>', invoke.group(2), re.DOTALL):
                raw = p.group(2).strip()
                try:
                    args[p.group(1)] = json.loads(raw)
                except ValueError:
                    args[p.group(1)] = raw
            calls.append((invoke.group(1), args))
    return calls
767
 
768
def extract_conversational_text(content: str) -> str:
    """Return *content* with all tool-call markup removed, whitespace-trimmed."""
    remaining = content
    for pattern in (r'<\|tool_call_begin\|>.*?<\|tool_call_end\|>',
                    r'<function_calls>.*?</function_calls>'):
        remaining = re.sub(pattern, '', remaining, flags=re.DOTALL)
    return remaining.strip()
771
+
772
def execute_tool(tool_name: str, args: dict) -> dict:
    """Run a read-only tool immediately; stage mutating tools for approval.

    Returns a dict whose "status" is one of:
      "executed" - safe tool ran; "result" holds its output.
      "staged"   - mutating tool; "args"/"description" describe the proposal.
      "error"    - unknown tool or an exception; "result" holds the message.
    """
    try:
        if tool_name == 'search_code':
            hits = ctx.search_code(args.get('query', ''), args.get('n', 5))
            rendered = "\n".join([f"πŸ“„ {hit['file']}\n```{hit['snippet']}```" for hit in hits])
            return {"status": "executed", "tool": tool_name, "result": rendered}
        if tool_name == 'read_file':
            body = ctx.read_file(args.get('path', ''), args.get('start_line'), args.get('end_line'))
            return {"status": "executed", "tool": tool_name, "result": body}
        if tool_name == 'list_files':
            listing = ctx.list_files(args.get('path', ''), args.get('max_depth', 3))
            return {"status": "executed", "tool": tool_name, "result": listing}
        # Mutating tools are never run directly; they go through the Gate.
        if tool_name == 'write_file':
            return {"status": "staged", "tool": tool_name, "args": args,
                    "description": f"✏️ Write to `{args.get('path')}`"}
        if tool_name == 'shell_execute':
            return {"status": "staged", "tool": tool_name, "args": args,
                    "description": f"πŸ–₯️ Execute: `{args.get('command')}`"}
        if tool_name == 'create_shadow_branch':
            return {"status": "staged", "tool": tool_name, "args": args,
                    "description": "πŸ›‘οΈ Create shadow branch"}
        return {"status": "error", "result": f"Unknown tool: {tool_name}"}
    except Exception as e:
        return {"status": "error", "result": str(e)}
789
+
790
def execute_staged_tool(tool_name: str, args: dict) -> str:
    """Run a Gate-approved mutating tool; return its message or an error string."""
    # Lazy lambdas: nothing touches ctx until the chosen action is invoked.
    actions = {
        'write_file': lambda: ctx.write_file(args.get('path', ''), args.get('content', '')),
        'shell_execute': lambda: ctx.shell_execute(args.get('command', '')),
        'create_shadow_branch': lambda: ctx.create_shadow_branch(),
    }
    action = actions.get(tool_name)
    if action is None:
        return "Unknown tool"
    try:
        return action()
    except Exception as e:
        return f"Error: {e}"
797
+
798
+ # --- FIXED FILE UPLOAD HANDLER ---
799
# Suffixes treated as readable text and inlined into the chat context.
TEXT_EXTENSIONS = {'.py', '.js', '.ts', '.json', '.md', '.txt', '.yaml', '.yml', '.html', '.css', '.sh', '.toml', '.sql', '.env', '.dockerfile'}

def process_uploaded_file(file) -> str:
    """Turn a Gradio upload into a context string for the agent.

    Handles list inputs (Gradio wraps even single files), zip archives
    (extracted under /workspace/uploads so the tools can browse them),
    text files (inlined, capped at 50k chars) and binary files (ignored).

    Returns:
        Markdown describing the upload, or "" when nothing usable arrived.
    """
    if file is None:
        return ""
    # FIX: Gradio often passes a list even for a single upload; an empty
    # list previously raised IndexError on file[0].
    if isinstance(file, list):
        if not file:
            return ""
        file = file[0]
    file_path = file.name if hasattr(file, 'name') else str(file)
    file_name = os.path.basename(file_path)

    if file_name.lower().endswith('.zip'):
        # Standardized upload location for tool access; created lazily so
        # plain text uploads never touch the filesystem.
        upload_dir = Path("/workspace/uploads")
        upload_dir.mkdir(parents=True, exist_ok=True)
        # FIX: strip the suffix positionally; .replace('.zip', '') missed
        # uppercase ".ZIP" and mangled names containing ".zip" internally.
        extract_to = upload_dir / file_name[:-4]
        if extract_to.exists():
            shutil.rmtree(extract_to)
        extract_to.mkdir(parents=True, exist_ok=True)
        try:
            with zipfile.ZipFile(file_path, 'r') as z:
                # NOTE(review): extractall trusts member paths (zip-slip);
                # acceptable only because uploads come from the operator.
                z.extractall(extract_to)
            return f"πŸ“¦ **Unzipped:** `{extract_to}`\nFiles available for tools."
        except Exception as e:
            return f"❌ Zip Error: {e}"

    if os.path.splitext(file_name)[1].lower() in TEXT_EXTENSIONS:
        try:
            # errors='ignore': tolerate stray bytes rather than failing the upload.
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                return f"πŸ“Ž **{file_name}**\n```\n{f.read()[:50000]}\n```"
        except Exception as e:
            return f"Error reading {file_name}: {e}"
    return f"πŸ“Ž **{file_name}** (Binary ignored)"
825
+
826
+ # --- AGENT LOOP ---
827
def agent_loop(message: str, history: list, pending_proposals: list, uploaded_file) -> tuple:
    """One chat turn: send context to the model, run/stage tool calls, update UI state.

    Returns the 6-tuple the Gradio wiring expects:
    (chatbot history, cleared textbox, proposal state, Gate checkbox group,
     files stat label, convos stat label).
    """
    # Nothing to do: neither text nor an upload was provided.
    if not message.strip() and uploaded_file is None:
        return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())

    full_message = message.strip()
    # Prepend the processed upload (inlined text / unzip report) to the prompt.
    if uploaded_file: full_message = f"{process_uploaded_file(uploaded_file)}\n\n{full_message}"

    history = history + [{"role": "user", "content": full_message}]
    # Fresh system prompt + a sliding window of the last 20 turns.
    api_messages = [{"role": "system", "content": build_system_prompt()}] + [{"role": h["role"], "content": h["content"]} for h in history[-20:]]

    accumulated_text = ""   # conversational text gathered across iterations
    staged_this_turn = []   # mutating-tool proposals awaiting Gate approval

    # At most 5 model/tool round-trips per user turn.
    for _ in range(5):
        try:
            resp = client.chat_completion(model=MODEL_ID, messages=api_messages, max_tokens=2048)
            content = resp.choices[0].message.content or ""
        except Exception as e:
            # Surface API failures in the chat rather than crashing the UI.
            history.append({"role": "assistant", "content": f"API Error: {e}"})
            return (history, "", pending_proposals, _format_gate_choices(pending_proposals), _stats_label_files(), _stats_label_convos())

        calls = parse_tool_calls(content)
        text = extract_conversational_text(content)
        if text: accumulated_text += ("\n\n" if accumulated_text else "") + text

        # No tool calls -> the model is done talking for this turn.
        if not calls: break

        results = []
        # NOTE(review): "error" results from execute_tool are silently
        # dropped here — the model receives no feedback for failed tools.
        for name, args in calls:
            res = execute_tool(name, args)
            if res["status"] == "executed": results.append(f"Result: {res['result']}")
            elif res["status"] == "staged":
                # NOTE(review): id uses second-resolution time — two stagings
                # of the same tool in one second collide. Confirm acceptable.
                staged_this_turn.append({"id": f"p_{int(time.time())}_{name}", "tool": name, "args": res["args"], "description": res["description"], "timestamp": time.strftime("%H:%M:%S")})
                results.append(f"STAGED: {name}")

        # Feed tool output back so the model can continue reasoning.
        api_messages += [{"role": "assistant", "content": content}, {"role": "user", "content": "\n".join(results)}]

    final = accumulated_text + ("\n\nπŸ›‘οΈ Check Gate." if staged_this_turn else "")
    history.append({"role": "assistant", "content": final or "Thinking..."})
    ctx.save_conversation_turn(full_message, final, len(history))

    return (history, "", pending_proposals + staged_this_turn, _format_gate_choices(pending_proposals + staged_this_turn), _stats_label_files(), _stats_label_convos())
869
+
870
+ # --- UI COMPONENTS ---
871
def _format_gate_choices(proposals):
    """Rebuild the Gate checkbox group from the pending-proposal list."""
    choices = []
    for proposal in proposals:
        label = f"[{proposal['timestamp']}] {proposal['description']}"
        choices.append((label, proposal['id']))
    return gr.CheckboxGroup(choices=choices, value=[])
873
+
874
def execute_approved_proposals(ids, proposals, history):
    """Run every proposal whose id was ticked in the Gate; keep the rest pending."""
    if not ids:
        return "No selection.", proposals, _format_gate_choices(proposals), history
    executed_lines = []
    still_pending = []
    for proposal in proposals:
        if proposal['id'] not in ids:
            still_pending.append(proposal)
            continue
        outcome = execute_staged_tool(proposal['tool'], proposal['args'])
        executed_lines.append(f"**{proposal['tool']}**: {outcome}")
    if executed_lines:
        summary = "βœ… **Executed:**\n" + "\n".join(executed_lines)
        history.append({"role": "assistant", "content": summary})
    return "Done.", still_pending, _format_gate_choices(still_pending), history
882
+
883
def auto_continue_after_approval(history, proposals):
    """After the Gate executes proposals, let the agent resume automatically.

    Continues only when the latest assistant message is an execution
    summary (starts with the βœ… marker); otherwise returns the current UI
    state unchanged.
    """
    # FIX: guard the empty-history case, which previously raised IndexError.
    if not history:
        return history, "", proposals, _format_gate_choices(proposals), _stats_label_files(), _stats_label_convos()
    last = history[-1].get("content", "")
    # Chatbot content may be a rich list-of-parts or a plain string.
    text = last[0].get("text", "") if isinstance(last, list) else str(last)
    if not text.startswith("βœ…"):
        return history, "", proposals, _format_gate_choices(proposals), _stats_label_files(), _stats_label_convos()
    return agent_loop("[Approved. Continue.]", history, proposals, None)
888
+
889
def _stats_label_files():
    """Markdown label: number of files the context manager has indexed."""
    return f"πŸ“‚ Files: {ctx.get_stats().get('total_files', 0)}"


def _stats_label_convos():
    """Markdown label: number of persisted conversation turns."""
    return f"πŸ’Ύ Convos: {ctx.get_stats().get('conversations', 0)}"
891
+
892
+ # --- UI LAYOUT ---
893
# Gradio app: two tabs — Chat (agent) and Gate (human approval of staged tools).
with gr.Blocks(title="🦞 Clawdbot") as demo:
    state_proposals = gr.State([])  # pending tool proposals, shared across tabs
    gr.Markdown("# 🦞 Clawdbot Command Center")
    with gr.Tabs():
        with gr.Tab("πŸ’¬ Chat"):
            with gr.Row():
                with gr.Column(scale=1):
                    # Sidebar: live stats, refresh button, file upload widget.
                    stat_f = gr.Markdown(_stats_label_files())
                    stat_c = gr.Markdown(_stats_label_convos())
                    btn_ref = gr.Button("πŸ”„")
                    # FIXED: Restored full file types list
                    file_in = gr.File(label="Upload", file_count="multiple", file_types=['.py', '.js', '.json', '.md', '.txt', '.yaml', '.sh', '.zip', '.env', '.toml', '.sql'])
                with gr.Column(scale=4):
                    chat = gr.Chatbot(height=600, avatar_images=(None, "https://em-content.zobj.net/source/twitter/408/lobster_1f99e.png"))
                    with gr.Row():
                        txt = gr.Textbox(scale=6, placeholder="Prompt...")
                        btn_send = gr.Button("Send", scale=1)
        with gr.Tab("πŸ›‘οΈ Gate"):
            # Human-approval queue for staged (mutating) tool calls.
            gate = gr.CheckboxGroup(label="Proposals", interactive=True)
            with gr.Row():
                btn_exec = gr.Button("βœ… Execute", variant="primary")
                btn_clear = gr.Button("πŸ—‘οΈ Clear")
            res_md = gr.Markdown()

    # Both send paths (Enter key and button) share the same wiring lists.
    inputs = [txt, chat, state_proposals, file_in]
    outputs = [chat, txt, state_proposals, gate, stat_f, stat_c]

    txt.submit(agent_loop, inputs, outputs)
    btn_send.click(agent_loop, inputs, outputs)
    btn_ref.click(lambda: (_stats_label_files(), _stats_label_convos()), None, [stat_f, stat_c])

    # After executing approved proposals, let the agent continue automatically.
    btn_exec.click(execute_approved_proposals, [gate, state_proposals, chat], [res_md, state_proposals, gate, chat]).then(
        auto_continue_after_approval, [chat, state_proposals], outputs
    )
    btn_clear.click(lambda p: ("Cleared.", [], _format_gate_choices([])), state_proposals, [res_md, state_proposals, gate])

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
931
 
932
 
933
  def call_model_with_retry(messages, model_id, max_retries=4):