Boning c committed on
Commit
995d061
·
verified ·
1 Parent(s): 0f42d91

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -35
app.py CHANGED
@@ -1,12 +1,20 @@
1
- import gradio as gr
2
- import subprocess
3
  import os
 
 
 
 
 
4
 
5
- # Use container’s home directory to avoid permission errors
6
- WORKDIR = os.path.join(os.environ['HOME'], "workspace")
7
  os.makedirs(WORKDIR, exist_ok=True)
8
 
9
- # HTML iframe to embed noVNC viewer
 
 
 
 
 
10
  NO_VNC_IFRAME = """
11
  <iframe src="http://localhost:6080/vnc.html?autoconnect=true&resize=remote"
12
  style="width:100%; height:600px; border:none;">
@@ -16,45 +24,92 @@ NO_VNC_IFRAME = """
16
  def start_desktop():
17
  return NO_VNC_IFRAME
18
 
19
- def run_shell(cmd):
20
- result = subprocess.run(
21
- cmd,
22
- shell=True,
23
- capture_output=True,
24
- text=True,
25
- env={**os.environ, "DISPLAY": ":1"} # Supports GUI apps if needed
26
  )
27
- return result.stdout + result.stderr
28
 
29
  def upload_file(file):
30
- dest_path = os.path.join(WORKDIR, os.path.basename(file.name))
31
- with open(dest_path, "wb") as f:
32
  f.write(file.read())
33
- return f"✅ Uploaded to: {dest_path}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
- with gr.Blocks(css="@import url('https://cdn.simplecss.org/simple.min.css');") as demo:
36
- gr.Markdown("# 🖥️ Offline Visual Sandbox (no API, no cost)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
 
38
  with gr.Tab("Desktop"):
39
- launch_btn = gr.Button("Launch Desktop")
40
- desktop_view = gr.HTML()
41
- launch_btn.click(start_desktop, outputs=desktop_view)
 
42
 
43
  with gr.Tab("Shell"):
44
- cmd_input = gr.Textbox(label="Shell Command", placeholder="Try 'ls /home/user/workspace'")
45
- run_btn = gr.Button("Run")
46
- cmd_output = gr.Textbox(label="Command Output", lines=10)
47
- run_btn.click(run_shell, inputs=cmd_input, outputs=cmd_output)
 
 
 
 
 
 
 
 
48
 
49
  with gr.Tab("Upload"):
50
- file_input = gr.File(label="Upload File")
51
- status_output = gr.Textbox(label="Upload Status")
52
- file_input.change(upload_file, inputs=file_input, outputs=status_output)
53
-
54
- demo.launch(
55
- server_name="0.0.0.0",
56
- server_port=7860,
57
- share=False,
58
- ssr_mode=False
59
- )
60
 
 
 
 
 
1
  import os
2
+ import re
3
+ import subprocess
4
+ import gradio as gr
5
+ from transformers import AutoTokenizer
6
+ from optimum.onnxruntime import ORTModelForCausalLM
7
 
8
# 1. Prepare a writable workspace
# Use $HOME so the app works inside containers where only the home
# directory is writable (avoids permission errors seen in Spaces).
WORKDIR = os.path.join(os.environ["HOME"], "workspace")
os.makedirs(WORKDIR, exist_ok=True)

# 2. Load DialoGPT-medium (chat-tuned) via ONNXRuntime
# CPUExecutionProvider pins inference to CPU — presumably no GPU is
# available in this container; confirm against the deployment target.
MODEL_ID = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = ORTModelForCausalLM.from_pretrained(MODEL_ID, provider="CPUExecutionProvider")
+
17
+ # 3. Virtual desktop iframe
18
  NO_VNC_IFRAME = """
19
  <iframe src="http://localhost:6080/vnc.html?autoconnect=true&resize=remote"
20
  style="width:100%; height:600px; border:none;">
 
24
def start_desktop():
    """Return the noVNC iframe markup used to embed the virtual desktop."""
    return NO_VNC_IFRAME
26
 
27
def run_shell(cmd: str):
    """Run *cmd* through the shell and return combined stdout + stderr.

    DISPLAY=:1 is injected so commands that spawn GUI apps target the
    sandbox's virtual X display.
    """
    child_env = dict(os.environ, DISPLAY=":1")
    completed = subprocess.run(
        cmd,
        shell=True,
        capture_output=True,
        text=True,
        env=child_env,
    )
    return completed.stdout + completed.stderr
33
 
34
def upload_file(file):
    """Save an uploaded file into the sandbox workspace.

    The file is written under WORKDIR using only its base name (path
    components from the client are discarded). Returns a status string
    naming the destination path.
    """
    target = os.path.join(WORKDIR, os.path.basename(file.name))
    with open(target, "wb") as sink:
        sink.write(file.read())
    return f"✅ Uploaded to: {target}"
39
+
40
def agent_chat(user_msg, history):
    """Chat with the local DialoGPT model and execute any shell commands it emits.

    Parameters
    ----------
    user_msg : str
        The user's latest message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; mutated in place with the new turn.

    Returns
    -------
    tuple[str, list]
        The assistant reply (with any command output appended) and the
        updated history.
    """
    # System framing: instructs the model to wrap shell requests in [CMD] tags.
    system = (
        "You are a helpful shell assistant. "
        "When you need to run a shell command, wrap it in [CMD]...[/CMD]."
    )
    # Rebuild the whole conversation as one string, DialoGPT-style, with
    # eos_token separating turns.
    conv = system + tokenizer.eos_token + "\n"
    for u, a in history:
        conv += f"User: {u}{tokenizer.eos_token}Assistant: {a}{tokenizer.eos_token}\n"
    conv += f"User: {user_msg}{tokenizer.eos_token}Assistant:"

    # Tokenize + generate
    inputs = tokenizer(conv, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=150,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,
    )
    full_reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text after the final "Assistant:" marker.
    reply = full_reply.split("Assistant:")[-1].strip()

    # Extract & execute shell commands.
    # FIX: the pattern must be a single raw-string literal; it was
    # previously split across physical lines, which is a SyntaxError.
    cmds = re.findall(r"\[CMD\](.*?)\[/CMD\]", reply, re.DOTALL)
    if cmds:
        out = ""
        for cmd in cmds:
            out += f"$ {cmd}\n"
            # NOTE(review): executes model-generated text via the shell —
            # acceptable only inside this throwaway sandbox container.
            p = subprocess.run(cmd, shell=True, capture_output=True, text=True)
            out += p.stdout + p.stderr + "\n"
        reply += f"\n\n**Command Output:**\n```\n{out}```"

    history.append((user_msg, reply))
    return reply, history
84
+
85
# 4. Gradio UI — four tabs: virtual desktop, direct shell, AI chat, upload.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖💬 ONNX Chat (DialoGPT) + Shell Agent")

    with gr.Tab("Desktop"):
        gr.Markdown("### 🖥️ Virtual Desktop (noVNC)")
        btn = gr.Button("Launch Desktop")
        desktop = gr.HTML()
        btn.click(start_desktop, outputs=desktop)

    with gr.Tab("Shell"):
        gr.Markdown("### 🛠️ Direct Shell")
        cmd_in = gr.Textbox(label="Command")
        cmd_out = gr.Textbox(label="Output", lines=6)
        gr.Button("Run").click(run_shell, inputs=cmd_in, outputs=cmd_out)

    with gr.Tab("Chat"):
        gr.Markdown("### 💬 Chat with AI (can run shell)")
        # FIX: gr.ChatInterface accepts neither `state` nor `placeholder`
        # keyword arguments — passing them raises TypeError at startup.
        # ChatInterface manages conversation history itself and passes it
        # to agent_chat as the second argument.
        chat = gr.ChatInterface(fn=agent_chat)

    with gr.Tab("Upload"):
        gr.Markdown("### 📂 Upload File to Sandbox")
        uploader = gr.File(label="Choose File")
        status = gr.Textbox(label="Status")
        uploader.change(upload_file, inputs=uploader, outputs=status)

# Bind to all interfaces on the Space's expected port; no public share link.
demo.launch(server_name="0.0.0.0", server_port=7860, share=False)