Spaces:
Sleeping
Sleeping
Ilya
committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -37,6 +37,7 @@ class CodeExecutor:
|
|
| 37 |
self.execution_complete = False
|
| 38 |
|
| 39 |
def custom_input(self, prompt=''):
|
|
|
|
| 40 |
self.waiting_for_input = True
|
| 41 |
while self.waiting_for_input:
|
| 42 |
pass # Wait until input is provided
|
|
@@ -68,6 +69,10 @@ class CodeExecutor:
|
|
| 68 |
self.waiting_for_input = False
|
| 69 |
|
| 70 |
def get_output(self):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
return self.output
|
| 72 |
|
| 73 |
def is_execution_complete(self):
|
|
@@ -79,35 +84,38 @@ def start_execution(code, session_id):
|
|
| 79 |
executor = CodeExecutor()
|
| 80 |
executors[session_id] = executor
|
| 81 |
executor.execute_code(code)
|
| 82 |
-
return "
|
| 83 |
|
| 84 |
def provide_input(user_input, session_id):
|
| 85 |
executor = executors.get(session_id)
|
| 86 |
if executor and not executor.is_execution_complete():
|
| 87 |
executor.provide_input(user_input)
|
| 88 |
-
return "
|
| 89 |
else:
|
| 90 |
return "No execution in progress or execution has completed."
|
| 91 |
|
| 92 |
-
def
|
| 93 |
executor = executors.get(session_id)
|
| 94 |
if executor:
|
| 95 |
output = executor.get_output()
|
| 96 |
if executor.is_execution_complete():
|
| 97 |
del executors[session_id]
|
| 98 |
-
|
|
|
|
|
|
|
| 99 |
else:
|
| 100 |
-
return "No execution in progress."
|
| 101 |
|
| 102 |
def chat(user_input, history):
|
| 103 |
response = llm_inference(user_input)
|
| 104 |
-
history.append(
|
|
|
|
| 105 |
return history, history
|
| 106 |
|
| 107 |
with gr.Blocks() as demo:
|
| 108 |
gr.Markdown("# 🐍 Python Helper Chatbot")
|
| 109 |
with gr.Tab("Chat"):
|
| 110 |
-
chatbot = gr.Chatbot()
|
| 111 |
msg = gr.Textbox(placeholder="Type your message here...")
|
| 112 |
msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
|
| 113 |
with gr.Tab("Interpreter"):
|
|
@@ -120,30 +128,27 @@ with gr.Blocks() as demo:
|
|
| 120 |
session_id = gr.State()
|
| 121 |
|
| 122 |
def run_code(code):
|
| 123 |
-
|
| 124 |
-
session = str(uuid4())
|
| 125 |
-
session_id.
|
| 126 |
-
start_execution(code, session)
|
| 127 |
-
return
|
| 128 |
|
| 129 |
def send_input(user_input):
|
| 130 |
-
session = session_id.
|
| 131 |
provide_input(user_input, session)
|
| 132 |
return ""
|
| 133 |
|
| 134 |
def update_output():
|
| 135 |
-
session = session_id.
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
return output
|
| 139 |
-
else:
|
| 140 |
-
return "No execution in progress."
|
| 141 |
|
| 142 |
run_button.click(run_code, inputs=code_input, outputs=[output_box, input_box, send_input_button])
|
| 143 |
send_input_button.click(send_input, inputs=input_box, outputs=input_box)
|
| 144 |
-
|
| 145 |
with gr.Tab("Logs"):
|
| 146 |
gr.Markdown("### 📜 Logs")
|
| 147 |
log_output = gr.Textbox(label="Logs", lines=10, interactive=False)
|
| 148 |
|
| 149 |
-
demo.launch()
|
|
|
|
| 37 |
self.execution_complete = False
|
| 38 |
|
| 39 |
def custom_input(self, prompt=''):
    """Drop-in replacement for builtins.input() used by executed user code.

    Records the prompt into the captured output, then blocks until
    provide_input() clears self.waiting_for_input from another thread.

    Args:
        prompt: Text normally printed by input(); appended to self.output.
    """
    import time  # local import so this method stays self-contained
    self.output += prompt
    self.waiting_for_input = True
    while self.waiting_for_input:
        # Sleep briefly instead of busy-spinning (`pass`), which would peg
        # a CPU core for as long as the UI user takes to type a response.
        time.sleep(0.05)
|
|
|
|
| 69 |
self.waiting_for_input = False
|
| 70 |
|
| 71 |
def get_output(self):
    """Drain any text captured on sys.stdout into self.output and return it.

    Returns:
        The accumulated output string for this executor.
    """
    captured = sys.stdout
    # Only drain when stdout has been redirected to a StringIO-like buffer;
    # the real console stream has no getvalue().
    if captured and hasattr(captured, "getvalue"):
        self.output += captured.getvalue()
        # Rewind and clear the buffer so the same text is not
        # re-appended on the next poll.
        captured.seek(0)
        captured.truncate(0)
    return self.output
|
| 77 |
|
| 78 |
def is_execution_complete(self):
|
|
|
|
def start_execution(code, session_id):
    """Create and register an executor for this session, then launch the code.

    Args:
        code: Python source to run.
        session_id: Key under which the executor is stored in `executors`.

    Returns:
        A 3-tuple of (status message, input-box update, send-button update)
        matching the Gradio outputs wiring; both widgets become visible.
    """
    runner = CodeExecutor()
    executors[session_id] = runner
    runner.execute_code(code)
    return "Code is running...", gr.update(visible=True), gr.update(visible=True)
|
| 88 |
|
| 89 |
def provide_input(user_input, session_id):
    """Forward one line of user input to the live executor for a session.

    Args:
        user_input: Text typed by the user.
        session_id: Session key into the module-level `executors` registry.

    Returns:
        "" on success, or an explanatory message when no run is active.
    """
    active = executors.get(session_id)
    # Guard clause: nothing to feed when there is no executor or it finished.
    if active is None or active.is_execution_complete():
        return "No execution in progress or execution has completed."
    active.provide_input(user_input)
    return ""
|
| 96 |
|
| 97 |
+
def poll_output(session_id):
    """Fetch accumulated output for a session and compute widget visibility.

    Args:
        session_id: Session key into the module-level `executors` registry.

    Returns:
        A 3-tuple of (output text, input-box update, send-button update).
        The input widgets are hidden once execution completes or when no
        execution exists for the session.
    """
    executor = executors.get(session_id)
    if not executor:
        return ("No execution in progress.",
                gr.update(visible=False),
                gr.update(visible=False))
    output = executor.get_output()
    if executor.is_execution_complete():
        # Completed: free the registry entry and hide the input widgets.
        del executors[session_id]
        return output, gr.update(visible=False), gr.update(visible=False)
    # Still running: keep the input widgets visible.
    return output, gr.update(visible=True), gr.update(visible=True)
|
| 108 |
|
| 109 |
def chat(user_input, history):
    """Run LLM inference on the user's message and record the exchange.

    Args:
        user_input: The user's chat message.
        history: Mutable list of message dicts in Gradio 'messages' format.

    Returns:
        The updated history twice (Gradio wires it to two outputs).
    """
    response = llm_inference(user_input)
    # One user turn followed by one assistant turn, in messages format.
    history.extend([
        {"role": "user", "content": user_input},
        {"role": "assistant", "content": response},
    ])
    return history, history
|
| 114 |
|
| 115 |
with gr.Blocks() as demo:
|
| 116 |
gr.Markdown("# 🐍 Python Helper Chatbot")
|
| 117 |
with gr.Tab("Chat"):
|
| 118 |
+
chatbot = gr.Chatbot(type='messages')
|
| 119 |
msg = gr.Textbox(placeholder="Type your message here...")
|
| 120 |
msg.submit(chat, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
|
| 121 |
with gr.Tab("Interpreter"):
|
|
|
|
| 128 |
session_id = gr.State()
|
| 129 |
|
| 130 |
def run_code(code):
    # Start executing the submitted code under a fresh session id and
    # return the (status, input-box, send-button) updates from
    # start_execution for the wired Gradio outputs.
    import uuid
    session = str(uuid.uuid4())
    # NOTE(review): mutating session_id.value server-side does not give
    # per-user state in Gradio — gr.State is per-session only when passed
    # through an event's inputs/outputs. As written, concurrent users
    # would presumably overwrite each other's session — confirm and
    # consider threading the session through the event wiring instead.
    session_id.value = session
    return_values = start_execution(code, session)
    return return_values
|
| 136 |
|
| 137 |
def send_input(user_input):
    """Pass the typed line to the active session, then clear the textbox.

    Args:
        user_input: Text from the Gradio input box.

    Returns:
        "" so the textbox is emptied after sending.
    """
    active_session = session_id.value
    provide_input(user_input, active_session)
    return ""
|
| 141 |
|
| 142 |
def update_output():
    """Poll the active session for output and widget visibility.

    Returns:
        The (output text, input-box update, send-button update) tuple
        produced by poll_output for the current session id.
    """
    # poll_output already returns the exact 3-tuple the outputs expect.
    return poll_output(session_id.value)
|
|
|
|
|
|
|
|
|
|
| 146 |
|
| 147 |
run_button.click(run_code, inputs=code_input, outputs=[output_box, input_box, send_input_button])
|
| 148 |
send_input_button.click(send_input, inputs=input_box, outputs=input_box)
|
| 149 |
+
demo.load(update_output, outputs=[output_box, input_box, send_input_button], every=1)
|
| 150 |
with gr.Tab("Logs"):
|
| 151 |
gr.Markdown("### 📜 Logs")
|
| 152 |
log_output = gr.Textbox(label="Logs", lines=10, interactive=False)
|
| 153 |
|
| 154 |
+
demo.queue().launch()
|