# bot / app.py
# (Hugging Face Spaces page residue, preserved as comments: eddddyy's picture,
#  "Update app.py", commit 3dd1623 verified)
import re

import gradio as gr

from model_loader import load_model
from assistant import get_assistant_response
# Load model pipeline
# Loaded once at import time so every request reuses the same pipeline
# (presumably a transformers text-generation pipeline — see model_loader).
pipe = load_model()
# Wrapper to handle dynamic output length

# Single-word commands that deserve a short, fast reply.
_SIMPLE_KEYWORDS = frozenset(
    ["hi", "hello", "open", "close", "shutdown", "restart", "lock", "screenshot"]
)


def smart_response(command: str, execute: bool) -> str:
    """Route a user command to the assistant with a dynamic token budget.

    Simple commands (greetings, basic OS actions) get a small
    ``max_new_tokens`` so replies come back fast; anything else gets a
    larger budget for a thoughtful answer.

    Args:
        command: Free-form user command or question.
        execute: Whether real OS command execution is allowed (passed
            through to the assistant unchanged).

    Returns:
        The assistant's response from ``get_assistant_response``
        (presumably a string — displayed as text by the UI).
    """
    # Match whole words only. The previous substring check misfired on
    # e.g. "philosophy" (contains "hi") or "happening" (contains "open"),
    # wrongly capping complex answers at 64 tokens.
    words = set(re.findall(r"[a-z]+", command.lower()))
    is_simple = not words.isdisjoint(_SIMPLE_KEYWORDS)
    max_tokens = 64 if is_simple else 512  # fast replies for simple commands
    return get_assistant_response(pipe, command, execute, max_new_tokens=max_tokens)
# Gradio UI — components are named before being wired into the Interface.
command_box = gr.Textbox(
    lines=2,
    placeholder="e.g. Open Chrome or Take a screenshot",
    label="🧾 Command",
)
execute_toggle = gr.Checkbox(label="🛠️ Execute command (if possible)")

APP_DESCRIPTION = (
    "AI assistant powered by **Qwen/Qwen3-1.7B** 🧠\n\n"
    "Try commands like:\n"
    "• 'Open Chrome'\n"
    "• 'Take a screenshot'\n"
    "• 'What is AGI?'\n"
    "• 'Summarize the history of AI'\n\n"
    "✅ Simple commands → fast replies\n"
    "🧠 Complex questions → thoughtful answers\n"
    "⚠️ Check the box to allow real OS command execution"
)

# Two inputs (text command + execute toggle) map onto smart_response's
# (command, execute) parameters; the reply is rendered as plain text.
demo = gr.Interface(
    fn=smart_response,
    inputs=[command_box, execute_toggle],
    outputs="text",
    title="🧠 Smart AI Assistant",
    description=APP_DESCRIPTION,
    flagging_mode="never",
)
if __name__ == "__main__":
    # Bind to all interfaces on the standard Gradio port; no public share link.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)