File size: 1,550 Bytes
799893f 46c04ea 799893f 380046c 46c04ea 01b60a4 b9614c4 3dd1623 b9614c4 3dd1623 b9614c4 380046c f4593e2 b9614c4 46c04ea 3b42523 46c04ea f4593e2 46c04ea 380046c 3dd1623 3b42523 380046c 01b60a4 f4593e2 799893f f4593e2 3dd1623 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
import re

import gradio as gr

from assistant import get_assistant_response
from model_loader import load_model
# Load model pipeline once at import time so every request reuses the same
# pipeline object (load_model is project-local; presumably a transformers
# pipeline for Qwen — confirm against model_loader).
pipe = load_model()
# Wrapper to handle dynamic output length
def smart_response(command, execute):
    """Route a user command to the assistant with a budget-sized reply.

    Simple one-shot commands (greetings, OS actions) get a small
    ``max_new_tokens`` budget for fast replies; anything else gets a
    larger budget for a thoughtful answer.

    Args:
        command: Raw user text from the UI textbox (may be empty).
        execute: Whether real OS command execution is allowed.

    Returns:
        Whatever ``get_assistant_response`` returns (displayed as text).
    """
    simple_keywords = {"hi", "hello", "open", "close", "shutdown",
                       "restart", "lock", "screenshot"}
    # Match whole words, not substrings: the old `keyword in command` test
    # made "hi" match inside "this"/"history" (so "Summarize the history
    # of AI" was wrongly treated as simple) and "open" match "opened".
    words = re.findall(r"[a-z]+", command.lower()) if command else []
    is_simple = any(word in simple_keywords for word in words)
    max_tokens = 64 if is_simple else 512  # fast replies for simple commands
    return get_assistant_response(pipe, command, execute, max_new_tokens=max_tokens)
# Gradio UI
_DESCRIPTION = (
    "AI assistant powered by **Qwen/Qwen3-1.7B** 🧠\n\n"
    "Try commands like:\n"
    "• 'Open Chrome'\n"
    "• 'Take a screenshot'\n"
    "• 'What is AGI?'\n"
    "• 'Summarize the history of AI'\n\n"
    "✅ Simple commands → fast replies\n"
    "🧠 Complex questions → thoughtful answers\n"
    "⚠️ Check the box to allow real OS command execution"
)

# Two inputs: free-text command plus an opt-in execution checkbox.
_COMMAND_BOX = gr.Textbox(
    lines=2,
    placeholder="e.g. Open Chrome or Take a screenshot",
    label="🧾 Command",
)
_EXECUTE_TOGGLE = gr.Checkbox(label="🛠️ Execute command (if possible)")

demo = gr.Interface(
    fn=smart_response,
    inputs=[_COMMAND_BOX, _EXECUTE_TOGGLE],
    outputs="text",
    title="🧠 Smart AI Assistant",
    description=_DESCRIPTION,
    flagging_mode="never",  # disable Gradio's flagging UI entirely
)
# Launch only when run as a script (not when imported). Binds to all
# interfaces on port 7860 without creating a public Gradio share link.
if __name__ == "__main__":
    demo.launch(share=False, server_name="0.0.0.0", server_port=7860)
|