eddddyy committed on
Commit
3dd1623
·
verified ·
1 Parent(s): 63e33f3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -7,10 +7,10 @@ pipe = load_model()
7
 
8
  # Wrapper to handle dynamic output length
9
  def smart_response(command, execute):
10
- # Dynamically adjust max_new_tokens based on complexity
11
- simple_keywords = ["open", "close", "shutdown", "restart", "lock", "screenshot"]
12
  is_simple = any(keyword in command.lower() for keyword in simple_keywords)
13
- max_tokens = 128 if is_simple else 1024 # Shorter for quick commands, longer for reasoning
14
 
15
  return get_assistant_response(pipe, command, execute, max_new_tokens=max_tokens)
16
 
@@ -28,7 +28,7 @@ demo = gr.Interface(
28
  outputs="text",
29
  title="🧠 Smart AI Assistant",
30
  description=(
31
- "AI assistant powered by **Mistral-7B-Instruct** 🧠\n\n"
32
  "Try commands like:\n"
33
  "• 'Open Chrome'\n"
34
  "• 'Take a screenshot'\n"
@@ -42,4 +42,4 @@ demo = gr.Interface(
42
  )
43
 
44
  if __name__ == "__main__":
45
- demo.launch(share=True, server_name="0.0.0.0", server_port=7860)
 
7
 
8
  # Wrapper to handle dynamic output length
9
  def smart_response(command, execute):
10
+ # Dynamically adjust max_new_tokens based on command complexity
11
+ simple_keywords = ["hi", "hello", "open", "close", "shutdown", "restart", "lock", "screenshot"]
12
  is_simple = any(keyword in command.lower() for keyword in simple_keywords)
13
+ max_tokens = 64 if is_simple else 512 # fast replies for simple commands
14
 
15
  return get_assistant_response(pipe, command, execute, max_new_tokens=max_tokens)
16
 
 
28
  outputs="text",
29
  title="🧠 Smart AI Assistant",
30
  description=(
31
+ "AI assistant powered by **Qwen/Qwen3-1.7B** 🧠\n\n"
32
  "Try commands like:\n"
33
  "• 'Open Chrome'\n"
34
  "• 'Take a screenshot'\n"
 
42
  )
43
 
44
  if __name__ == "__main__":
45
+ demo.launch(share=False, server_name="0.0.0.0", server_port=7860)