eddddyy committed on
Commit
d90d9dc
·
verified ·
1 Parent(s): b9614c4

Update assistant.py

Browse files
Files changed (1) hide show
  1. assistant.py +10 -3
assistant.py CHANGED
@@ -1,7 +1,7 @@
1
  from command_classifier import classify_command
2
  from executor import execute_command
3
 
4
- def get_assistant_response(pipe, command: str, execute: bool) -> str:
5
  """
6
  Process the user command using the language model pipeline and optionally execute a system command.
7
 
@@ -9,6 +9,7 @@ def get_assistant_response(pipe, command: str, execute: bool) -> str:
9
  pipe: The Hugging Face model pipeline
10
  command (str): The user's text input
11
  execute (bool): Whether or not to execute a real command on the machine
 
12
 
13
  Returns:
14
  str: Response from the assistant (and optionally OS output)
@@ -21,11 +22,17 @@ def get_assistant_response(pipe, command: str, execute: bool) -> str:
21
  if exec_output:
22
  return f"🛠️ Executed:\n{exec_output}"
23
 
24
- # Format prompt for Qwen2.5-VL instruction-tuned model
25
  prompt = f"<|user|>\n{command}\n<|assistant|>\n"
26
 
27
  try:
28
- output = pipe(prompt)[0]["generated_text"]
 
 
 
 
 
 
29
 
30
  # Cleanly extract assistant response
31
  if "<|assistant|>" in output:
 
1
  from command_classifier import classify_command
2
  from executor import execute_command
3
 
4
+ def get_assistant_response(pipe, command: str, execute: bool, max_new_tokens: int = 1024) -> str:
5
  """
6
  Process the user command using the language model pipeline and optionally execute a system command.
7
 
 
9
  pipe: The Hugging Face model pipeline
10
  command (str): The user's text input
11
  execute (bool): Whether or not to execute a real command on the machine
12
+ max_new_tokens (int): Maximum tokens to generate (longer = more detailed)
13
 
14
  Returns:
15
  str: Response from the assistant (and optionally OS output)
 
22
  if exec_output:
23
  return f"🛠️ Executed:\n{exec_output}"
24
 
25
+ # Format prompt for instruction-tuned models like Gemma or Qwen
26
  prompt = f"<|user|>\n{command}\n<|assistant|>\n"
27
 
28
  try:
29
+ output = pipe(
30
+ prompt,
31
+ max_new_tokens=max_new_tokens,
32
+ do_sample=True,
33
+ temperature=0.7,
34
+ top_p=0.9
35
+ )[0]["generated_text"]
36
 
37
  # Cleanly extract assistant response
38
  if "<|assistant|>" in output: