eddddyy committed on
Commit
a5b969d
·
verified ·
1 Parent(s): 380046c

Update assistant.py

Browse files
Files changed (1) hide show
  1. assistant.py +23 -4
assistant.py CHANGED
@@ -2,19 +2,38 @@ from command_classifier import classify_command
2
  from executor import execute_command
3
 
4
def get_assistant_response(pipe, command: str, execute: bool) -> str:
    """
    Answer a user command, optionally running it as a real system command
    before falling back to the language-model pipeline.

    Args:
        pipe: Text-generation pipeline; called as ``pipe(prompt)`` and read
            via ``[0]["generated_text"]``.
        command (str): The user's raw text input.
        execute (bool): When true, try executing the command on the machine
            first and return its output if any.

    Returns:
        str: Executor output, a tagged model response, or an error string.
    """
    command_type = classify_command(command)

    # Optional real-command path: any non-empty executor result wins.
    if execute:
        result = execute_command(command_type, command)
        if result:
            return result

    prompt = f"<|user|>\n{command}\n<|assistant|>\n"
    marker = "<|assistant|>"
    try:
        generated = pipe(prompt)[0]["generated_text"]
        # Keep only the assistant's turn when the marker survives generation.
        response = (
            generated.split(marker)[-1].strip()
            if marker in generated
            else generated.strip()
        )
        return f"🤖 ({command_type}) {response}"
    except Exception as e:
        # Boundary handler: surface any pipeline failure as a string.
        return f"⚠️ Error: {e}"
 
2
  from executor import execute_command
3
 
4
def get_assistant_response(pipe, command: str, execute: bool) -> str:
    """
    Process the user command using the language model pipeline and optionally
    execute a system command.

    Args:
        pipe: The Hugging Face model pipeline; called as ``pipe(prompt)`` and
            read via ``[0]["generated_text"]``.
        command (str): The user's text input.
        execute (bool): Whether or not to execute a real command on the machine
            before falling back to the model.

    Returns:
        str: Response from the assistant (and optionally OS output).
    """
    command_type = classify_command(command)

    # If execute flag is enabled, try running a system command first;
    # any non-empty executor output short-circuits the model call.
    if execute:
        exec_output = execute_command(command_type, command)
        if exec_output:
            return f"🛠️ Executed:\n{exec_output}"

    # Prompt format for the instruction-tuned chat model.
    prompt = f"<|user|>\n{command}\n<|assistant|>\n"
    try:
        # Keep the try body minimal: only the pipeline call (and its result
        # indexing) should be reported as a model error.
        output = pipe(prompt)[0]["generated_text"]
    except Exception as e:
        # Boundary handler: surface the failure as text instead of raising.
        return f"⚠️ Model error: {e}"

    # Cleanly extract the assistant's turn from the generated text.
    if "<|assistant|>" in output:
        response = output.split("<|assistant|>")[-1].strip()
    else:
        response = output.strip()

    return f"🤖 ({command_type}) {response}"