from command_classifier import classify_command
from executor import execute_command

def get_assistant_response(pipe, command: str, execute: bool, max_new_tokens: int = 1024) -> str:
    """
    Process the user command using the language model pipeline and optionally execute a system command.

    Args:
        pipe: The Hugging Face model pipeline
        command (str): The user's text input
        execute (bool): Whether or not to execute a real command on the machine
        max_new_tokens (int): Maximum tokens to generate (longer = more detailed)

    Returns:
        str: Response from the assistant (and optionally OS output)
    """
    command_type = classify_command(command)

    # If execution is allowed, try to run a shell command
    if execute:
        exec_output = execute_command(command_type, command)
        if exec_output:
            return f"🛠️ Executed:\n{exec_output}"

    # Prepare a simple chat-style prompt. Note: models such as Qwen3 define their own
    # chat template, so pipe.tokenizer.apply_chat_template would be more robust here.
    prompt = f"<|user|>\n{command}\n<|assistant|>\n"

    try:
        output = pipe(
            prompt,
            return_full_text=True,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.7,
            top_p=0.9
        )

        generated_text = output[0]["generated_text"]

        # Extract only assistant response
        if "<|assistant|>" in generated_text:
            response = generated_text.split("<|assistant|>")[-1].strip()
        else:
            response = generated_text.strip()

        return f"🤖 ({command_type}) {response}"

    except Exception as e:
        return f"⚠️ Model error: {e}"