from command_classifier import classify_command
from executor import execute_command


def get_assistant_response(pipe, command: str, execute: bool, max_new_tokens: int = 1024) -> str:
    """
    Process the user command using the language model pipeline and
    optionally execute a system command.

    Args:
        pipe: The Hugging Face model pipeline
        command (str): The user's text input
        execute (bool): Whether or not to execute a real command on the machine
        max_new_tokens (int): Maximum tokens to generate (longer = more detailed)

    Returns:
        str: Response from the assistant (and optionally OS output)
    """
    command_type = classify_command(command)

    # If execution is allowed, try to run a shell command
    if execute:
        exec_output = execute_command(command_type, command)
        if exec_output:
            return f"🛠️ Executed:\n{exec_output}"

    # Prepare prompt for chat-style models like Qwen3
    prompt = f"<|user|>\n{command}\n<|assistant|>\n"

    try:
        output = pipe(
            prompt,
            return_full_text=True,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )
        generated_text = output[0]["generated_text"]

        # Extract only the assistant's portion of the response
        if "<|assistant|>" in generated_text:
            response = generated_text.split("<|assistant|>")[-1].strip()
        else:
            response = generated_text.strip()

        return f"🤖 ({command_type}) {response}"
    except Exception as e:
        return f"⚠️ Model error: {e}"
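

# --- Usage sketch (not part of the original module; an illustrative assumption) ---
# A minimal example of wiring get_assistant_response to a Hugging Face
# text-generation pipeline. The model name "Qwen/Qwen3-0.6B" and this
# __main__ entry point are hypothetical; substitute whatever model and
# launcher the project actually uses.
if __name__ == "__main__":
    from transformers import pipeline

    # Build a text-generation pipeline; device_map="auto" places the model
    # on a GPU when one is available, otherwise falls back to CPU.
    pipe = pipeline("text-generation", model="Qwen/Qwen3-0.6B", device_map="auto")

    # Ask for a response without allowing execution on the host machine.
    print(get_assistant_response(pipe, "list the files in the current directory", execute=False))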