File size: 5,039 Bytes
e066621
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import argparse
import sys
from pathlib import Path

from agent.graph import AgentRunner
from agent.states import AgentConfig, ModelBackend

# Help text printed by the ":help"/":h" REPL command. Kept as a module-level
# constant so handle_command() can print it verbatim; the leading backslash
# suppresses the first newline of the triple-quoted literal.
COMMAND_HELP = """\
Commands:
  :help / :h       Show this message.
  :context         Refresh and display the project summary.
  :clear           Reset the conversation history (system instructions stay).
  :history         Print the last few exchanges.
  :exit / :quit    Leave the CLI.
"""


def parse_backend(value: str) -> ModelBackend:
    """Convert a backend name (case-insensitive) into a ModelBackend member.

    Intended as an argparse ``type=`` callable, so invalid values raise
    ``argparse.ArgumentTypeError`` for a clean CLI error message.
    """
    normalized = value.lower()
    try:
        return ModelBackend(normalized)
    except ValueError as exc:
        options = ", ".join(member.value for member in ModelBackend)
        raise argparse.ArgumentTypeError(f"Backend must be one of: {options}") from exc


def resolve_project_directory(arg_dir: str | None, default_dir: Path) -> str:
    if arg_dir:
        project_directory_input = arg_dir.strip()
    else:
        prompt = (
            f"Enter the absolute or relative path to your project root "
            f"(press Enter for default: {default_dir}): "
        )
        user_value = input(prompt).strip()
        project_directory_input = user_value or str(default_dir)

    return str(Path(project_directory_input).expanduser().resolve())


def build_agent(args, project_directory: str) -> AgentRunner:
    """Build an AgentRunner from parsed CLI arguments and a resolved project dir.

    *args.backend* is normally already a ModelBackend (argparse converts it),
    but a raw string is tolerated for programmatic callers.
    """
    backend = args.backend
    if not isinstance(backend, ModelBackend):
        backend = parse_backend(backend)

    return AgentRunner(
        AgentConfig(
            project_directory=project_directory,
            backend=backend,
            model=args.model,
            temperature=args.temperature,
            recursion_limit=args.recursion_limit,
            # --no-auto-context disables the pre-request summarize step.
            auto_context=not args.no_auto_context,
        )
    )


def handle_command(command: str, runner: AgentRunner) -> bool:
    """Dispatch a ':'-prefixed REPL command.

    Returns True when the command was recognized and handled; False lets the
    caller forward the input to the agent instead. ``:exit``/``:quit``
    terminate the process directly.
    """
    cmd = command.lower()

    if cmd in (":help", ":h"):
        print(COMMAND_HELP)
    elif cmd in (":exit", ":quit"):
        print("Exiting...")
        sys.exit(0)
    elif cmd == ":clear":
        runner.reset_history()
        print("Conversation cleared.")
    elif cmd == ":context":
        # Refresh first so any errors surface before the header is printed.
        summary = runner.refresh_summary()
        print("\n--- Workspace summary ---")
        print(summary)
        print("-------------------------\n")
    elif cmd == ":history":
        entries = runner.conversation_history()
        if not entries:
            print("No conversation history yet.")
            return True
        print("\n--- Conversation history ---")
        for entry in entries:
            role = entry.get("role", "").upper()
            # System instructions are internal; skip them in the transcript.
            if role == "SYSTEM":
                continue
            body = entry.get("content", "")
            print(f"[{role}]\n{body}\n")
        print("-----------------------------\n")
    else:
        return False

    return True


def main():
    """Entry point: parse CLI options, build the agent, and run the REPL loop."""
    default_project_dir = Path.cwd() / "generated_project"

    parser = argparse.ArgumentParser(description="Gemini/Qwen-style engineering agent CLI")
    parser.add_argument(
        "--recursion-limit", "-r", type=int, default=100,
        help="Recursion limit for LangGraph (default: 100)",
    )
    parser.add_argument(
        "--project-directory", "-p", default=None,
        help="Absolute or relative path to the project root. Prompts interactively when omitted.",
    )
    parser.add_argument(
        "--backend", "-b", default=ModelBackend.GROQ.value, type=parse_backend,
        help="LLM backend: groq, gemini, openrouter (default: groq)",
    )
    parser.add_argument(
        "--model", "-m", default=None,
        help="Override the LLM model name for the selected backend.",
    )
    parser.add_argument(
        "--temperature", "-t", type=float, default=0.2,
        help="Sampling temperature for the agent (default: 0.2)",
    )
    parser.add_argument(
        "--no-auto-context", action="store_true",
        help="Disable automatic summarize_project calls before each request.",
    )

    args = parser.parse_args()
    project_dir = resolve_project_directory(args.project_directory, default_project_dir)
    runner = build_agent(args, project_dir)

    print("\nProject Engineering Agent (Gemini/Qwen style)")
    print("Type your request and press Enter. Use :help for CLI commands.\n")

    while True:
        try:
            prompt = input("agent> ").strip()
        except KeyboardInterrupt:
            # Ctrl-C cancels the current line but keeps the session alive.
            print("\nInterrupted. Type :exit to leave.")
            continue

        if not prompt:
            continue

        if prompt.lower() in ("exit", "quit", "q"):
            print("Exiting...")
            break

        # Recognized ":" commands are consumed here; unknown ones fall
        # through to the agent like normal prompts.
        if prompt.startswith(":") and handle_command(prompt, runner):
            continue

        try:
            state = runner.invoke(prompt)
            messages = state.get("messages")
            last = messages[-1] if messages else {}
            reply = last.get("content", "(no response)")
            print(f"\n{reply}\n")
        except Exception as exc:
            # Keep the REPL alive on agent errors; report to stderr.
            print(f"Error: {exc}", file=sys.stderr)

    sys.exit(0)


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()