# Source: ai_project_agent — coordinator/task_parser.py
# (Hugging Face file-page header removed; uploaded by curiouscurrent, commit b01fb55)
# coordinator/task_parser.py
import re

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Use a larger OPT model if possible for better reasoning
# NOTE(review): loading a ~1.3B-parameter checkpoint happens at import time;
# the first import of this module blocks until download/load completes.
MODEL_NAME = "facebook/opt-1.3b" # upgrade from 125m
# Module-level singletons shared by every call to parse_brief_with_reasoning.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
def _extract_tasks(tasks_text: str) -> list:
    """Split *tasks_text* into task lines, strip list markers, deduplicate.

    Removes leading numbering ("1.", "2)") and bullets ("-", "*") so
    logically identical tasks compare equal; first occurrence wins and
    input order is preserved.
    """
    tasks = []
    for line in tasks_text.split("\n"):
        # The prompt asks for numbered lines; strip "1." / "2)" / "-" / "*"
        # prefixes so deduplication works on the task text itself.
        task = re.sub(r"^\s*(?:\d+[.)]|[-*])\s*", "", line).strip()
        if task and task not in tasks:
            tasks.append(task)
    return tasks


def parse_brief_with_reasoning(brief: str):
    """
    Parses a project brief into distinct tasks with LLM reasoning.
    Returns a dict:
    {
    "reasoning": <str>,
    "tasks": [list of tasks]
    }
    """
    prompt = f"""
You are an expert project manager.
Given the project brief, break it into **distinct technical tasks**, each on a new line, numbered.
Explain your reasoning step by step before listing the tasks.
Do not repeat the project brief.
Project Brief:
{brief}
Reasoning and Tasks:
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=300)
    # Causal LMs echo the prompt in their output. Decode ONLY the newly
    # generated tokens; otherwise the "Tasks:" split below matches the
    # prompt's own "Reasoning and Tasks:" header and `reasoning` ends up
    # being the prompt preamble instead of the model's reasoning.
    prompt_len = inputs["input_ids"].shape[1]
    decoded = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    # Split reasoning from the task list (model may or may not emit the marker).
    if "Tasks:" in decoded:
        reasoning, tasks_text = decoded.split("Tasks:", 1)
    else:
        reasoning, tasks_text = decoded, ""
    return {"reasoning": reasoning.strip(), "tasks": _extract_tasks(tasks_text)}