Spaces:
Runtime error
Runtime error
File size: 1,467 Bytes
d3bd55e 3f96a46 b01fb55 3f96a46 d3bd55e 075c278 d3bd55e b01fb55 075c278 b01fb55 075c278 d3bd55e 075c278 b01fb55 1dfba20 075c278 b01fb55 075c278 3f96a46 1dfba20 3f96a46 1dfba20 3f96a46 075c278 1dfba20 b01fb55 1dfba20 075c278 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
# coordinator/task_parser.py
import re

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Use a larger OPT model if possible for better reasoning
MODEL_NAME = "facebook/opt-1.3b" # upgrade from 125m
# NOTE(review): both objects are built at import time, so merely importing this
# module pays the full model-load (and, presumably, first-time download) cost
# and holds the weights in memory — confirm this is intended; otherwise
# consider lazy initialization.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
def parse_brief_with_reasoning(brief: str) -> dict:
    """
    Parse a project brief into distinct tasks with LLM reasoning.

    Args:
        brief: Free-form project brief text.

    Returns:
        A dict:
        {
            "reasoning": <str>,   # the model's step-by-step reasoning
            "tasks": [list of task strings, numbering/bullets stripped]
        }
    """
    prompt = f"""
You are an expert project manager.
Given the project brief, break it into **distinct technical tasks**, each on a new line, numbered.
Explain your reasoning step by step before listing the tasks.
Do not repeat the project brief.
Project Brief:
{brief}
Reasoning and Tasks:
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    # Inference only — no gradients needed; saves memory during generate.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=300)
    # Decode ONLY the newly generated tokens. outputs[0] contains the prompt
    # followed by the completion; decoding it in full made the "Tasks:" split
    # below always fire on the prompt's own "Reasoning and Tasks:" header,
    # leaving the echoed prompt inside "reasoning".
    prompt_len = inputs["input_ids"].shape[1]
    decoded = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    # Split reasoning from the task list if the model emitted a "Tasks:" marker.
    if "Tasks:" in decoded:
        reasoning, tasks_text = decoded.split("Tasks:", 1)
    else:
        reasoning, tasks_text = decoded, ""
    # Normalize and deduplicate. Strip leading numbering ("1.", "2)") and
    # bullets ("-", "*") before comparing, so the same task listed under
    # different numbers is recognized as a duplicate (the old strip("- ")
    # left "1. foo" and "2. foo" looking distinct).
    tasks = []
    for line in tasks_text.split("\n"):
        task = re.sub(r"^\s*(?:\d+[.)]\s*|[-*]\s*)", "", line).strip()
        if task and task not in tasks:
            tasks.append(task)
    return {"reasoning": reasoning.strip(), "tasks": tasks}
|