curiouscurrent committed on
Commit
b01fb55
·
verified ·
1 Parent(s): 78ea7c0

Update coordinator/task_parser.py

Browse files
Files changed (1) hide show
  1. coordinator/task_parser.py +10 -14
coordinator/task_parser.py CHANGED
@@ -2,34 +2,30 @@
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
- # Use a larger model for better reasoning
6
- MODEL_NAME = "facebook/opt-1.3b"
7
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
8
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
9
 
10
  def parse_brief_with_reasoning(brief: str):
11
  """
12
- Use LLM to extract tasks and reasoning
13
  Returns a dict:
14
  {
15
- "reasoning": <llm reasoning>,
16
  "tasks": [list of tasks]
17
  }
18
  """
19
  prompt = f"""
20
- You are an expert project manager. Given the project brief, list the distinct technical tasks needed to implement it.
21
- Explain your reasoning step by step.
22
- Output format:
23
- Reasoning:
24
- - Explain step 1
25
- - Explain step 2
26
- Tasks:
27
- 1. First task
28
- 2. Second task
29
  Do not repeat the project brief.
30
 
31
  Project Brief:
32
  {brief}
 
 
33
  """
34
  inputs = tokenizer(prompt, return_tensors="pt")
35
  outputs = model.generate(**inputs, max_new_tokens=300)
@@ -44,7 +40,7 @@ Project Brief:
44
  # Deduplicate tasks
45
  tasks = []
46
  for line in tasks_text.split("\n"):
47
- task = line.strip("-123. ").strip()
48
  if task and task not in tasks:
49
  tasks.append(task)
50
 
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
+ # Use a larger OPT model if possible for better reasoning
6
+ MODEL_NAME = "facebook/opt-1.3b" # upgrade from 125m
7
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
8
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
9
 
10
  def parse_brief_with_reasoning(brief: str):
11
  """
12
+ Parses a project brief into distinct tasks with LLM reasoning.
13
  Returns a dict:
14
  {
15
+ "reasoning": <str>,
16
  "tasks": [list of tasks]
17
  }
18
  """
19
  prompt = f"""
20
+ You are an expert project manager.
21
+ Given the project brief, break it into **distinct technical tasks**, each on a new line, numbered.
22
+ Explain your reasoning step by step before listing the tasks.
 
 
 
 
 
 
23
  Do not repeat the project brief.
24
 
25
  Project Brief:
26
  {brief}
27
+
28
+ Reasoning and Tasks:
29
  """
30
  inputs = tokenizer(prompt, return_tensors="pt")
31
  outputs = model.generate(**inputs, max_new_tokens=300)
 
40
  # Deduplicate tasks
41
  tasks = []
42
  for line in tasks_text.split("\n"):
43
+ task = line.strip("- ").strip()
44
  if task and task not in tasks:
45
  tasks.append(task)
46