curiouscurrent committed on
Commit
68fb8f6
·
verified ·
1 Parent(s): 1dfba20

Update coordinator/task_assigner.py

Browse files
Files changed (1) hide show
  1. coordinator/task_assigner.py +14 -11
coordinator/task_assigner.py CHANGED
@@ -2,14 +2,14 @@
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
- MODEL_NAME = "facebook/opt-125m"
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
7
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
8
 
9
  def assign_tasks_with_reasoning(tasks):
10
  """
11
- Use LLM to assign tasks and show reasoning
12
- Returns a dict:
13
  {
14
  "reasoning": <llm reasoning>,
15
  "assignments": {task: agent}
@@ -17,19 +17,21 @@ def assign_tasks_with_reasoning(tasks):
17
  """
18
  task_text = "\n".join(f"- {task}" for task in tasks)
19
  prompt = f"""
20
- You are an AI assistant. Given the following technical tasks:
21
- {task_text}
22
-
23
- Explain your reasoning step-by-step on whether each task should go to Frontend or Backend agent.
24
- Then provide the final assignments in the format: Task -> Agent.
 
 
25
 
26
- Reasoning and Assignments:
 
27
  """
28
  inputs = tokenizer(prompt, return_tensors="pt")
29
- outputs = model.generate(**inputs, max_new_tokens=200)
30
  decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
31
 
32
- # Split reasoning and final assignments
33
  if "Assignments:" in decoded:
34
  reasoning, assignments_text = decoded.split("Assignments:", 1)
35
  else:
@@ -40,4 +42,5 @@ Reasoning and Assignments:
40
  if "->" in line:
41
  task, agent = line.split("->")
42
  assignments[task.strip()] = agent.strip()
 
43
  return {"reasoning": reasoning.strip(), "assignments": assignments}
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
+ MODEL_NAME = "facebook/opt-1.3b"
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
7
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
8
 
9
  def assign_tasks_with_reasoning(tasks):
10
  """
11
+ Use LLM to assign tasks to Frontend/Backend with reasoning
12
+ Returns dict:
13
  {
14
  "reasoning": <llm reasoning>,
15
  "assignments": {task: agent}
 
17
  """
18
  task_text = "\n".join(f"- {task}" for task in tasks)
19
  prompt = f"""
20
+ You are an expert project manager. Assign the following technical tasks to either a Frontend or Backend agent.
21
+ Explain your reasoning step by step.
22
+ Output format:
23
+ Reasoning:
24
+ - Step by step explanation
25
+ Assignments:
26
+ Task -> Agent
27
 
28
+ Tasks:
29
+ {task_text}
30
  """
31
  inputs = tokenizer(prompt, return_tensors="pt")
32
+ outputs = model.generate(**inputs, max_new_tokens=300)
33
  decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
34
 
 
35
  if "Assignments:" in decoded:
36
  reasoning, assignments_text = decoded.split("Assignments:", 1)
37
  else:
 
42
  if "->" in line:
43
  task, agent = line.split("->")
44
  assignments[task.strip()] = agent.strip()
45
+
46
  return {"reasoning": reasoning.strip(), "assignments": assignments}