curiouscurrent committed on
Commit
a4d649d
·
verified ·
1 Parent(s): 075c278

Update coordinator/task_assigner.py

Browse files
Files changed (1) hide show
  1. coordinator/task_assigner.py +25 -9
coordinator/task_assigner.py CHANGED
@@ -2,26 +2,42 @@
2
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch  # NOTE(review): not referenced in the visible code — confirm before removing

# Reuse the same small OPT model
MODEL_NAME = "facebook/opt-125m"

# Loaded once at import time so every call to the assigner reuses the
# same tokenizer/model instances instead of reloading weights per call.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
11
def assign_tasks(tasks):
    """
    Use LLM to assign tasks to Frontend or Backend agents.

    Args:
        tasks: iterable of task-description strings.

    Returns:
        dict mapping task text -> agent name, parsed from any line of
        the model output containing "->". May be empty if the model
        emits no parseable lines.
    """
    task_text = "\n".join(f"- {task}" for task in tasks)
    prompt = f"Assign the following technical tasks to either Frontend or Backend agent:\n{task_text}\n\nAssignments:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=150)
    decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Parse "Task -> Agent" lines into a dict.
    assignments = {}
    for line in decoded.split("\n"):
        if "->" in line:
            # maxsplit=1: a line with more than one "->" previously raised
            # ValueError on unpacking; now everything after the first arrow
            # is kept as the agent name.
            task, agent = line.split("->", 1)
            assignments[task.strip()] = agent.strip()
    return assignments
 
2
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch  # NOTE(review): not referenced in the visible code — confirm before removing

# Small OPT checkpoint; loaded once at import time so every call to the
# assigner reuses the same tokenizer/model instances.
MODEL_NAME = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
9
def assign_tasks_with_reasoning(tasks):
    """
    Use LLM to assign tasks and show reasoning.

    Args:
        tasks: iterable of task-description strings.

    Returns a dict:
        {
            "reasoning": <llm reasoning text, str>,
            "assignments": {task: agent}
        }
    """
    task_text = "\n".join(f"- {task}" for task in tasks)
    prompt = f"""
You are an AI assistant. Given the following technical tasks:
{task_text}

Explain your reasoning step-by-step on whether each task should go to Frontend or Backend agent.
Then provide the final assignments in the format: Task -> Agent.

Reasoning and Assignments:
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=200)

    # Decode only the newly generated tokens. Decoding the full sequence
    # echoes the prompt, whose own "Reasoning and Assignments:" header was
    # the first match for the split below — leaving prompt text in the
    # "reasoning" field instead of the model's actual reasoning.
    prompt_len = inputs["input_ids"].shape[1]
    decoded = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Split reasoning and final assignments on the model's own marker;
    # without a marker, treat the whole output as reasoning.
    if "Assignments:" in decoded:
        reasoning, assignments_text = decoded.split("Assignments:", 1)
    else:
        reasoning, assignments_text = decoded, ""

    assignments = {}
    for line in assignments_text.split("\n"):
        if "->" in line:
            # maxsplit=1: a line with more than one "->" previously raised
            # ValueError on unpacking; now everything after the first arrow
            # is kept as the agent name.
            task, agent = line.split("->", 1)
            assignments[task.strip()] = agent.strip()
    return {"reasoning": reasoning.strip(), "assignments": assignments}