File size: 1,429 Bytes
d219650
aea6581
 
 
68fb8f6
aea6581
 
d219650
a4d649d
d219650
5fe61b6
 
a4d649d
5fe61b6
a4d649d
 
d219650
aea6581
a4d649d
5fe61b6
68fb8f6
5fe61b6
 
 
a4d649d
aea6581
5fe61b6
aea6581
 
a4d649d
 
 
 
 
5fe61b6
aea6581
a4d649d
aea6581
 
 
a4d649d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
# coordinator/task_assigner.py
# Module-level setup: loads a causal LM used to route tasks to agents.
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# NOTE(review): model weights are downloaded/loaded at import time, which makes
# importing this module slow and network-dependent — consider lazy-loading.
MODEL_NAME = "facebook/opt-1.3b"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

def assign_tasks_with_reasoning(tasks):
    """
    Assign tasks to Frontend/Backend agents with model-generated reasoning.

    Args:
        tasks: iterable of task description strings.

    Returns a dict:
        {
            "reasoning": <str>,
            "assignments": {task: agent}
        }
    Returns empty reasoning/assignments for an empty task list without
    invoking the model.
    """
    tasks = list(tasks)
    if not tasks:
        # Nothing to assign — skip the (expensive) model call entirely.
        return {"reasoning": "", "assignments": {}}

    task_text = "\n".join(f"- {task}" for task in tasks)
    # The prompt must explicitly request the "Assignments:" marker — the
    # parser below splits on it, so the instruction and the parser agree.
    prompt = f"""
You are an AI assistant. Given the following technical tasks:
{task_text}

Explain your reasoning step-by-step on whether each task should go to Frontend or Backend agent.
Then write "Assignments:" followed by the final assignments in the format: Task -> Agent.
"""
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=250)
    # Decode only the newly generated tokens. Decoder-only models echo the
    # prompt in `outputs[0]`; since the prompt now contains the literal
    # "Assignments:" marker, decoding the full sequence would make the
    # split below trigger on the prompt echo instead of the model's answer.
    prompt_len = inputs["input_ids"].shape[1]
    decoded = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    if "Assignments:" in decoded:
        reasoning, assignments_text = decoded.split("Assignments:", 1)
    else:
        # Model ignored the format instruction — keep everything as reasoning.
        reasoning, assignments_text = decoded, ""

    # Parse "Task -> Agent" lines; maxsplit=1 so an agent description that
    # itself contains "->" does not raise on unpacking.
    assignments = {}
    for line in assignments_text.split("\n"):
        if "->" in line:
            task, agent = line.split("->", 1)
            assignments[task.strip()] = agent.strip()
    return {"reasoning": reasoning.strip(), "assignments": assignments}