Rahul-8799 committed on
Commit
4882a84
·
verified ·
1 Parent(s): d88e86d

Update agents/project_manager_agent.py

Browse files
Files changed (1) hide show
  1. agents/project_manager_agent.py +9 -10
agents/project_manager_agent.py CHANGED
@@ -4,14 +4,13 @@ import torch
4
  MODEL_REPO = "Rahul-8799/project_manager_gemma3"
5
 
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
7
- model = AutoModelForCausalLM.from_pretrained(MODEL_REPO, torch_dtype=torch.float16, device_map="auto")
 
 
 
 
8
 
9
- def run(state):
10
- prompt = state["prompt"]
11
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids
12
- output_ids = model.generate(input_ids, max_new_tokens=512)
13
- response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
14
-
15
- state["chat_log"].append({"Project Manager": response})
16
- state["pm_out"] = response
17
- return state
 
4
  MODEL_REPO = "Rahul-8799/project_manager_gemma3"
5
 
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
7
+ model = AutoModelForCausalLM.from_pretrained(
8
+ MODEL_REPO,
9
+ torch_dtype=torch.float16,
10
+ device_map="auto"
11
+ )
12
 
13
+ def run(prompt):
14
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
15
+ outputs = model.generate(**inputs, max_new_tokens=512)
16
+ return tokenizer.decode(outputs[0], skip_special_tokens=True)