Keeby-smilyai committed
Commit 2be91bc · verified · 1 Parent(s): eff8eed

Update models/loader.py

Files changed (1)
models/loader.py +53 -35
models/loader.py CHANGED
@@ -1,45 +1,63 @@
  # models/loader.py
  import torch
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ import os
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

+ # Configuration for loading models
+ QUANTIZATION_CONFIG = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+
+ # Use a registry to map agent roles to specific models
  MODEL_REGISTRY = {
      "ceo": "Qwen/Qwen3-0.6B",
+     "planner": "Qwen/Qwen3-0.6B",
      "manager": "Qwen/Qwen3-0.6B",
-     "worker_coder": "Qwen/Qwen3-0.6B",
+     "debugger": "Qwen/Qwen3-0.6B",
+     "business_analyst": "Qwen/Qwen3-0.6B",
+     "ux_ui_designer": "Qwen/Qwen3-0.6B",
+     "worker_backend_coder": "Qwen/Qwen3-0.6B",
+     "worker_front_end_coder": "Qwen/Qwen3-0.6B",
      "worker_tester": "Qwen/Qwen3-0.6B",
  }
  _MODEL_CACHE = {}

- def load_model(model_name):
-     if model_name in _MODEL_CACHE:
-         return _MODEL_CACHE[model_name]
-
-     model_kwargs = {"device_map": "auto", "trust_remote_code": True, "attn_implementation": "eager"}
-     if torch.cuda.is_available():
-         print(f"CUDA found. Loading '{model_name}' in 4-bit.")
-         bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16)
-         model_kwargs["quantization_config"] = bnb_config
-     else:
-         print(f"CUDA not found. Loading '{model_name}' on CPU.")
-
-     tokenizer = AutoTokenizer.from_pretrained(model_name)
-     model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs)
-     _MODEL_CACHE[model_name] = (tokenizer, model)
-     print(f"Model {model_name} loaded and cached.")
-     return tokenizer, model
-
- def generate_with_model(role: str, prompt: str) -> str:
-     from backend.agents import ROLE_PROMPTS
-     from .loader import MODEL_REGISTRY
-
-     try:
-         model_name = MODEL_REGISTRY[role]
-         tokenizer, model = load_model(model_name)
-         messages = [{"role": "system", "content": ROLE_PROMPTS[role]}, {"role": "user", "content": prompt}]
-         input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-         inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
-         outputs = model.generate(**inputs, max_new_tokens=3072, pad_token_id=tokenizer.eos_token_id, use_cache=True)
-         return tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True).strip()
-     except Exception as e:
-         print(f"Error during model generation for role {role}: {e}")
-         return f"error({e})"
+ def get_model_and_tokenizer(model_name="Qwen/Qwen3-0.6B"):
+     if model_name not in _MODEL_CACHE:
+         print(f"Loading model: {model_name}...")
+         _MODEL_CACHE[model_name] = {
+             "model": AutoModelForCausalLM.from_pretrained(
+                 model_name,
+                 device_map="auto",
+                 quantization_config=QUANTIZATION_CONFIG,
+                 trust_remote_code=True,
+             ),
+             "tokenizer": AutoTokenizer.from_pretrained(model_name)
+         }
+     return _MODEL_CACHE[model_name]["model"], _MODEL_CACHE[model_name]["tokenizer"]
+
+ def generate_with_model(agent_role, prompt):
+     # Imported lazily, as in the old version, to avoid a circular import with backend.agents
+     from backend.agents import ROLE_PROMPTS
+
+     model_name = MODEL_REGISTRY.get(agent_role, "Qwen/Qwen3-0.6B")
+     model, tokenizer = get_model_and_tokenizer(model_name)
+
+     full_prompt = f"You are a helpful assistant. {ROLE_PROMPTS.get(agent_role, '')}\n\nUser prompt: {prompt}"
+
+     input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(model.device)
+
+     with torch.no_grad():
+         output = model.generate(
+             input_ids,
+             max_new_tokens=2048,
+             do_sample=True,
+             temperature=0.7,
+             top_p=0.9,
+             repetition_penalty=1.1
+         )
+
+     # Slice off the echoed prompt tokens; string-replacing the decoded prompt can fail silently
+     return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True).strip()
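
A quick usage sketch for reviewers (hypothetical caller, not part of this commit; it assumes backend.agents.ROLE_PROMPTS defines a prompt per registry role, and that unknown roles fall back to the default model):

    # Hypothetical caller exercising the new API
    from models.loader import generate_with_model

    reply = generate_with_model("planner", "Break the project into milestones.")
    print(reply)

One behavioral change worth noting: unlike the removed load_model(), the new loader always applies QUANTIZATION_CONFIG, and 4-bit bitsandbytes loading generally requires a CUDA GPU, so this module may no longer work on CPU-only machines. A minimal sketch of the old CPU fallback, if it is still wanted (the name load_with_cpu_fallback is illustrative, not from the commit):

    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    def load_with_cpu_fallback(model_name: str):
        # Only request 4-bit NF4 quantization when a CUDA device is present;
        # otherwise load the full-precision weights on CPU.
        kwargs = {"device_map": "auto", "trust_remote_code": True}
        if torch.cuda.is_available():
            kwargs["quantization_config"] = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.bfloat16,
            )
        return AutoModelForCausalLM.from_pretrained(model_name, **kwargs)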