Keeby-smilyai committed
Commit f3bb5cb · verified · 1 Parent(s): 954d114

Create models/loader.py

Files changed (1)
  1. models/loader.py +50 -0
models/loader.py ADDED
@@ -0,0 +1,50 @@
+ # models/loader.py
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+ # Maps each agent role to the Hugging Face checkpoint it should load.
+ MODEL_REGISTRY = {
+     "ceo": "Qwen/Qwen3-0.6B",
+     "manager": "Qwen/Qwen3-0.6B",
+     "worker_coder": "Qwen/Qwen3-0.6B",
+     "worker_tester": "Qwen/Qwen3-0.6B",
+ }
+
+ # Caches (tokenizer, model) pairs so each checkpoint is loaded only once per process.
+ _MODEL_CACHE = {}
+
+ def load_model(model_name: str):
+     if model_name in _MODEL_CACHE:
+         return _MODEL_CACHE[model_name]
+
+     model_kwargs = {"device_map": "auto", "trust_remote_code": True, "attn_implementation": "eager"}
+     if torch.cuda.is_available():
+         # Quantize to 4-bit NF4 with bfloat16 compute so the model fits on small GPUs.
+         print(f"CUDA found. Loading '{model_name}' in 4-bit.")
+         bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16)
+         model_kwargs["quantization_config"] = bnb_config
+     else:
+         print(f"CUDA not found. Loading '{model_name}' on CPU.")
+
+     tokenizer = AutoTokenizer.from_pretrained(model_name)
+     model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs)
+     _MODEL_CACHE[model_name] = (tokenizer, model)
+     print(f"Model {model_name} loaded and cached.")
+     return tokenizer, model
+
+ def generate_with_model(role: str, prompt: str) -> str:
+     # Imported here rather than at module level to avoid a circular import.
+     from backend.agents import ROLE_PROMPTS
+
+     try:
+         model_name = MODEL_REGISTRY[role]
+         tokenizer, model = load_model(model_name)
+         messages = [{"role": "system", "content": ROLE_PROMPTS[role]}, {"role": "user", "content": prompt}]
+         input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+         inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
+         outputs = model.generate(**inputs, max_new_tokens=3072, pad_token_id=tokenizer.eos_token_id, use_cache=True)
+         # Decode only the newly generated tokens, dropping the echoed prompt.
+         return tokenizer.decode(outputs[0][len(inputs.input_ids[0]):], skip_special_tokens=True).strip()
+     except Exception as e:
+         print(f"Error during model generation for role {role}: {e}")
+         return f"error({e})"
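
For context, a minimal usage sketch (not part of this commit) follows. It assumes backend.agents defines a ROLE_PROMPTS entry for each key in MODEL_REGISTRY; the role and prompt strings are illustrative only.

# usage_sketch.py — hypothetical caller, assuming backend.agents.ROLE_PROMPTS exists
from models.loader import generate_with_model

# The first call for a role downloads and loads its model; later calls hit _MODEL_CACHE.
reply = generate_with_model("worker_coder", "Write a function that reverses a string.")
print(reply)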