from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

MODEL_REPO = "Rahul-8799/software_architect_command_r"

# 4-bit NF4 quantization with double quantization; compute in fp16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
)

tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_REPO,
    trust_remote_code=True,
    quantization_config=bnb_config,
    device_map={"": 0},  # force the full model onto GPU 0
    torch_dtype=torch.float16,
)
model.eval()

def run(prompt):
    # Tokenize onto the model's device, generate up to 512 new tokens with the
    # model's default decoding settings, and decode the result to a string.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=512)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
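A minimal usage sketch; the example prompt below is illustrative, not part of the original snippet:

prompt = "Propose a high-level architecture for a URL-shortening service."  # hypothetical example prompt
print(run(prompt))

Note that the decoded string includes the original prompt, since `generate` returns the input ids followed by the newly generated tokens; slice `outputs[0]` past `inputs["input_ids"].shape[1]` before decoding if you want only the completion.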