# handler.py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# Base model that your LoRA was trained on (must match training)
BASE_MODEL = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"  # change if you trained on a different DeepSeek variant
ADAPTER_PATH = "GilbertAkham/deepseek-R1-multitask-lora"


class EndpointHandler:
    def __init__(self, path=""):
        print("🚀 Loading base model...")
        self.tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)

        # Load base model
        base_model = AutoModelForCausalLM.from_pretrained(
            BASE_MODEL,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
        )

        print(f"🔗 Attaching LoRA adapter from {ADAPTER_PATH}...")
        # Load the LoRA adapter properly
        self.model = PeftModel.from_pretrained(base_model, ADAPTER_PATH)
        self.model.eval()
        print("✅ Model + LoRA adapter loaded successfully.")

    def __call__(self, data):
        prompt = data.get("inputs", "")
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=512,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
            )

        text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": text}
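
# --- Optional local smoke test ------------------------------------------------
# A minimal sketch of how Inference Endpoints exercise this file: the service
# instantiates EndpointHandler once at startup, then calls it with the JSON
# payload of each request. The prompt below is a hypothetical placeholder;
# running this locally assumes you have enough GPU/CPU memory and access to
# both the base model and the adapter repo on the Hub.
if __name__ == "__main__":
    handler = EndpointHandler()
    result = handler({"inputs": "Explain what a LoRA adapter does in one sentence."})
    # Note: the decoded text includes the prompt, since generate() returns the
    # full sequence and we decode outputs[0] without slicing off the input.
    print(result["generated_text"])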