# handler.py
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# Base model that your LoRA was trained on (must match training)
BASE_MODEL = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"  # change if you trained on a different DeepSeek variant
ADAPTER_PATH = "GilbertAkham/deepseek-R1-multitask-lora"


class EndpointHandler:
    def __init__(self, path=""):
        print("🚀 Loading base model...")
        self.tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)

        # Load the base model in half precision; device_map="auto" lets
        # Accelerate place it on the available GPU(s)/CPU
        base_model = AutoModelForCausalLM.from_pretrained(
            BASE_MODEL,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True,
        )

        print(f"🔗 Attaching LoRA adapter from {ADAPTER_PATH}...")
        # Wrap the base model with the LoRA adapter weights
        self.model = PeftModel.from_pretrained(base_model, ADAPTER_PATH)
        self.model.eval()
        print("✅ Model + LoRA adapter loaded successfully.")

    def __call__(self, data):
        prompt = data.get("inputs", "")
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)

        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=512,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
                eos_token_id=self.tokenizer.eos_token_id,
            )

        # Note: the decoded text includes the prompt, since we decode the full sequence
        text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return {"generated_text": text}