from typing import Dict, Any

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel


class EndpointHandler:
    """Custom inference handler serving a LoRA fine-tune of
    deepseek-coder-6.7b-instruct on top of a 4-bit quantized base model."""

    def __init__(self, path: str = ""):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        # Quantize the base model to 4-bit NF4 with double quantization so it
        # fits on a single GPU; compute still runs in bfloat16.
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
            bnb_4bit_use_double_quant=True,
        )

        base_model_id = "deepseek-ai/deepseek-coder-6.7b-instruct"

        # The tokenizer is saved alongside the adapter at `path`; the base
        # model weights come from the Hub.
        self.tokenizer = AutoTokenizer.from_pretrained(path)
        self.model = AutoModelForCausalLM.from_pretrained(
            base_model_id,
            quantization_config=bnb_config,
            device_map="auto",
            trust_remote_code=True,
        )
        # Attach the LoRA adapter weights to the quantized base model.
        self.model = PeftModel.from_pretrained(self.model, path)
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        inputs = data.get("inputs", "")
        parameters = data.get("parameters", {})

        # Generation settings, overridable per request via "parameters".
        max_new_tokens = parameters.get("max_new_tokens", 512)
        temperature = parameters.get("temperature", 0.7)
        top_p = parameters.get("top_p", 0.95)
        do_sample = parameters.get("do_sample", True)

        # Wrap bare prompts in the instruction template the adapter was
        # trained on; prompts that already carry the template pass through.
        if not inputs.startswith("### System:"):
            prompt = f"""### System:
You are an expert Minecraft Forge mod developer for version 1.21.11. Write clean, efficient, and well-structured Java code.

### User:
{inputs}

### Assistant:
"""
        else:
            prompt = inputs

        # With device_map="auto" the model may be dispatched across devices,
        # so place the inputs on the model's own device rather than assuming
        # cuda:0.
        model_inputs = self.tokenizer(prompt, return_tensors="pt").to(
            self.model.device
        )

        with torch.no_grad():
            outputs = self.model.generate(
                **model_inputs,
                max_new_tokens=max_new_tokens,
                temperature=temperature,
                top_p=top_p,
                do_sample=do_sample,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        # The decoded sequence includes the prompt; keep only the completion
        # after the final "### Assistant:" marker.
        generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

        if "### Assistant:" in generated_text:
            generated_text = generated_text.split("### Assistant:")[-1].strip()

        return {"generated_text": generated_text}