from transformers import AutoModelForCausalLM, AutoTokenizer
import torch


class BangdimAI:
    def __init__(self):
        print("Loading Bangdim AI Model...")

        # DialoGPT-medium: a conversational GPT-2 variant from Microsoft
        model_name = "microsoft/DialoGPT-medium"

        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)

        # GPT-2-based tokenizers ship without a pad token; reuse EOS for padding
        self.tokenizer.pad_token = self.tokenizer.eos_token

        print("✅ Model loaded successfully!")

    def generate_response(self, user_input, history=None):
        # Avoid a mutable default argument; callers may omit the history
        history = history or []

        prompt = self.format_prompt(user_input, history)

        # Tokenize with an attention mask so generate() can tell padding
        # from real input
        inputs = self.tokenizer(prompt, return_tensors="pt")

        with torch.no_grad():
            outputs = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                max_new_tokens=100,  # cap only the reply; max_length would count the prompt too
                temperature=0.8,
                top_p=0.9,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
            )

        # Decode only the newly generated tokens; slicing the decoded string
        # with len(prompt) breaks when decoding normalizes whitespace
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        response = self.tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

        # Fallback reply: "Sorry kak, I didn't quite understand. Could you repeat that?"
        return response if response else "Maaf kak, saya kurang paham. Bisa diulang?"

    def format_prompt(self, user_input, history):
        # Persona (Indonesian): "You are Bangdim AI, the friendly CS of a game
        # top-up shop. Address the user as 'kak'."
        prompt = """Anda adalah Bangdim AI, CS toko top up game yang ramah. Panggil user dengan 'kak'.

"""

        # Keep only the last 3 exchanges so the prompt stays short
        for h in history[-3:]:
            if 'user' in h:
                prompt += f"User: {h['user']}\n"
            if 'bot' in h:
                prompt += f"Assistant: {h['bot']}\n"

        prompt += f"User: {user_input}\nAssistant: "
        return prompt
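
# For illustration (hypothetical input), format_prompt("Halo", []) builds:
#
#   Anda adalah Bangdim AI, CS toko top up game yang ramah. Panggil user dengan 'kak'.
#
#   User: Halo
#   Assistant:
#
# The trailing "Assistant: " is left open for the model to complete.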


# Load the model once at import time so every request reuses it
bangdim_ai = BangdimAI()


def predict(user_input, history=None):
    # Thin wrapper for the serving layer; avoids a mutable default argument
    return bangdim_ai.generate_response(user_input, history or [])
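

# A minimal usage sketch (assumed console entry point, not part of the original):
# reads turns in a loop and threads the growing history back into predict().
if __name__ == "__main__":
    chat_history = []
    while True:
        user_input = input("You: ")
        if user_input.lower() in ("exit", "quit"):
            break
        reply = predict(user_input, chat_history)
        print(f"Bangdim AI: {reply}")
        chat_history.append({"user": user_input, "bot": reply})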