# bangdim-ai / model.py
# KazuX-1 — Initial commit: Bangdim AI Customer Service (3b7e70b)
# model.py - Custom AI Model untuk Bangdim CS
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
class BangdimAI:
    """Customer-service chatbot for the Bangdim game top-up shop.

    Wraps a HuggingFace causal LM (DialoGPT-medium by default) and
    generates short replies conditioned on the recent chat history.
    """

    def __init__(self):
        print("Loading Bangdim AI Model...")
        # Lightweight base model; can be swapped for another causal LM.
        model_name = "microsoft/DialoGPT-medium"
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name)
        # DialoGPT defines no pad token; reuse EOS so generate() can pad.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        # NOTE(review): original literal was mojibake ("โœ…" = UTF-8 ✅
        # decoded as a legacy codepage); restored to the intended emoji.
        print("✅ Model loaded successfully!")

    def generate_response(self, user_input, history=None):
        """Generate one assistant reply to *user_input*.

        Args:
            user_input: The user's latest message.
            history: Optional list of {'user': ..., 'bot': ...} dicts;
                only the last three turns are included in the prompt.

        Returns:
            The model's reply text, or a polite Indonesian fallback when
            the model produced an empty string.
        """
        # None default instead of a mutable [] literal, which would be
        # shared (and mutable) across all calls.
        history = history if history is not None else []
        prompt = self.format_prompt(user_input, history)
        inputs = self.tokenizer.encode(prompt, return_tensors='pt')
        # Inference only — no gradients needed.
        with torch.no_grad():
            outputs = self.model.generate(
                inputs,
                max_length=200,
                temperature=0.8,
                top_p=0.9,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id,
            )
        response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # The decoded text echoes the prompt; strip it to keep only the
        # newly generated reply.
        response = response[len(prompt):].strip()
        # NOTE(review): "๐Ÿ˜Š" in the original was mojibake of 😊; restored.
        return response if response else "Maaf kak, saya kurang paham. Bisa diulang? 😊"

    def format_prompt(self, user_input, history):
        """Build the generation prompt: system preamble, up to the last
        three history turns, then the new user message."""
        prompt = """Anda adalah Bangdim AI, CS toko top up game yang ramah. Panggil user dengan 'kak'.
"""
        # Keep the prompt short: only the three most recent turns.
        for h in history[-3:]:
            if 'user' in h:
                prompt += f"User: {h['user']}\n"
            if 'bot' in h:
                prompt += f"Assistant: {h['bot']}\n"
        prompt += f"User: {user_input}\nAssistant: "
        return prompt
# Initialize model
# Module-level singleton: the (slow) model load happens once at import
# time so predict() can reuse the same instance across requests.
bangdim_ai = BangdimAI()
# For HuggingFace Spaces
def predict(user_input, history=None):
    """Spaces entry point: return the bot's reply for *user_input*.

    Args:
        user_input: Latest user message.
        history: Optional list of prior {'user': ..., 'bot': ...} turns.

    Returns:
        The generated reply string.
    """
    # None default instead of a mutable [] literal, which would be
    # shared between calls.
    history = history if history is not None else []
    return bangdim_ai.generate_response(user_input, history)