"""Indonesian GPT-2 chat demo served through a Gradio text interface."""

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Small Indonesian GPT-2 checkpoint, fetched from the Hugging Face hub on first run.
MODEL_NAME = "cahya/gpt2-small-indonesian-522M"

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()  # inference only — disable dropout etc.


def generate_response(prompt: str) -> str:
    """Generate a sampled Indonesian continuation of *prompt*.

    Args:
        prompt: User-supplied text to continue.

    Returns:
        The decoded output — the prompt followed by up to 80 newly
        sampled tokens — with special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    with torch.no_grad():  # generation needs no gradients
        output = model.generate(
            input_ids,
            # max_new_tokens counts only generated tokens; the original
            # max_length=80 capped prompt + continuation together, so long
            # prompts produced little or no new text.
            max_new_tokens=80,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.9,
            # GPT-2 defines no pad token; reuse EOS to silence the warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)


demo = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=4, placeholder="Tulis rayuanmu di sini... 🤭"),
    outputs="text",
    title="🧠 AI Nakal Bahasa Indonesia",
    description="Powered by GPT-2 Indo — Coba tanya yang manja, romantis, atau... 🌚",
)

# Launch only when executed as a script, not on import.
if __name__ == "__main__":
    demo.launch()