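"""Gradio demo for text generation with a causal LM hosted on the Hugging Face Hub.

Loads the "Erlanggaa/TextChatbot" checkpoint (a LLaMA 3.2-1B model, per the
interface title below) and serves it through a simple text-in/text-out web UI.
"""
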
import os

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
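
# Read the Hugging Face access token from the environment and fail fast if it is missing.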
hf_token = os.getenv("HUGGINGFACE_TOKEN")
if not hf_token:
    raise ValueError("Hugging Face token not found. Please set the 'HUGGINGFACE_TOKEN' environment variable.")
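
# Load the tokenizer and model once at startup, authenticating with the Hub token.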
model_name = "Erlanggaa/TextChatbot"
try:
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
except Exception as e:
    raise ValueError(f"Failed to load the model or tokenizer: {e}") from e


def generate_text(prompt):
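    """Generate a completion for the given prompt."""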
    if not prompt or not prompt.strip():
        return "Please enter a valid prompt."

    # Tokenize the prompt into PyTorch tensors.
    inputs = tokenizer(prompt, return_tensors="pt")
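
    # Sample a completion: do_sample=True is required for temperature, top_k,
    # and top_p to take effect; without it, generate() decodes greedily and
    # ignores those arguments.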
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=500,
        min_length=30,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.9,
        repetition_penalty=1.2,
        # LLaMA tokenizers define no pad token; reuse EOS to avoid a warning.
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode the generated token IDs back into text, dropping special tokens.
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return result.strip()
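

# Wire the generation function into a minimal text-in/text-out Gradio UI.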
interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="LLaMA 3.2-1B Text Generator",
    description="Enter a prompt, and the LLaMA 3.2-1B model will generate text.",
)

if __name__ == "__main__":
    interface.launch()
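
# To run locally (file name assumed to be app.py):
#   HUGGINGFACE_TOKEN=hf_... python app.py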