|
|
|
|
|
import gradio as gr |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
|
import torch |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Checkpoint: small instruction-tuned Gemma model from the Hugging Face Hub.
model_name = "google/gemma-2b-it"

tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float32,  # full precision; NOTE(review): float16/bfloat16 would halve memory on GPU
    device_map="auto",          # let accelerate place weights on whatever device is available
)

# Shared text-generation pipeline used by chat().
# do_sample=True is required for temperature/top_p/repetition_penalty to take
# effect; without it the pipeline greedy-decodes and transformers ignores
# (and warns about) the sampling parameters below.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    repetition_penalty=1.1,
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def chat(user_input, history):
    """Generate one assistant reply for *user_input* given the chat *history*.

    Parameters
    ----------
    user_input : str
        The user's latest message.
    history : list[dict] | None
        Prior turns as ``{"role": ..., "content": ...}`` dicts — the format
        used by ``gr.Chatbot(type="messages")``.

    Returns
    -------
    tuple[list[dict], list[dict]]
        The updated history, duplicated so one copy can feed the Chatbot
        component and the other the State component.
    """
    # Normalize BEFORE any early return so a None history never leaks back
    # to the caller (the original returned it unmodified on blank input).
    history = history or []

    # Ignore blank / whitespace-only submissions.
    if not user_input.strip():
        return history, history

    # Persona preamble (kept in Indonesian — it is model input, not a comment).
    prompt = (
        "Kamu adalah YuuVx, asisten AI yang dibuat oleh Yusuf Ramadhani. "
        "Kamu cerdas, tenang, dan selalu berbicara dalam Bahasa Indonesia dengan nada santai tapi sopan. "
        "Gunakan kalimat yang alami dan tidak terlalu panjang.\n\n"
    )

    # Replay prior turns as a plain "User:/YuuVx:" transcript.
    for msg in history:
        speaker = "User" if msg["role"] == "user" else "YuuVx"
        prompt += f"{speaker}: {msg['content']}\n"

    prompt += f"User: {user_input}\nYuuVx:"

    output = pipe(prompt, max_new_tokens=256, do_sample=True)

    # generated_text echoes the whole prompt, so take the text after the
    # final "YuuVx:" marker — then cut off any hallucinated follow-up
    # "User:" turn the model may have invented after its own reply.
    response = output[0]["generated_text"].split("YuuVx:")[-1]
    response = response.split("User:")[0].strip()

    history.append({"role": "user", "content": user_input})
    history.append({"role": "assistant", "content": response})

    return history, history
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("<h1 style='text-align:center;'>๐ค YuuVx Chat - Gemma 2B</h1>")
    gr.Markdown("<p style='text-align:center;'>Dibuat oleh <b>Yusuf Ramadhani</b></p>")

    # type="messages" expects the same role/content dicts that chat() builds.
    chatbot = gr.Chatbot(label="๐ฌ Chat dengan YuuVx", height=520, type="messages")
    msg = gr.Textbox(label="Ketik pesanmu di sini...", placeholder="Tulis sesuatu...")
    state = gr.State([])

    def respond(message, chat_history):
        """Handle a textbox submit: run chat() and clear the input box."""
        # chat() returns the updated history twice; one copy is enough here
        # (the original bound the duplicate to an unused `result` variable).
        _, chat_history = chat(message, chat_history)
        return "", chat_history, chat_history

    # Outputs: clear the textbox, refresh the chat display, persist the state.
    msg.submit(respond, [msg, state], [msg, chatbot, state])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Start the Gradio server only when run as a script (not on import).
    demo.launch()