vortexa64 committed on
Commit
af00347
Β·
verified Β·
1 Parent(s): bce2087

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -30
app.py CHANGED
@@ -1,37 +1,71 @@
1
- import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
 
4
 
 
5
  model_name = "cahya/gpt2-small-indonesian-522M"
 
 
6
 
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
8
- model = AutoModelForCausalLM.from_pretrained(model_name)
9
-
10
  with open("prompt.txt", "r", encoding="utf-8") as f:
11
- base_prompt = f.read().strip()
12
-
13
def respond(messages):
    """Generate Cici's next reply for the Gradio chat callback.

    Builds a roleplay prompt from `base_prompt` plus the prior turns in
    *messages*, samples a continuation from the model, and returns only the
    text Cici produced after the final "Cici:" marker.

    NOTE(review): `gr.ChatInterface` invokes its fn as `fn(message, history)`;
    this function assumes it instead receives the full history list of
    (user, bot) pairs — confirm against the Gradio version actually deployed.
    """
    # Re-render completed (user, bot) exchanges as transcript lines.
    history = [f"Arya: {m[0]}\nCici: {m[1]}" for m in messages if m[0] and m[1]]
    last_user = messages[-1][0] if messages else ""
    full_prompt = base_prompt + "\n" + "\n".join(history) + f"\nArya: {last_user}\nCici:"

    # Truncate to the model's context window; truncation drops the tail,
    # so very long histories may lose the most recent turns.
    inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=1024)
    with torch.no_grad():  # inference only — no gradient bookkeeping needed
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.8,
        )

    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only what follows the last "Cici:" and cut at any hallucinated
    # "Arya:" turn the model may have continued with.
    response = generated.split("Cici:")[-1].strip().split("Arya:")[0].strip()
    return response

# Chatbot.style() was removed in Gradio 4 — height is a constructor kwarg now.
chatbot_ui = gr.Chatbot(label="Cici πŸ€­πŸ’•", height=450)
demo = gr.ChatInterface(fn=respond, chatbot=chatbot_ui, title="Cici AI Roleplay", theme="soft")

demo.launch()
 
1
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
2
  import torch
3
+ import gradio as gr
4
 
5
# Load the GPT-2-style Indonesian tokenizer and language model
# (checkpoint must be compatible with the GPT2 classes).
model_name = "cahya/gpt2-small-indonesian-522M"
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)

# Read the base roleplay prompt that seeds every conversation.
with open("prompt.txt", "r", encoding="utf-8") as f:
    base_prompt = f.read()

# Rolling transcript of the chat, one "Speaker: text" string per turn.
history = []
16
+
17
def chat(user_input):
    """Append the user's turn, generate Cici's reply, and return the full
    conversation formatted for ``gr.Chatbot``.

    Args:
        user_input: the message typed by the user ("Arya").

    Returns:
        A list of ``(user_message, bot_message)`` tuples — the pair format
        the tuples-mode ``gr.Chatbot`` component expects.
    """
    global history
    # Record the user's turn in the running transcript.
    history.append(f"Arya: {user_input}")
    prompt = base_prompt + "\n" + "\n".join(history) + "\nCici:"

    # Tokenize; truncation caps the prompt at the model's 1024-token window.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024)

    # Inference only: disable gradient tracking (the previous revision had
    # this and it was dropped in the rewrite — it saves memory and time).
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,
            top_k=50,
            top_p=0.95,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token
        )

    result = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Take the text after the last "Cici:" and stop at any generated "Arya:".
    cici_reply = result.split("Cici:")[-1].strip().split("Arya:")[0].strip()
    history.append(f"Cici: {cici_reply}")

    # BUG FIX: gr.Chatbot (tuples mode) expects (user_message, bot_message)
    # pairs, not (speaker_label, text) tuples. The old code emitted
    # ("Arya", text) / ("Cici πŸ€­πŸ’•", text), which rendered the speaker names
    # as user bubbles and split every turn into two rows. Pair each Arya
    # line with the Cici line that follows it instead.
    chat_messages = []
    pending_user = None
    for msg in history:
        if msg.startswith("Arya:"):
            pending_user = msg[6:]
        elif msg.startswith("Cici:"):
            chat_messages.append((pending_user, msg[6:]))
            pending_user = None
    if pending_user is not None:
        # Defensive: show an unanswered user turn with no bot reply yet.
        chat_messages.append((pending_user, None))

    return chat_messages
54
+
55
def reset():
    """Drop every stored chat turn and return an empty Chatbot value."""
    global history
    history = []
    return []
59
+
60
# Assemble the Gradio UI: a chat transcript view, a message box, and
# send/reset buttons.
with gr.Blocks() as demo:
    gr.Markdown("## πŸ€– Chatbot Roleplay Cici πŸ€­πŸ’•")
    chatbot_ui = gr.Chatbot(label="Cici πŸ€­πŸ’•", height=450)
    txt = gr.Textbox(label="Ketik pesan kamu...")

    btn_send = gr.Button("Kirim")
    btn_clear = gr.Button("Reset Chat")

    btn_send.click(chat, inputs=txt, outputs=chatbot_ui)
    # FIX: pressing Enter in the textbox previously did nothing — wire
    # Textbox.submit to the same handler as the send button.
    txt.submit(chat, inputs=txt, outputs=chatbot_ui)
    btn_clear.click(reset, outputs=chatbot_ui)

demo.launch()