vortexa64 committed on
Commit
abe64fa
·
verified ·
1 Parent(s): b17a9d4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -61
app.py CHANGED
@@ -1,71 +1,33 @@
1
- import torch
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import gradio as gr
 
 
4
 
5
- # Load model dan tokenizer
6
- model_id = "cahya/gpt2-small-indonesian-522M"
7
- tokenizer = AutoTokenizer.from_pretrained(model_id)
8
- model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
9
- model.to("cuda" if torch.cuda.is_available() else "cpu")
10
- model.eval()
11
-
12
- # Baca prompt awal
13
- try:
14
- with open("prompt.txt", "r", encoding="utf-8") as f:
15
- base_prompt = f.read()
16
- except FileNotFoundError:
17
- base_prompt = ""
18
-
19
- # Fungsi buat ngegabungin chat history
20
- def generate_prompt(message, chat_history):
21
- full_prompt = base_prompt
22
- for user_msg, ai_msg in chat_history:
23
- full_prompt += f"Arya: {user_msg}\nCici: {ai_msg}\n"
24
- full_prompt += f"Arya: {message}\nCici:"
25
- return full_prompt
26
 
27
- # Fungsi buat ngerespon input user
28
- def predict(message, chat_history):
29
- prompt = generate_prompt(message, chat_history)
30
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=1024).to(model.device)
31
-
32
  with torch.no_grad():
33
- outputs = model.generate(
34
- **inputs,
35
- max_new_tokens=128,
36
  do_sample=True,
37
- temperature=0.9,
38
  top_p=0.95,
 
39
  pad_token_id=tokenizer.eos_token_id,
40
- eos_token_id=tokenizer.eos_token_id,
41
  )
42
-
43
- output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
44
-
45
- # Ambil jawaban terakhir setelah "Cici:"
46
- if "Cici:" in output_text:
47
- answer = output_text.split("Cici:")[-1].strip()
48
- else:
49
- answer = "Maaf ya, Cici bingung jawabnya 😢"
50
-
51
- chat_history.append((message, answer))
52
- return answer, chat_history
53
-
54
- # Komponen Gradio
55
- chatbot = gr.Chatbot()
56
- with gr.Blocks(css=".gradio-container {background-color: #fefefe}") as demo:
57
- gr.Markdown("<h1 style='text-align: center;'>🩷 Cici Chatbot Indo 😳🤭</h1>")
58
- with gr.Row():
59
- with gr.Column():
60
- message = gr.Textbox(label="Ketik di sini sayang~ 😚")
61
- clear = gr.Button("🧹 Bersihin Chat")
62
- with gr.Column():
63
- output = chatbot
64
-
65
- state = gr.State([])
66
 
67
- message.submit(predict, [message, state], [output, state])
68
- clear.click(lambda: ([], []), None, [output, state])
 
 
 
 
 
 
69
 
70
- # Launch
71
- demo.launch()
 
 
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ import torch
4
 
5
+ # Load GPT-2 Bahasa Indonesia (contoh: IndoGPT2 dari cahya)
6
+ model_name = "cahya/gpt2-small-indonesian-522M"
7
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
8
+ model = AutoModelForCausalLM.from_pretrained(model_name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
+ def generate_response(prompt):
11
+ input_ids = tokenizer.encode(prompt, return_tensors="pt")
 
 
 
12
  with torch.no_grad():
13
+ output = model.generate(
14
+ input_ids,
15
+ max_length=100,
16
  do_sample=True,
17
+ top_k=50,
18
  top_p=0.95,
19
+ temperature=0.9,
20
  pad_token_id=tokenizer.eos_token_id,
 
21
  )
22
+ return tokenizer.decode(output[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
+ # Gradio UI
25
+ iface = gr.Interface(
26
+ fn=generate_response,
27
+ inputs=gr.Textbox(lines=4, placeholder="Ketik sesuatu yang nakal... 🤭"),
28
+ outputs="text",
29
+ title="🧠 AI Nakal Bahasa Indonesia",
30
+ description="Powered by GPT-2 Indo | Coba ketik rayuan atau pertanyaan nakal 😳💕"
31
+ )
32
 
33
+ iface.launch()