irhamni committed on
Commit
e54d4f9
·
verified ·
1 Parent(s): 122760a

Upload 2 files

Browse files
Files changed (2) hide show
  1. app (2).py +95 -0
  2. chatbot_model (1).pkl +3 -0
app (2).py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ """app.ipynb
3
+
4
+ Automatically generated by Colab.
5
+
6
+ Original file is located at
7
+ https://colab.research.google.com/drive/1Pl40oGz5tSGYUl-qkOCO9EQQuNw4ElvP
8
+ """
9
+
10
+ # =================================================================
11
+ # KODE APP.PY: UNTUK DIUNGGAH KE HUGGING FACE
12
+ # =================================================================
13
+ import streamlit as st
14
+ import pandas as pd
15
+ import pickle
16
+ import re
17
+ import nltk
18
+
19
# --- FILE CONFIGURATION ---
MODEL_PATH = 'chatbot_model.pkl'
FAQ_PATH = 'perpustakaan_faq.csv'

# Ensure the NLTK 'punkt' tokenizer is available.
# BUG FIX: nltk.data.find() raises LookupError when a resource is missing,
# not nltk.downloader.DownloadError (that attribute does not even exist on
# recent NLTK releases, so the old except clause itself could raise
# AttributeError). Catching LookupError makes the fallback download work.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')
28
+
29
# --- 1. Load model and data (cached) ---
@st.cache_resource
def load_resources():
    """Load the classifier pipeline and the FAQ table exactly once.

    Uses Streamlit resource caching so repeated reruns of the script share
    one in-memory copy. On any loading failure the app shows an error and
    halts instead of raising.

    Returns:
        tuple: (model_pipeline, df_faq) — the unpickled TF-IDF + Naive Bayes
        pipeline and the FAQ DataFrame.
    """
    try:
        # NOTE(review): pickle.load is only safe for trusted model files.
        with open(MODEL_PATH, 'rb') as fh:
            pipeline = pickle.load(fh)
        # FAQ table supplies the answer text for each predicted intent.
        faq_table = pd.read_csv(FAQ_PATH)
    except FileNotFoundError as e:
        st.error(f"Gagal memuat file penting: {e}. Pastikan '{MODEL_PATH}' dan '{FAQ_PATH}' telah diunggah.")
        st.stop()
    except Exception as e:
        # Catch-all covers unpickling failures such as binary incompatibility
        # between the training and serving scikit-learn/numpy versions.
        st.error(f"Terjadi kesalahan fatal saat memuat model. Pastikan versi scikit-learn dan numpy di requirements.txt sama persis dengan yang digunakan saat pelatihan model. Error: {e}")
        st.stop()
    else:
        return pipeline, faq_table

# NOTE(review): this runs before st.set_page_config below; if loading fails,
# st.error fires first — confirm Streamlit version tolerates that ordering.
model, df_faq = load_resources()
52
+
53
# --- 2. Text cleaning and chatbot logic ---
def clean_text(text):
    """Normalize a user query: drop punctuation, lowercase, trim whitespace.

    Must mirror the preprocessing applied when the model was trained,
    otherwise the TF-IDF features will not match.
    """
    no_punct = re.sub(r'[^\w\s]', '', text)
    return no_punct.lower().strip()
58
+
59
def predict_and_respond(query, model, df_faq):
    """Classify the query's intent and return the matching FAQ answer.

    Args:
        query: raw user text.
        model: fitted pipeline exposing .predict on a list of strings.
        df_faq: DataFrame with 'kategori' and 'chatbot_response' columns.

    Returns:
        The first stored response for the predicted intent, or an apology
        message naming the intent when no response row exists.
    """
    predicted_intent = model.predict([clean_text(query)])[0]
    answers = df_faq.loc[df_faq['kategori'] == predicted_intent, 'chatbot_response'].tolist()

    if not answers:
        return f"Maaf, saya tidak dapat menemukan jawaban. (Intent: {predicted_intent})"
    return answers[0]
70
+
71
# --- 3. Streamlit interface ---

st.set_page_config(page_title="Perpustakaan Chatbot")
st.title("📚 Asisten Virtual Perpustakaan")
st.markdown("Halo! Tanyakan tentang **keanggotaan, peminjaman, atau fasilitas** kami.")

# First run of the session: start with an empty chat history.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far (Streamlit reruns the whole script).
for past_msg in st.session_state.messages:
    with st.chat_message(past_msg["role"]):
        st.markdown(past_msg["content"])

# Handle a newly submitted prompt, if any.
prompt = st.chat_input("Tulis pertanyaan Anda di sini...")
if prompt:
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    answer = predict_and_respond(prompt, model, df_faq)

    with st.chat_message("assistant"):
        st.markdown(answer)

    st.session_state.messages.append({"role": "assistant", "content": answer})
95
+
chatbot_model (1).pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2ec834278832bc8a1f44a934b75819e29cc8017342e37adf5917e6a9e09b44d
3
+ size 5459