import os
import json
import numpy as np
import tensorflow as tf
import gradio as gr

# Directory that holds the trained model and its preprocessing artifacts.
ASSETS_DIR = "model_assets"

print("--- Loading Model & Assets... ---")
try:
    # Trained complaint classifier saved in HDF5 format.
    model_path = os.path.join(ASSETS_DIR, "model_klasifikasi_aduan.h5")
    model = tf.keras.models.load_model(model_path)

    # Sequence length used at training time; fall back to 100 if absent.
    with open(os.path.join(ASSETS_DIR, "config.json"), "r", encoding="utf-8") as f:
        config = json.load(f)
    MAX_SEQUENCE_LENGTH = config.get('max_sequence_length', 100)

    # label_map maps label name -> class index; invert it for readable output.
    with open(os.path.join(ASSETS_DIR, "label_map.json"), "r", encoding="utf-8") as f:
        label_map = json.load(f)
    index_to_label = {v: k for k, v in label_map.items()}

    # One token per line; the line number is the token's integer id.
    with open(os.path.join(ASSETS_DIR, "vocabulary.txt"), "r", encoding="utf-8") as f:
        vocabulary = [line.strip() for line in f]
    word_to_index = {word: index for index, word in enumerate(vocabulary)}

    print("✅ Success: Model Loaded!")
except Exception as e:
    # NOTE(review): on failure the script keeps running but the globals above
    # stay undefined, so any later prediction raises NameError — consider
    # exiting here instead.
    print(f"❌ Error Loading Assets: {e}")
    print("Pastikan folder 'model_assets' berisi file .h5, config, label_map, dan vocabulary.")
def preprocess_text(text, oov_index=1):
    """Convert raw complaint text into a padded index sequence for the model.

    Args:
        text: Raw user input string.
        oov_index: Vocabulary index substituted for out-of-vocabulary tokens.
            Defaults to 1, matching the original hard-coded value
            (presumably index 0 is the padding id — TODO confirm against the
            training tokenizer).

    Returns:
        An int array of shape (1, MAX_SEQUENCE_LENGTH) ready for
        ``model.predict``, or None when ``text`` is empty/falsy.
    """
    if not text:
        return None

    # Simple lowercase whitespace tokenization, consistent with the
    # one-token-per-line vocabulary file loaded at startup.
    tokens = text.lower().split()
    token_indices = [word_to_index.get(token, oov_index) for token in tokens]

    # Pad/truncate at the end ('post') so sequences line up with training.
    padded_indices = tf.keras.preprocessing.sequence.pad_sequences(
        [token_indices],
        maxlen=MAX_SEQUENCE_LENGTH,
        padding='post',
        truncating='post',
    )
    return padded_indices
def klasifikasi_aduan(text):
    """Classify a complaint and return per-label confidence scores.

    Args:
        text: Raw complaint text from the Gradio textbox.

    Returns:
        A dict mapping label name -> probability (consumed by ``gr.Label``),
        or a warning string when the input is too short to classify.
    """
    if not text or len(text.strip()) < 3:
        return "⚠️ Mohon masukkan kalimat aduan yang jelas."

    processed_input = preprocess_text(text)
    # Defensive: preprocess_text returns None for empty input; never feed
    # None into model.predict.
    if processed_input is None:
        return "⚠️ Mohon masukkan kalimat aduan yang jelas."

    # predict() returns a batch; take the single row of class probabilities.
    prediction = model.predict(processed_input)[0]

    # Map each class index to its human-readable label for the UI.
    return {
        index_to_label.get(i, f"Label {i}"): float(score)
        for i, score in enumerate(prediction)
    }
# Sample complaints shown beneath the input box so users can try the
# classifier with one click.
examples = [
    ["Jalan raya di kecamatan Wonosobo berlubang parah dan membahayakan pengendara motor."],
    ["Anak saya dipersulit saat mengurus administrasi pindah sekolah, mohon bantuannya."],
    ["Lampu penerangan jalan umum di desa mati total sudah satu bulan belum diperbaiki."],
    ["Ada pungutan liar di lokasi wisata yang tidak sesuai dengan tiket resmi."],
]
# Gradio UI: free-text complaint in, top-3 predicted sectors out.
demo = gr.Interface(
    fn=klasifikasi_aduan,
    inputs=gr.Textbox(lines=4, placeholder="Ketik aduan Anda di sini..."),
    outputs=gr.Label(num_top_classes=3, label="Hasil Analisis Model"),
    title="🏛️ LaporGub! Complaints Classifier",
    description="A Bi-LSTM deep learning model designed to classify Central Java public complaints into 14 government sectors with 87% F1-score accuracy.",
    examples=examples,
    theme="soft",
)

if __name__ == "__main__":
    # NOTE(review): share=True opens a public Gradio tunnel — confirm this is
    # intended outside local demos.
    demo.launch(share=True)