#!/usr/bin/env python3
"""
Streamlit App for Government Complaint Classification
Author: Based on XLM-RoBERTa implementation by Farrikh Alzami
"""
import streamlit as st
import pandas as pd
import numpy as np
import time
import io
from typing import List, Dict, Tuple
import os
from pathlib import Path
# Custom imports
from utils.model_loader import ModelLoader
from utils.text_preprocessor import TextPreprocessor
from utils.visualization import Visualizer
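# NOTE: the `utils` package (ModelLoader, TextPreprocessor, Visualizer) is
# assumed to be a local helper package shipped alongside this app, not a
# third-party dependency.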
# Page configuration
st.set_page_config(
    page_title="Government Complaint Classifier",
    page_icon="🏛️",
    layout="wide",
    initial_sidebar_state="expanded"
)
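# st.set_page_config must be the first Streamlit command executed in the
# script, so it stays ahead of the st.markdown/st.sidebar calls below.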
# Custom CSS for warm color scheme
st.markdown("""
""", unsafe_allow_html=True)
class StreamlitApp:
    def __init__(self):
        self.model_loader = ModelLoader()
        self.text_preprocessor = TextPreprocessor()
        self.visualizer = Visualizer()
        # Initialize session state
        if 'model_type' not in st.session_state:
            st.session_state.model_type = 'cross_entropy'
        if 'model_loaded' not in st.session_state:
            st.session_state.model_loaded = False
        if 'predictions_history' not in st.session_state:
            st.session_state.predictions_history = []
        if 'last_analyzed_text' not in st.session_state:
            st.session_state.last_analyzed_text = ""
        if 'current_results' not in st.session_state:
            st.session_state.current_results = None
        if 'batch_results' not in st.session_state:
            st.session_state.batch_results = None
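    # Streamlit re-executes this script from the top on every interaction, so
    # anything that must survive a re-run (model choice, load flag, results,
    # history) is kept in st.session_state rather than on the instance.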
    def render_header(self):
        """Render application header"""
        st.markdown("""
        🏛️ Government Complaint Classifier
        Klasifikasi Otomatis Keluhan Masyarakat menggunakan XLM-RoBERTa
        """, unsafe_allow_html=True)
    def render_sidebar(self):
        """Render sidebar with model selection"""
        with st.sidebar:
            st.header("⚙️ Model Configuration")
            # Model selection toggle
            model_options = {
                'cross_entropy': '🎯 Cross Entropy Loss',
                'focal_loss': '🔥 Focal Loss'
            }
            selected_model = st.radio(
                "Pilih Model:",
                options=list(model_options.keys()),
                format_func=lambda x: model_options[x],
                index=0 if st.session_state.model_type == 'cross_entropy' else 1
            )
            # Update session state if the model selection changed
            if selected_model != st.session_state.model_type:
                st.session_state.model_type = selected_model
                st.session_state.model_loaded = False
                st.rerun()
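            # Switching models invalidates the loaded pipeline: clearing the
            # model_loaded flag forces a reload on the next prediction, and
            # st.rerun() refreshes the sidebar status widgets immediately.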
            st.markdown("---")
            # Model availability check
            st.subheader("📁 Model Files Status")
            available_models = self.model_loader.get_available_models()
            for model_type in ['cross_entropy', 'focal_loss']:
                if model_type in available_models:
                    # Check whether this model is the one currently loaded
                    is_current_loaded = (
                        hasattr(self.model_loader, 'current_model_type') and
                        self.model_loader.current_model_type == model_type and
                        hasattr(self.model_loader, 'classifier_pipeline') and
                        self.model_loader.classifier_pipeline is not None
                    )
                    if is_current_loaded and model_type == st.session_state.model_type:
                        st.success(f"✅ {model_type.replace('_', ' ').title()} (Currently Loaded)")
                    else:
                        st.success(f"✅ {model_type.replace('_', ' ').title()}")
                else:
                    st.error(f"❌ {model_type.replace('_', ' ').title()}")
            if not available_models:
                st.warning("⚠️ No models found! Please check the model directory.")
                st.info("""
                Expected structure:
                ```
                models/
                ├── cross_entropy/
                │   ├── model.safetensors
                │   ├── config.json
                │   └── ...
                └── focal_loss/
                    ├── model.safetensors
                    ├── config.json
                    └── ...
                ```
                """)
            st.markdown("---")
            # Model info
            st.subheader("📊 Model Information")
            # Real-time check of the actual model status
            is_model_actually_loaded = (
                hasattr(self.model_loader, 'classifier_pipeline') and
                self.model_loader.classifier_pipeline is not None and
                self.model_loader.current_model_type == st.session_state.model_type
            )
            if is_model_actually_loaded:
                model_info = self.model_loader.get_model_info()
                st.success(f"**Status:** ✅ {model_info['status']}")
                st.info(f"**Current Model:** {model_info['model_type'].replace('_', ' ').title()}")
                st.info(f"**Device:** {model_info['device']}")
                st.info(f"**Categories:** {model_info['num_labels']}")
                # Show some model details
                with st.expander("📋 Model Details"):
                    st.write(f"**Model Size:** {model_info['model_size']}")
                    st.write("**Available Categories:**")
                    categories = model_info.get('categories', [])
                    if categories:
                        # Show the first 10 categories only
                        display_categories = categories[:10]
                        st.write(", ".join(display_categories))
                        if len(categories) > 10:
                            st.write(f"... and {len(categories) - 10} more categories")
                    else:
                        st.write("Categories not available")
            else:
                st.info(f"""
                **Current Model:** {model_options[st.session_state.model_type]}
                **Architecture:** XLM-RoBERTa Base
                **Max Length:** 256 tokens
                **Languages:** Multilingual (ID, EN, etc.)
                **Status:** ⏳ Not loaded (will load on first use)
                """)
            # Show a loading hint
            if not st.session_state.model_loaded:
                st.info("💡 Model will be loaded automatically when you analyze text.")
            st.markdown("---")
            # Global reset button
            st.subheader("🔄 Reset Application")
            if st.button("🧹 Clear All & Reset Models", use_container_width=True, type="secondary"):
                # Clear all app-related session state keys
                for key in list(st.session_state.keys()):
                    if key.startswith(('model_', 'predictions_', 'last_', 'current_', 'batch_')):
                        del st.session_state[key]
                # Reinitialize essential states
                st.session_state.model_type = 'cross_entropy'
                st.session_state.model_loaded = False
                st.session_state.predictions_history = []
                st.session_state.last_analyzed_text = ""
                st.session_state.current_results = None
                st.session_state.batch_results = None
                # Clear model loader state
                self.model_loader.model = None
                self.model_loader.tokenizer = None
                self.model_loader.label_mappings = None
                self.model_loader.classifier_pipeline = None
                self.model_loader.current_model_type = None
                # Clear cache
                st.cache_resource.clear()
                st.success("✅ Application reset complete!")
                st.rerun()
            st.markdown("---")
            # Prediction history
            if st.session_state.predictions_history:
                st.subheader("📝 Recent Predictions")
                for i, pred in enumerate(st.session_state.predictions_history[-3:]):
                    with st.expander(f"Prediction {len(st.session_state.predictions_history) - i}"):
                        st.write(f"**Text:** {pred['text'][:100]}...")
                        st.write(f"**Category:** {pred['category']}")
                        st.write(f"**Confidence:** {pred['confidence']:.2%}")
    def predict_single_text(self, text: str) -> Dict:
        """Predict a single text, recording total processing time"""
        start_time = time.time()
        # Preprocess text
        cleaned_text = self.text_preprocessor.clean_text(text)
        # Force a reload if the model type changed or no pipeline is available
        force_reload = (
            not st.session_state.model_loaded or
            self.model_loader.current_model_type != st.session_state.model_type or
            self.model_loader.classifier_pipeline is None
        )
        # Load the model if needed
        try:
            if force_reload:
                with st.spinner("Loading model..."):
                    # Clear the existing model first
                    self.model_loader.model = None
                    self.model_loader.tokenizer = None
                    self.model_loader.label_mappings = None
                    self.model_loader.classifier_pipeline = None
                    self.model_loader.current_model_type = None
                    # Load a fresh model
                    self.model_loader.load_model(st.session_state.model_type)
                    # Update session state explicitly
                    st.session_state.model_loaded = True
        except Exception as e:
            st.error(f"Failed to load model: {str(e)}")
            return {
                'predicted_category': 'Error: Model Loading Failed',
                'confidence': 0.0,
                'predicted_id': -1,
                'all_predictions': {'Error': 1.0},
                'processing_time': 0.0,
                'original_text': text,
                'cleaned_text': cleaned_text
            }
        # Make the prediction
        try:
            result = self.model_loader.predict(cleaned_text)
        except Exception as e:
            st.error(f"Failed to make prediction: {str(e)}")
            return {
                'predicted_category': 'Error: Prediction Failed',
                'confidence': 0.0,
                'predicted_id': -1,
                'all_predictions': {'Error': 1.0},
                'processing_time': 0.0,
                'original_text': text,
                'cleaned_text': cleaned_text
            }
        processing_time = time.time() - start_time
        result['processing_time'] = processing_time
        result['original_text'] = text
        result['cleaned_text'] = cleaned_text
        return result
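    # predict_single_text always returns a dict with the same keys, whether the
    # call succeeded or failed (predicted_category, confidence, predicted_id,
    # all_predictions, processing_time, original_text, cleaned_text), so the
    # rendering code never has to branch on success vs. failure.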
    def predict_batch_texts(self, texts: List[str]) -> List[Dict]:
        """Predict a batch of texts"""
        # Force a reload if the model type changed or no pipeline is available
        force_reload = (
            not st.session_state.model_loaded or
            self.model_loader.current_model_type != st.session_state.model_type or
            self.model_loader.classifier_pipeline is None
        )
        # Load the model once for the whole batch
        try:
            if force_reload:
                with st.spinner("Loading model for batch processing..."):
                    # Clear the existing model first
                    self.model_loader.model = None
                    self.model_loader.tokenizer = None
                    self.model_loader.label_mappings = None
                    self.model_loader.classifier_pipeline = None
                    self.model_loader.current_model_type = None
                    # Load a fresh model
                    self.model_loader.load_model(st.session_state.model_type)
                    # Update session state explicitly
                    st.session_state.model_loaded = True
        except Exception as e:
            st.error(f"Failed to load model for batch processing: {str(e)}")
            # Return error results for all texts
            error_result = {
                'predicted_category': 'Error: Model Loading Failed',
                'confidence': 0.0,
                'predicted_id': -1,
                'all_predictions': {'Error': 1.0}
            }
            return [error_result] * len(texts)
        results = []
        progress_bar = st.progress(0)
        for i, text in enumerate(texts):
            try:
                # Preprocess
                cleaned_text = self.text_preprocessor.clean_text(text)
                # Predict
                result = self.model_loader.predict(cleaned_text)
                result['original_text'] = text
                result['cleaned_text'] = cleaned_text
                results.append(result)
            except Exception as e:
                st.warning(f"Failed to process text {i+1}: {str(e)}")
                # Record an error result for this specific text
                error_result = {
                    'predicted_category': 'Error: Prediction Failed',
                    'confidence': 0.0,
                    'predicted_id': -1,
                    'all_predictions': {'Error': 1.0},
                    'original_text': text,
                    'cleaned_text': self.text_preprocessor.clean_text(text)
                }
                results.append(error_result)
            # Update progress after every item, success or failure
            progress_bar.progress((i + 1) / len(texts))
        return results
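    # Hypothetical usage sketch (names are illustrative, not part of this file):
    # a CSV-upload tab could feed a complaint column straight into the batch API:
    #   df = pd.read_csv(uploaded_file)
    #   results = self.predict_batch_texts(df["complaint"].astype(str).tolist())
    #   st.session_state.batch_results = pd.DataFrame(results)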
    def render_single_text_tab(self):
        """Render single text analysis tab"""
        st.header("📝 Single Text Analysis")
        # Show the current model status at the top
        is_model_loaded = (
            hasattr(self.model_loader, 'classifier_pipeline') and
            self.model_loader.classifier_pipeline is not None and
            self.model_loader.current_model_type == st.session_state.model_type
        )
        if is_model_loaded:
            st.success(f"🎯 Current Model: **{st.session_state.model_type.replace('_', ' ').title()} - READY**")
        else:
            st.info(f"⏳ Current Model: **{st.session_state.model_type.replace('_', ' ').title()} - Will load on first use**")
        # Text input
        user_text = st.text_area(
            "Masukkan teks keluhan masyarakat:",
            height=150,
            placeholder="Contoh: Saya ingin melaporkan jalan rusak di daerah saya yang sudah lama tidak diperbaiki...",
            key="main_text_input"
        )
        # Analysis buttons
        col1, col2, col3, col4 = st.columns([2, 1, 1, 2])
        with col2:
            analyze_button = st.button(
                "🔍 Analyze Text",
                type="primary",
                use_container_width=True
            )
        with col3:
            clear_button = st.button(
                "🧹 Clear",
                type="secondary",
                use_container_width=True,
                help="Clear results and reset model state"
            )
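        # The [2, 1, 1, 2] column ratios center the two buttons; col1 and col4
        # are only spacers and are intentionally unused.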
        if 'last_analyzed_text' not in st.session_state:
            st.session_state.last_analyzed_text = ""
        if 'current_results' not in st.session_state:
            st.session_state.current_results = None
        # Check if the text has changed since the last analysis
        text_changed = user_text.strip() != st.session_state.last_analyzed_text
        if clear_button:
            # Clear all analysis-related states
            st.session_state.model_loaded = False
            st.session_state.predictions_history = []
            st.session_state.last_analyzed_text = ""
            st.session_state.current_results = None
            # Clear model loader state
            self.model_loader.model = None
            self.model_loader.tokenizer = None
            self.model_loader.label_mappings = None
            self.model_loader.classifier_pipeline = None
            self.model_loader.current_model_type = None
            # Clear cache
            st.cache_resource.clear()
            st.success("✅ Cleared all states and model cache!")
            st.rerun()
        if analyze_button and user_text.strip():
            try:
                with st.spinner("Analyzing text..."):
                    result = self.predict_single_text(user_text)
                    # Store in history and session state
                    st.session_state.predictions_history.append({
                        'text': user_text,
                        'category': result['predicted_category'],
                        'confidence': result['confidence']
                    })
                    st.session_state.last_analyzed_text = user_text.strip()
                    st.session_state.current_results = result
                # Display results
                self.display_single_prediction_results(result)
            except Exception as e:
                st.error(f"❌ Error during analysis: {str(e)}")
                st.info("💡 Try clicking the 'Clear' button to reset the model state.")
        elif analyze_button and not user_text.strip():
            st.warning("⚠️ Please enter some text to analyze!")
        # Display previous results if available and the text hasn't changed
        elif st.session_state.current_results and not text_changed and not analyze_button:
            st.info("📋 Showing previous analysis results. Click 'Analyze Text' to update or 'Clear' to reset.")
            self.display_single_prediction_results(st.session_state.current_results)
        # Show a hint if the text has changed
        elif text_changed and st.session_state.current_results:
            st.info("✏️ Text has been modified. Click 'Analyze Text' to get new predictions or 'Clear' to reset.")
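        # The four branches above keep the tab stable across re-runs: run a new
        # analysis, warn on empty input, re-show cached results while the text
        # is unchanged, and hint that results are stale once the text diverges.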
    def display_single_prediction_results(self, result: Dict):
        """Display single prediction results"""
        st.markdown("## 📊 Analysis Results")
        # Main prediction container
        st.markdown(f"""