# Hugging Face Space file header (scrape residue, not code):
# author: aliffatulmf — commit a072a60 ("update ui")
import functools
import os

import gradio as gr
import httpx
import numpy as np
import onnxruntime as ort
from scipy.special import softmax
from tqdm import tqdm
from transformers import AutoTokenizer
# Deployment configuration via environment variables.
# NOTE(review): all three are None when the variables are unset — the code
# below does not validate this, so a missing variable surfaces later as an
# error from os.path.exists / AutoTokenizer rather than a clear message.
model_name = os.getenv("MODEL_NAME")  # HF model id used to load the tokenizer
onnx_model_url = os.getenv("ONNX_MODEL_URL")  # URL the ONNX weights are downloaded from
onnx_model_path = os.getenv("ONNX_MODEL_PATH")  # local path the model is stored at
def download_onnx_model():
    """Ensure the ONNX model file exists at ``onnx_model_path``.

    Streams the file from ``onnx_model_url`` with a tqdm progress bar.
    The download is written to a temporary ``.part`` file and atomically
    renamed into place on success, so a failed or interrupted download can
    never leave a corrupt file that a later run would mistake for a
    complete model (the original wrote directly to the final path, and a
    partial file made every subsequent run skip the download).

    Returns:
        bool: True when the model is present (already on disk, or
        downloaded successfully now); False when the download failed.
    """
    if os.path.exists(onnx_model_path):
        print("📁 ONNX model already exists, skipping download.")
        return True

    print("📥 Downloading ONNX model...")
    partial_path = onnx_model_path + ".part"
    try:
        with httpx.Client(timeout=300, follow_redirects=True) as client:
            with client.stream("GET", onnx_model_url) as response:
                response.raise_for_status()
                # content-length may be absent; tqdm then shows an open-ended bar.
                total_size = int(response.headers.get("content-length", 0))
                progress_bar = tqdm(
                    total=total_size,
                    unit="B",
                    unit_scale=True,
                    unit_divisor=1024,
                    desc="Downloading model",
                    ncols=100,
                    bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]",
                )
                try:
                    with open(partial_path, "wb") as f:
                        for chunk in response.iter_bytes(chunk_size=8192):
                            if chunk:
                                f.write(chunk)
                                progress_bar.update(len(chunk))
                finally:
                    # Close the bar even on error so the terminal is not left broken.
                    progress_bar.close()
        # Atomic rename: the final path only ever holds a complete file.
        os.replace(partial_path, onnx_model_path)
        print(f"✅ ONNX model downloaded successfully! ({total_size / (1024 * 1024):.2f} MB)")
        return True
    except Exception as e:
        print(f"❌ Error downloading ONNX model: {e}")
        # Remove any partial download so the next attempt starts clean.
        if os.path.exists(partial_path):
            try:
                os.remove(partial_path)
            except OSError:
                pass
        return False
# --- One-time model initialization at import time ---------------------------
# On success this defines the module globals used by the inference code:
#   tokenizer, ort_session, labels, model_loaded
# On any failure, model_loaded is False and the UI shows the error state.
try:
    if download_onnx_model():
        # Slow (Python) tokenizer is forced with use_fast=False —
        # presumably required by this model; TODO(review) confirm the fast
        # tokenizer is actually incompatible, it would be much quicker.
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
        sess_options = ort.SessionOptions()
        # Modest thread counts: suitable for small shared-CPU hosts.
        sess_options.inter_op_num_threads = 2
        sess_options.intra_op_num_threads = 2
        sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        ort_session = ort.InferenceSession(
            onnx_model_path,
            sess_options=sess_options,
            providers=[
                (
                    "CPUExecutionProvider",
                    {
                        "arena_extend_strategy": "kNextPowerOfTwo",
                        "enable_cpu_mem_arena": True,
                        "memory_pattern_optimization": True,
                        "use_parallel_mode": False,
                    },
                )
            ],
        )
        # Order must match the model's output logits: index 0 = NEGATIVE,
        # index 1 = POSITIVE (text_classifier maps these to BUKAN JUDOL / JUDOL).
        labels = ["NEGATIVE", "POSITIVE"]
        model_loaded = True
        print("✅ Model ONNX berhasil dimuat dengan optimasi CPU!")
    else:
        model_loaded = False
except Exception as e:
    print(f"❌ Error loading model: {e}")
    model_loaded = False
@functools.lru_cache(maxsize=1024)
def preprocess_text_cached(text):
    """Tokenize *text* for the ONNX model, memoized per unique input string.

    The function's name promised caching but the original had none; this
    adds a bounded LRU cache so repeated classifications of the same text
    skip tokenization entirely.

    Args:
        text (str): Raw input text (hashable, so usable as a cache key).

    Returns:
        tuple: ``(input_ids, attention_mask)`` NumPy arrays, padded/truncated
        to a fixed length of 128 tokens.

    NOTE: cached arrays are shared between calls and must not be mutated
    in place — the current caller only calls ``.astype()``, which copies.
    """
    encoded = tokenizer(
        text,
        truncation=True,
        padding="max_length",
        max_length=128,
        return_tensors="np",
    )
    return encoded["input_ids"], encoded["attention_mask"]
def preprocess_text(text):
    """Return tokenized inputs as a dict keyed the way the ONNX session expects."""
    ids, mask = preprocess_text_cached(text)
    return {"input_ids": ids, "attention_mask": mask}
def run_onnx_inference(text):
    """Run the ONNX classifier on *text* and return class probabilities.

    Args:
        text (str): Input text (already length-capped by the caller).

    Returns:
        numpy.ndarray: 1-D softmax probability vector in the model's output
        order (index 0 = NEGATIVE, index 1 = POSITIVE, per ``labels``).

    Raises:
        RuntimeError: wrapping any tokenization or ONNX session failure.
        (The original raised bare ``Exception``; RuntimeError is a subclass
        of Exception, so the caller's ``except Exception`` still catches it,
        and ``from e`` preserves the full cause chain for debugging.)
    """
    try:
        encoded = preprocess_text(text)
        # ONNX Runtime transformer graphs expect int64 input tensors.
        ort_inputs = {
            "input_ids": encoded["input_ids"].astype(np.int64),
            "attention_mask": encoded["attention_mask"].astype(np.int64),
        }
        ort_outputs = ort_session.run(None, ort_inputs)
        logits = ort_outputs[0]
        # Batch size is 1: softmax over the single row of logits.
        scores = softmax(logits[0])
        return scores
    except Exception as e:
        raise RuntimeError(f"Error inferensi ONNX: {str(e)}") from e
def text_classifier(text):
    """Gradio handler: classify *text* and render the HTML result card."""
    # Guard clauses: model failed to load, or nothing to classify.
    if not model_loaded:
        return create_elegant_result("Error", "Model tidak dapat dimuat", 0, 0)
    if not text or text.strip() == "":
        return create_elegant_result("Warning", "Masukkan teks untuk diklasifikasi", 0, 0)

    # Cap the input length fed to the model.
    text = text[:500]
    try:
        probs = run_onnx_inference(text)
        winner = int(np.argmax(probs))
        # Map the model's English label to the Indonesian display label.
        indo_label = {"NEGATIVE": "BUKAN JUDOL", "POSITIVE": "JUDOL"}.get(
            labels[winner], labels[winner]
        )
        not_gambling_pct = probs[0] * 100
        gambling_pct = probs[1] * 100
        return create_elegant_result(
            indo_label,
            f"Confidence: {probs[winner] * 100:.1f}%",
            not_gambling_pct,
            gambling_pct,
        )
    except Exception as e:
        return create_elegant_result("Error", f"Classification failed: {str(e)}", 0, 0)
def create_elegant_result(classification, confidence, bukan_judol_pct, judol_pct):
    """Render the glassmorphism result card as an HTML string.

    Args:
        classification: Display label ("BUKAN JUDOL", "JUDOL", "Error", ...).
        confidence: Pre-formatted confidence or message line.
        bukan_judol_pct: Percentage (0-100) for the green progress bar.
        judol_pct: Percentage (0-100) for the red progress bar.

    Returns:
        str: Self-contained HTML relying on the CSS classes injected by the UI.
    """
    # Header color keyed by verdict; anything else (Error/Warning) is gray.
    color_by_label = {
        "BUKAN JUDOL": "#22c55e",  # green
        "JUDOL": "#ef4444",  # red
    }
    main_color = color_by_label.get(classification, "#6b7280")

    return f"""
    <div class="result-container">
        <div style="text-align: center; margin-bottom: 20px;">
            <h3 style="color: {main_color};
                       font-size: 24px;
                       font-weight: bold;
                       margin: 0 0 8px 0;
                       text-shadow: 0 1px 2px rgba(0, 0, 0, 0.1);">
                {classification}
            </h3>
            <p class="confidence-text">
                {confidence}
            </p>
        </div>
        <div style="margin-top: 20px;">
            <div style="margin-bottom: 15px;">
                <div style="display: flex;
                            justify-content: space-between;
                            align-items: center;
                            margin-bottom: 6px;">
                    <span style="font-weight: 600;
                                 color: #22c55e;
                                 font-size: 14px;">
                        🟢 BUKAN JUDOL
                    </span>
                    <span style="font-weight: bold;
                                 color: #22c55e;
                                 font-size: 14px;">
                        {bukan_judol_pct:.1f}%
                    </span>
                </div>
                <div class="progress-bar-bg">
                    <div style="background: linear-gradient(90deg, #22c55e 0%, #16a34a 100%);
                                height: 100%;
                                width: {bukan_judol_pct}%;
                                border-radius: 10px;
                                transition: width 0.5s ease;">
                    </div>
                </div>
            </div>
            <div style="margin-bottom: 10px;">
                <div style="display: flex;
                            justify-content: space-between;
                            align-items: center;
                            margin-bottom: 6px;">
                    <span style="font-weight: 600;
                                 color: #ef4444;
                                 font-size: 14px;">
                        🔴 JUDOL
                    </span>
                    <span style="font-weight: bold;
                                 color: #ef4444;
                                 font-size: 14px;">
                        {judol_pct:.1f}%
                    </span>
                </div>
                <div class="progress-bar-bg">
                    <div style="background: linear-gradient(90deg, #ef4444 0%, #dc2626 100%);
                                height: 100%;
                                width: {judol_pct}%;
                                border-radius: 10px;
                                transition: width 0.5s ease;">
                    </div>
                </div>
            </div>
        </div>
    </div>
    """
# --- Gradio UI definition ----------------------------------------------------
# Builds the Blocks app: injected CSS (acrylic/glassmorphism card styling with
# light/dark variants), a header, the input column, the results column, sample
# texts, and the event wiring. Indentation reconstructed; all literals unchanged.
with gr.Blocks(title="Text Classification Panel", theme=gr.themes.Default()) as demo:
    # Inject custom CSS to limit width
    gr.HTML("""
    <style>
    #input-col, #result-col {
        max-width: 800px;
        margin-left: auto;
        margin-right: auto;
    }
    #input-textbox, #classify-btn, #result-json, #examples-block {
        max-width: 800px;
        margin-left: auto;
        margin-right: auto;
    }
    /* Acrylic glassmorphism effect for light mode */
    .result-container {
        background: rgba(248, 250, 252, 0.7);
        backdrop-filter: blur(16px);
        -webkit-backdrop-filter: blur(16px);
        border-radius: 16px;
        padding: 24px;
        border: 1px solid rgba(255, 255, 255, 0.2);
        box-shadow:
            0 8px 32px rgba(0, 0, 0, 0.1),
            0 1px 0 rgba(255, 255, 255, 0.5) inset,
            0 -1px 0 rgba(0, 0, 0, 0.05) inset;
        transition: all 0.3s ease;
    }
    .confidence-text {
        color: rgba(100, 116, 139, 0.9);
        font-size: 16px;
        margin: 0;
        font-weight: 500;
    }
    .progress-bar-bg {
        background: rgba(241, 245, 249, 0.8);
        backdrop-filter: blur(8px);
        border-radius: 10px;
        height: 12px;
        overflow: hidden;
        box-shadow:
            inset 0 2px 4px rgba(0, 0, 0, 0.1),
            0 1px 0 rgba(255, 255, 255, 0.3);
    }
    /* Dark mode styles */
    @media (prefers-color-scheme: dark) {
        .result-container {
            background: rgba(15, 23, 42, 0.7);
            border: 1px solid rgba(255, 255, 255, 0.1);
            box-shadow:
                0 8px 32px rgba(0, 0, 0, 0.3),
                0 1px 0 rgba(255, 255, 255, 0.1) inset,
                0 -1px 0 rgba(0, 0, 0, 0.2) inset;
        }
        .confidence-text {
            color: rgba(148, 163, 184, 0.9);
        }
        .progress-bar-bg {
            background: rgba(30, 41, 59, 0.8);
            box-shadow:
                inset 0 2px 4px rgba(0, 0, 0, 0.3),
                0 1px 0 rgba(255, 255, 255, 0.1);
        }
    }
    /* Force dark mode for specific Gradio themes */
    .dark .result-container,
    [data-theme="dark"] .result-container {
        background: rgba(15, 23, 42, 0.7);
        border: 1px solid rgba(255, 255, 255, 0.1);
        box-shadow:
            0 8px 32px rgba(0, 0, 0, 0.3),
            0 1px 0 rgba(255, 255, 255, 0.1) inset,
            0 -1px 0 rgba(0, 0, 0, 0.2) inset;
    }
    .dark .confidence-text,
    [data-theme="dark"] .confidence-text {
        color: rgba(148, 163, 184, 0.9);
    }
    .dark .progress-bar-bg,
    [data-theme="dark"] .progress-bar-bg {
        background: rgba(30, 41, 59, 0.8);
        box-shadow:
            inset 0 2px 4px rgba(0, 0, 0, 0.3),
            0 1px 0 rgba(255, 255, 255, 0.1);
    }
    /* Hover effects */
    .result-container:hover {
        transform: translateY(-2px);
        box-shadow:
            0 12px 40px rgba(0, 0, 0, 0.15),
            0 1px 0 rgba(255, 255, 255, 0.6) inset,
            0 -1px 0 rgba(0, 0, 0, 0.1) inset;
    }
    @media (prefers-color-scheme: dark) {
        .result-container:hover {
            box-shadow:
                0 12px 40px rgba(0, 0, 0, 0.4),
                0 1px 0 rgba(255, 255, 255, 0.15) inset,
                0 -1px 0 rgba(0, 0, 0, 0.3) inset;
        }
    }
    .dark .result-container:hover,
    [data-theme="dark"] .result-container:hover {
        box-shadow:
            0 12px 40px rgba(0, 0, 0, 0.4),
            0 1px 0 rgba(255, 255, 255, 0.15) inset,
            0 -1px 0 rgba(0, 0, 0, 0.3) inset;
    }
    </style>
    """)
    # Page header (title + Indonesian subtitle).
    gr.HTML("""
    <div style="text-align: center; margin-bottom: 20px;">
        <h1>🔍 Text Classification</h1>
        <p style="font-size: 16px; color: #666;">Klasifikasi teks otomatis untuk deteksi konten judol</p>
    </div>
    """)
    # Input column: textbox, classify button, and a static model-status badge.
    with gr.Row():
        with gr.Column(scale=1, min_width=400, elem_id="input-col"):
            input_text = gr.Textbox(
                label="Input Text",
                placeholder="Masukkan teks yang ingin diklasifikasi...",
                lines=4,
                max_lines=6,
                elem_id="input-textbox",
            )
            btn_classify = gr.Button("Classify", variant="primary", size="lg", elem_id="classify-btn")
            # Status indicator — rendered once at startup from model_loaded;
            # it does not update live after launch.
            status_text = gr.HTML(f"""
            <div style="text-align: center; margin-top: 10px;">
                <span style="color: {'green' if model_loaded else 'red'};">
                    {'🟢 Model Ready' if model_loaded else '🔴 Model Error'}
                </span>
            </div>
            """)
    # Results section
    with gr.Row():
        with gr.Column(scale=1, min_width=400, elem_id="result-col"):
            output_result = gr.HTML(label="Classification Results", elem_id="result-html")
    # Quick examples
    gr.Examples(
        examples=[
            ["Akhirrrnyaaa bagus konten abang !! luar biasa coba main di ⭐𝗪𝗘𝗧𝗢𝗡𝟴𝟴"],
            ["AI : artificial intelligence ❌ AI : Anak Indonesia ✅"],
            ["Cakep Kali 🚩⚡ ᴊ ᴅ ᴛ 0 ᴛ 0 ⚡🚩, Bravo.."],
            ["Pagi-pagi udah dapet notif WD, mood langsung naik seharian"],
            ["Parah sih, bikin gue ketagihan parah sampe lupa waktu."],
            ["Teknologi AI berkembang sangat pesat dalam beberapa tahun terakhir"],
        ],
        inputs=input_text,
        label="Sample Texts",
        elem_id="examples-block"
    )
    # Wire both the button click and textbox Enter-submit to the classifier.
    btn_classify.click(fn=text_classifier, inputs=input_text, outputs=output_result)
    input_text.submit(fn=text_classifier, inputs=input_text, outputs=output_result)
if __name__ == "__main__":
    # Honor the platform-assigned PORT, defaulting to Gradio's usual 7860.
    port = int(os.environ.get("PORT", 7860))
    # 0.0.0.0 makes the server reachable from outside the container.
    demo.launch(
        server_name="0.0.0.0",
        server_port=port,
        share=False,
        show_error=True,
        pwa=True,
    )