import gradio as gr
import torch
import torch.nn.functional as F
from transformers import RobertaTokenizerFast, RobertaForSequenceClassification
from sklearn.preprocessing import LabelEncoder
import numpy as np
# Load the tokenizer and the fine-tuned model
tokenizer = RobertaTokenizerFast.from_pretrained("FadQ/results")
model = RobertaForSequenceClassification.from_pretrained("FadQ/results")
model.eval()

# Ekman labels (order must match the label order used during training)
ekman_labels = ['anger', 'disgust', 'fear', 'joy', 'sadness', 'surprise', 'neutral']
label_encoder = LabelEncoder()
label_encoder.fit(ekman_labels)
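# Note: LabelEncoder sorts classes alphabetically, so label_encoder.classes_ may not
# match the ekman_labels order above. The encoder is not used at inference time; the
# explicit list drives the output mapping. If the checkpoint defines
# model.config.id2label, that mapping could be used instead (an assumption, not
# verified against the FadQ/results config).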
# Predict the average emotion distribution over the lines of a diary entry
def predict_emotion_distribution(text):
    lines = [line.strip() for line in text.split('\n') if line.strip()]
    all_probs = []
    for line in lines:
        inputs = tokenizer(line, return_tensors="pt", truncation=True, padding=True, max_length=128)
        with torch.no_grad():
            logits = model(**inputs).logits
        probs = F.softmax(logits, dim=-1).squeeze().cpu().numpy()
        all_probs.append(probs)
    if not all_probs:
        return {label: 0.0 for label in ekman_labels}
    avg_probs = np.mean(all_probs, axis=0)
    result = {label: float(np.round(prob, 4)) for label, prob in zip(ekman_labels, avg_probs)}
    return result
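# Illustrative call (hypothetical input; the actual probabilities depend on the
# FadQ/results checkpoint):
#   predict_emotion_distribution("Work went well today\nBut the evening traffic made me angry")
#   -> {'anger': ..., 'disgust': ..., 'fear': ..., 'joy': ..., 'sadness': ..., 'surprise': ..., 'neutral': ...}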
# Gradio interface
interface = gr.Interface(
    fn=predict_emotion_distribution,
    inputs=gr.Textbox(lines=10, placeholder="Write your diary. Each line = 1 sentence...", label="Daily Diary"),
    outputs=gr.Label(num_top_classes=7, label="Emotion Distribution"),
    title="Emotion Prediction from a Daily Diary",
    description="The model detects emotions (7 Ekman labels) from diary text. The input is split per line; the output is the average probability per emotion."
)

interface.launch()