"""
Taglish Gaslighting Detection - Interactive App
================================================
Sequential pipeline: Binary Detection -> Tactic Identification
Models trained on Philippine political Reddit discourse (Taglish).
Dataset: 928 annotated samples (IAA kappa = 0.81 binary / kappa = 0.86 tactic)
Split: 70 / 15 / 15 (train / val / test)
Usage (Hugging Face Spaces):
Upload this file as app.py in your Space.
Models are loaded directly from Hugging Face Hub.
"""
import traceback
import gradio as gr
import numpy as np
import pandas as pd
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# ---------------------------------------------------------------------------
# CONFIGURATION - all performance numbers sourced from training_summary.csv
# (train_model_v6.py run - MCC removed, confusion matrix added)
# ---------------------------------------------------------------------------
# !! UPDATE THIS to your Hugging Face username !!
HF_USERNAME = "Tokyosaurus"
# Model version - points to v2 repos (balanced + sentence-extracted dataset).
# Informational only: the repo ids below hard-code the -v2 suffix.
MODEL_VERSION = "v2"
class Config:
    # Model registry
    # Models are loaded directly from Hugging Face Hub.
    # Repo IDs follow the pattern: {HF_USERNAME}/{repo_name}
    MODELS = {
        "roberta-tagalog": {
            "display": "RoBERTa-Tagalog",
            "description": "best tactic Macro-F1 (0.6111)",
            "binary_repo": f"{HF_USERNAME}/taglish-roberta-binary-v2",
            "tactic_repo": f"{HF_USERNAME}/taglish-roberta-tactic-v2",
            "performance": {
                # Binary - validation
                "val_binary_macro_f1": 0.8460,
                "val_binary_gas_p": 0.8788,
                "val_binary_gas_r": 0.8056,
                "val_binary_gas_f1": 0.8406,
                "val_binary_roc_auc": 0.8983,
                # Binary - test in-domain
                "test_binary_macro_f1": 0.7758,
                "test_binary_gas_p": 0.8033,
                "test_binary_gas_r": 0.7206,
                "test_binary_gas_f1": 0.7597,
                "test_binary_roc_auc": 0.8832,
                # Tactic - validation
                "val_tactic_macro_f1": 0.5984,
                "val_tactic_f1_dd": 0.7000,
                "val_tactic_f1_tm": 0.2424,
                "val_tactic_f1_ci": 0.9000,
                "val_tactic_f1_ki": 0.3636,
                # Tactic - test in-domain
                "test_tactic_macro_f1": 0.6111,
                "test_tactic_f1_dd": 0.6250,
                "test_tactic_f1_tm": 0.4615,
                "test_tactic_f1_ci": 0.8889,
                "test_tactic_f1_ki": 0.3077,
            },
        },
        "mbert": {
            "display": "mBERT",
            "description": "best binary Gas-F1 (0.7934)",
            "binary_repo": f"{HF_USERNAME}/taglish-mbert-binary-v2",
            "tactic_repo": f"{HF_USERNAME}/taglish-mbert-tactic-v2",
            "performance": {
                # Binary - validation
                "val_binary_macro_f1": 0.8460,
                "val_binary_gas_p": 0.8788,
                "val_binary_gas_r": 0.8056,
                "val_binary_gas_f1": 0.8406,
                "val_binary_roc_auc": 0.9072,
                # Binary - test in-domain
                "test_binary_macro_f1": 0.8171,
                "test_binary_gas_p": 0.9057,
                "test_binary_gas_r": 0.7059,
                "test_binary_gas_f1": 0.7934,
                "test_binary_roc_auc": 0.9252,
                # Tactic - validation
                "val_tactic_macro_f1": 0.5670,
                "val_tactic_f1_dd": 0.7179,
                "val_tactic_f1_tm": 0.3077,
                "val_tactic_f1_ci": 0.7826,
                "val_tactic_f1_ki": 0.2400,
                # Tactic - test in-domain
                "test_tactic_macro_f1": 0.4948,
                "test_tactic_f1_dd": 0.4848,
                "test_tactic_f1_tm": 0.2857,
                "test_tactic_f1_ci": 0.6829,
                "test_tactic_f1_ki": 0.2308,
            },
        },
        "xlm-roberta": {
            "display": "XLM-RoBERTa",
            "description": "most balanced binary precision/recall",
            "binary_repo": f"{HF_USERNAME}/taglish-xlm-binary-v2",
            "tactic_repo": f"{HF_USERNAME}/taglish-xlm-tactic-v2",
            "performance": {
                # Binary - validation
                "val_binary_macro_f1": 0.8252,
                "val_binary_gas_p": 0.8310,
                "val_binary_gas_r": 0.8194,
                "val_binary_gas_f1": 0.8252,
                "val_binary_roc_auc": 0.8891,
                # Binary - test in-domain
                "test_binary_macro_f1": 0.7828,
                "test_binary_gas_p": 0.8167,
                "test_binary_gas_r": 0.7206,
                "test_binary_gas_f1": 0.7656,
                "test_binary_roc_auc": 0.8642,
                # Tactic - validation
                "val_tactic_macro_f1": 0.5042,
                "val_tactic_f1_dd": 0.6977,
                "val_tactic_f1_tm": 0.1818,
                "val_tactic_f1_ci": 0.6538,
                "val_tactic_f1_ki": 0.1905,
                # Tactic - test in-domain
                "test_tactic_macro_f1": 0.4673,
                "test_tactic_f1_dd": 0.5405,
                "test_tactic_f1_tm": 0.1000,
                "test_tactic_f1_ci": 0.6522,
                "test_tactic_f1_ki": 0.2727,
            },
        },
    }

    # Labels
    BINARY_LABELS = {0: "Non-Gaslighting", 1: "Gaslighting"}

    # Tactic model outputs 5 classes (0 = Non-Gaslighting, 1-4 = tactics).
    # For display we only show the tactic name when class > 0.
    TACTIC_LABELS = {
        0: "Non-Gaslighting",
        1: "Distortion & Denial",
        2: "Trivialization & Minimization",
        3: "Coercion & Intimidation",
        4: "Knowledge Invalidation",
    }

    TACTIC_DESCRIPTIONS = {
        1: "**Distortion & Denial** - Rewrites or denies documented facts, reshapes past events "
           "to alter how they are perceived.",
        2: "**Trivialization & Minimization** - Downplays or mocks concerns, frames them as "
           "insignificant, exaggerated, or emotionally irrational.",
        3: "**Coercion & Intimidation** - Pressures, threatens, or silences through fear, "
           "aggression, name-calling, or social dominance.",
        4: "**Knowledge Invalidation** - Attacks cognitive capacity specifically; implies the "
           "target is incapable of understanding or making valid judgments.",
    }

    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
    MAX_LENGTH = 128
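
# NOTE: Config.MAX_LENGTH mirrors the training setup (128 tokens, per the
# About tab); the tokenizer truncates anything longer, so very long posts
# are judged on their first ~128 tokens only.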
# ---------------------------------------------------------------------------
# MODEL CACHE - loads from Hugging Face Hub
# ---------------------------------------------------------------------------
class ModelCache:
    def __init__(self):
        self._cache: dict = {}

    def load(self, model_key: str) -> dict:
        if model_key in self._cache:
            return self._cache[model_key]
        info = Config.MODELS[model_key]
        print(f" Loading {info['display']} from Hugging Face Hub ...")

        def _load(repo_id):
            print(f" Fetching: {repo_id}")
            tok = AutoTokenizer.from_pretrained(repo_id)
            model = AutoModelForSequenceClassification.from_pretrained(repo_id)
            model.to(Config.DEVICE).eval()
            return tok, model

        try:
            b_tok, b_model = _load(info["binary_repo"])
            t_tok, t_model = _load(info["tactic_repo"])
        except Exception as e:
            raise RuntimeError(
                f"Failed to load {info['display']} from Hub.\n"
                f"Binary repo: {info['binary_repo']}\n"
                f"Tactic repo: {info['tactic_repo']}\n"
                f"Error: {e}"
            )

        entry = {
            "binary": {"tokenizer": b_tok, "model": b_model},
            "tactic": {"tokenizer": t_tok, "model": t_model},
            "info": info,
        }
        self._cache[model_key] = entry
        print(f" {info['display']} ready")
        return entry
_cache = ModelCache()
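
# Optional warm-up (sketch; assumption: you would rather pay model download
# time at startup than on the first user request). Uncomment to preload all:
# for _key in Config.MODELS:
#     _cache.load(_key)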
# ---------------------------------------------------------------------------
# INFERENCE HELPERS
# ---------------------------------------------------------------------------
def _infer(tokenizer, model, text: str):
    """Tokenize, run model, return (probs_np, pred_int, confidence_float)."""
    enc = tokenizer(
        text,
        truncation=True,
        max_length=Config.MAX_LENGTH,
        padding=True,
        return_tensors="pt",
    )
    enc = {k: v.to(Config.DEVICE) for k, v in enc.items()}
    # Strip token_type_ids for models that do not use them
    enc.pop("token_type_ids", None)
    with torch.no_grad():
        logits = model(**enc).logits
    probs = torch.softmax(logits, dim=-1)[0].cpu().numpy()
    pred = int(np.argmax(probs))
    return probs, pred, float(probs[pred])
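
# Direct-call sketch (hypothetical, outside the Gradio UI):
#   entry = _cache.load("roberta-tagalog")
#   probs, pred, conf = _infer(entry["binary"]["tokenizer"],
#                              entry["binary"]["model"],
#                              "sample Taglish text")
# probs is a numpy array of class probabilities; pred indexes
# Config.BINARY_LABELS (binary head) or Config.TACTIC_LABELS (tactic head).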
# ---------------------------------------------------------------------------
# SINGLE TEXT PREDICTION
# ---------------------------------------------------------------------------
def predict_sequential(text: str, model_key: str):
    if not text or not text.strip():
        return "Please enter some text to analyze.", None, None
    try:
        models = _cache.load(model_key)
        info = models["info"]
        perf = info["performance"]

        # Step 1: Binary
        b_probs, b_pred, b_conf = _infer(
            models["binary"]["tokenizer"],
            models["binary"]["model"],
            text,
        )
        is_gas = b_pred == 1
        binary_label = Config.BINARY_LABELS[b_pred]
        binary_prob_dict = {
            "Non-Gaslighting": float(b_probs[0]),
            "Gaslighting": float(b_probs[1]),
        }

        # Step 2: Tactic (only if gaslighting)
        tactic_section = ""
        # Zeroed probabilities prevent Gradio UI crashes when text is non-GL
        tactic_prob_dict = {
            "Distortion & Denial": 0.0,
            "Trivialization & Minimization": 0.0,
            "Coercion & Intimidation": 0.0,
            "Knowledge Invalidation": 0.0,
        }
        if is_gas:
            t_probs, t_pred, t_conf = _infer(
                models["tactic"]["tokenizer"],
                models["tactic"]["model"],
                text,
            )
            tactic_label = Config.TACTIC_LABELS[t_pred]
            tactic_desc = Config.TACTIC_DESCRIPTIONS.get(
                t_pred, "_No description available._"
            )
            # Only expose tactic probabilities for the 4 gaslighting classes
            tactic_prob_dict = {
                Config.TACTIC_LABELS[i]: float(t_probs[i])
                for i in range(1, 5)
            }
            tactic_section = f"""
### Tactic: {tactic_label}

**Confidence:** {t_conf:.1%}

{tactic_desc}
"""
        else:
            tactic_section = (
                "_No tactic classification - text is Non-Gaslighting._"
            )

        # Format result card
        result = f"""
# Result: {binary_label}

**Binary Confidence:** {b_conf:.1%}

---
{tactic_section}
---

## Model: {info['display']}

| | Validation | Test (In-Domain) |
|---|---|---|
| **Binary Macro-F1** | {perf['val_binary_macro_f1']:.4f} | {perf['test_binary_macro_f1']:.4f} |
| **Gas. Precision / Recall / F1** | {perf['val_binary_gas_p']:.3f} / {perf['val_binary_gas_r']:.3f} / {perf['val_binary_gas_f1']:.3f} | {perf['test_binary_gas_p']:.3f} / {perf['test_binary_gas_r']:.3f} / {perf['test_binary_gas_f1']:.3f} |
| **Binary ROC-AUC** | {perf['val_binary_roc_auc']:.4f} | {perf['test_binary_roc_auc']:.4f} |
| **Tactic Macro-F1** | {perf['val_tactic_macro_f1']:.4f} | {perf['test_tactic_macro_f1']:.4f} |
| **F1: D&D / T&M / C&I / KI** | {perf['val_tactic_f1_dd']:.3f} / {perf['val_tactic_f1_tm']:.3f} / {perf['val_tactic_f1_ci']:.3f} / {perf['val_tactic_f1_ki']:.3f} | {perf['test_tactic_f1_dd']:.3f} / {perf['test_tactic_f1_tm']:.3f} / {perf['test_tactic_f1_ci']:.3f} / {perf['test_tactic_f1_ki']:.3f} |
"""
        return result, binary_prob_dict, tactic_prob_dict
    except Exception as e:
        return f"Error: {e}\n\n{traceback.format_exc()}", None, None
# ---------------------------------------------------------------------------
# BATCH PREDICTION
# ---------------------------------------------------------------------------
def batch_predict(file, model_key: str):
    try:
        df = pd.read_csv(file.name)
        if "sentence" not in df.columns:
            return pd.DataFrame({"Error": ["CSV must contain a 'sentence' column"]})
        models = _cache.load(model_key)
        b_tok, b_mod = models["binary"]["tokenizer"], models["binary"]["model"]
        t_tok, t_mod = models["tactic"]["tokenizer"], models["tactic"]["model"]
        b_labels, b_confs = [], []
        t_labels, t_confs = [], []
        for text in df["sentence"].astype(str):
            b_probs, b_pred, b_conf = _infer(b_tok, b_mod, text)
            b_labels.append(Config.BINARY_LABELS[b_pred])
            b_confs.append(f"{b_conf:.1%}")
            if b_pred == 1:
                t_probs, t_pred, t_conf = _infer(t_tok, t_mod, text)
                t_labels.append(Config.TACTIC_LABELS[t_pred])
                t_confs.append(f"{t_conf:.1%}")
            else:
                t_labels.append("N/A")
                t_confs.append("N/A")
        df["binary_prediction"] = b_labels
        df["binary_confidence"] = b_confs
        df["tactic_prediction"] = t_labels
        df["tactic_confidence"] = t_confs
        return df
    except Exception as e:
        return pd.DataFrame({"Error": [str(e)], "Traceback": [traceback.format_exc()]})
# ---------------------------------------------------------------------------
# EXAMPLE TEXTS (political Taglish, aligned with training domain)
# ---------------------------------------------------------------------------
EXAMPLES = {
    "Distortion & Denial - History rewrite":
        "Hindi totoo na may human rights abuses nung Martial Law, gawa-gawa lang yan ng mga kalaban nila.",
    "Trivialization & Minimization - Downplay":
        "Ang arte niyo naman sa transport strike. Konting lakad lang, nagrereklamo na kayo agad na para kayong pinahirapan ng todo",
    "Coercion & Intimidation - Scaring":
        "Kung patuloy mong babatikosin ang gobyerno, wag kang magtaka kung may kumatok sa bahay mo isang gabi. Mag-ingat ka sa mga pino-post mo.",
    "Knowledge Invalidation - Attacking intellect":
        "Ang tanga mo naman mag-analisa ng economic data. Hindi ka economist, hindi mo kaya ang intindihin ang mga numero kahit ipaliwanag pa namin.",
    "Non-Gaslighting - Critique":
        "I respectfully disagree with this policy. Based on COA findings, the budget allocation lacks transparency and proper documentation.",
    "Non-Gaslighting - Questioning":
        "Can you provide a reliable source for that claim about the sudden increase in the budget? Gusto ko lang sana mabasa yung full context nung report.",
}
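
# Batch-path self-test (sketch; assumes the working directory is writable):
#   pd.DataFrame({"sentence": list(EXAMPLES.values())}).to_csv("demo.csv", index=False)
# Upload demo.csv in the Batch Processing tab to exercise batch_predict().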
# ---------------------------------------------------------------------------
# MODEL PROFILES
# ---------------------------------------------------------------------------
def _model_profile(model_key: str):
    strengths = {
        "roberta-tagalog": (
            "Best tactic Macro-F1 (0.6111) and highest C&I detection (0.8889). "
            "Strong ROC-AUC (0.8832) - good probabilistic discrimination. "
            "Monolingual Filipino pretraining captures Taglish political nuances."
        ),
        "mbert": (
            "Best binary Gas-F1 (0.7934) and Macro-F1 (0.8171) on test. "
            "Highest ROC-AUC (0.9252) - strongest probabilistic discrimination. "
            "Very high precision (0.9057) - fewest false positives."
        ),
        "xlm-roberta": (
            "Most balanced binary precision/recall (0.8167/0.7206). "
            "Second-best D&D detection (0.5405), behind RoBERTa-Tagalog (0.6250). "
            "SentencePiece tokenizer handles Tagalog morphology well."
        ),
    }
    limitations = {
        "roberta-tagalog": (
            "T&M remains hard (0.4615 test). "
            "Lower binary test Macro-F1 (0.7758) than mBERT. "
            "Most domain-specific - may struggle outside political discourse."
        ),
        "mbert": (
            "Precision-biased (P=0.9057 > R=0.7059) - misses more gaslighting. "
            "Weakest tactic Macro-F1 (0.4948). "
            "WordPiece oversegments Tagalog tokens."
        ),
        "xlm-roberta": (
            "Lowest tactic Macro-F1 (0.4673). T&M F1 = 0.1000 on test - near-random. "
            "Lowest binary ROC-AUC (0.8642). "
            "Over-sensitive to domain shift."
        ),
    }
    return strengths.get(model_key, ""), limitations.get(model_key, "")
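
# NOTE: the figures above duplicate Config.MODELS[...]["performance"]; after a
# retrain, update both places (or derive these strings from the dict).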
# ---------------------------------------------------------------------------
# GRADIO INTERFACE
# ---------------------------------------------------------------------------
def _model_choices():
    return [
        (f"{Config.MODELS[k]['display']} - {Config.MODELS[k]['description']}", k)
        for k in Config.MODELS
    ]
def create_interface():
    css = """
    .prediction-output {
        font-size: 15px !important;
        line-height: 1.8 !important;
        padding: 24px !important;
        margin-top: 8px !important;
        border: 1px solid var(--block-border-color) !important;
        border-radius: 10px !important;
        background-color: var(--block-background-fill) !important;
        color: var(--body-text-color) !important;
    }
    .gradio-container { max-width: 1400px; margin: auto; }
    .model-card {
        border: 1px solid var(--block-border-color);
        border-radius: 8px;
        padding: 15px;
        margin: 10px 0;
        background-color: var(--block-background-fill);
    }
    """
    with gr.Blocks(title="Taglish Gaslighting Detector", theme=gr.themes.Soft(), css=css) as app:
        gr.Markdown("""
# Taglish Political Gaslighting Detection

**Sequential Pipeline: Binary Detection -> Tactic Identification**

Trained on Philippine political Reddit discourse (r/Philippines, r/PhilippinesPolitics, r/31MillionRegrets).

Dataset: **944 gold-standard samples** - Purely human-annotated - Balanced tactic classes (118 per tactic) - Key-sentence extracted
""")
        with gr.Tabs():
            # Analyze Text tab
            with gr.Tab("Analyze Text"):
                gr.Markdown("### Analyze a single Taglish post")
                with gr.Row():
                    with gr.Column(scale=1):
                        text_input = gr.Textbox(
                            label="Input text",
                            placeholder="Paste Tagalog / Taglish text here ...",
                            lines=6,
                        )
                        model_dd = gr.Dropdown(
                            choices=_model_choices(),
                            value="roberta-tagalog",
                            label="Model",
                            info="RoBERTa-Tagalog is recommended for political Taglish.",
                        )
                        analyze_btn = gr.Button("Analyze", variant="primary", size="lg")
                        gr.Markdown("""
**Model guide (v2 - balanced dataset)**

- **mBERT** - best binary Gas-F1 (0.7934) and Macro-F1 (0.8171); ROC-AUC 0.9252
- **RoBERTa-Tagalog** - best tactic Macro-F1 (0.6111); Gas-F1 0.7597; ROC-AUC 0.8832
- **XLM-RoBERTa** - balanced binary precision/recall (0.8167/0.7206); ROC-AUC 0.8642
""")
                    with gr.Column(scale=2):
                        pred_output = gr.Markdown(
                            label="Result",
                            elem_classes=["prediction-output"],
                        )
                        with gr.Row():
                            binary_plot = gr.Label(
                                label="Binary probabilities",
                                num_top_classes=2,
                            )
                            tactic_plot = gr.Label(
                                label="Tactic probabilities (if gaslighting)",
                                num_top_classes=4,
                            )
                gr.Markdown("### Quick examples")
                example_rows = [
                    list(EXAMPLES.keys())[:3],
                    list(EXAMPLES.keys())[3:],
                ]
                for row_keys in example_rows:
                    with gr.Row():
                        for name in row_keys:
                            gr.Button(name, size="sm").click(
                                fn=lambda n=name: EXAMPLES[n],
                                inputs=None,
                                outputs=text_input,
                            )
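                # The n=name default argument freezes each loop iteration's
                # example key; a plain closure would make every button paste
                # the last example.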
                analyze_btn.click(
                    fn=predict_sequential,
                    inputs=[text_input, model_dd],
                    outputs=[pred_output, binary_plot, tactic_plot],
                )
            # Batch Processing tab
            with gr.Tab("Batch Processing"):
                gr.Markdown("""
### Process multiple texts from a CSV file

Upload a CSV with a `sentence` column.
The pipeline runs binary classification on every row,
then tactic classification on rows flagged as gaslighting.
""")
                with gr.Row():
                    with gr.Column():
                        file_input = gr.File(label="Upload CSV", file_types=[".csv"])
                        batch_model = gr.Dropdown(
                            choices=_model_choices(),
                            value="roberta-tagalog",
                            label="Model",
                        )
                        process_btn = gr.Button("Process", variant="primary", size="lg")
                        gr.Markdown("""
**Required CSV format**

```
sentence
"First text here"
"Second text here"
```
""")
                    with gr.Column():
                        batch_output = gr.Dataframe(
                            label="Results preview",
                            wrap=True,
                            interactive=False,
                        )
                        download_btn = gr.File(label="Download results")

                def process_and_save(file, model):
                    if file is None:
                        return pd.DataFrame({"Error": ["Please upload a CSV file"]}), None
                    results = batch_predict(file, model)
                    out_path = "batch_predictions.csv"
                    results.to_csv(out_path, index=False, encoding="utf-8-sig")
                    return results, out_path
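                # utf-8-sig prepends a BOM so Excel auto-detects UTF-8 and
                # renders Tagalog characters correctly when opening the CSV.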
                process_btn.click(
                    fn=process_and_save,
                    inputs=[file_input, batch_model],
                    outputs=[batch_output, download_btn],
                )
            # Model Performance tab
            with gr.Tab("Model Performance"):
                gr.Markdown("## Evaluation Results *(in-domain test set - train_model_v6.py)*")

                # Binary table
                gr.Markdown("### Binary Classification")
                binary_rows = []
                for k, info in Config.MODELS.items():
                    p = info["performance"]
                    binary_rows.append({
                        "Model": info["display"],
                        "Val Macro-F1": f"{p['val_binary_macro_f1']:.4f}",
                        "Test Gas-P": f"{p['test_binary_gas_p']:.4f}",
                        "Test Gas-R": f"{p['test_binary_gas_r']:.4f}",
                        "Test Gas-F1": f"{p['test_binary_gas_f1']:.4f}",
                        "Test Macro-F1": f"{p['test_binary_macro_f1']:.4f}",
                        "Test ROC-AUC": f"{p['test_binary_roc_auc']:.4f}",
                    })
                gr.Dataframe(pd.DataFrame(binary_rows), wrap=True)
                gr.Markdown("""
> Confusion matrices (2x2 per model) are saved as
> `confusion_matrix_test_id.csv` under each model's output folder.

**Key findings - Binary:**

- **mBERT** achieves the best binary Gas-F1 (0.7934) and Macro-F1 (0.8171) on the test set
- **mBERT** also leads on ROC-AUC (0.9252) - strongest probabilistic discrimination
- **mBERT** is heavily precision-biased (P=0.9057 > R=0.7059) - fewest false positives but misses more true positives
- **RoBERTa-Tagalog** and **XLM-RoBERTa** share the same recall (0.7206) with different precision profiles
- **XLM-RoBERTa** has the most balanced binary precision/recall (0.8167 / 0.7206)
- All 3 models exceed ROC-AUC 0.86 - all are strong probabilistic classifiers
""")
                gr.Markdown("---")

                # Tactic table
                gr.Markdown("### Tactic Classification (5-class: Non-Gas + 4 tactics)")
                tactic_rows = []
                for k, info in Config.MODELS.items():
                    p = info["performance"]
                    tactic_rows.append({
                        "Model": info["display"],
                        "Val Macro-F1": f"{p['val_tactic_macro_f1']:.4f}",
                        "Test Macro-F1": f"{p['test_tactic_macro_f1']:.4f}",
                        "Test F1 D&D": f"{p['test_tactic_f1_dd']:.4f}",
                        "Test F1 T&M": f"{p['test_tactic_f1_tm']:.4f}",
                        "Test F1 C&I": f"{p['test_tactic_f1_ci']:.4f}",
                        "Test F1 KI": f"{p['test_tactic_f1_ki']:.4f}",
                    })
                gr.Dataframe(pd.DataFrame(tactic_rows), wrap=True)
gr.Markdown("""
> Confusion matrices (5x5 per model) are saved as
> `confusion_matrix_test_id.csv` under each model's output folder.
**Key findings - Tactic:**
- **RoBERTa-Tagalog** leads tactic Macro-F1 (0.6111) - best overall tactic classifier
- **T&M (Trivialization & Minimization) is the hardest tactic** across all models:
RoBERTa 0.4615, mBERT 0.2857, XLM-RoBERTa 0.1000 - sarcasm/dismissal overlaps with normal discourse
- **C&I (Coercion & Intimidation) is the easiest** across all models:
RoBERTa 0.8889, mBERT 0.6829, XLM-RoBERTa 0.6522 - aggressive language is lexically distinctive
- **RoBERTa-Tagalog C&I = 0.8889** - near-perfect detection of intimidation language
- **XLM-RoBERTa T&M = 0.1000** - near-random; sarcasm and dismissal are opaque to cross-lingual models
- **mBERT** has the weakest tactic Macro-F1 (0.4948) despite leading on binary
- Approximately 83 training samples per tactic class remains the primary performance ceiling
**Metric guide (Section 4.5):**
- **Gas-F1** - F1 for the Gaslighting (positive) class; primary binary metric
- **Macro-F1** - equal weight to all classes; primary tactic metric
- **ROC-AUC** - probability calibration; binary only
- **Confusion matrix** - saved per model per test split (binary: 2x2, tactic: 5x5)
""")
gr.Markdown("---")
# Per-model accordions
gr.Markdown("### Per-model full breakdown")
for k, info in Config.MODELS.items():
p = info["performance"]
strengths, limitations = _model_profile(k)
with gr.Accordion(f"{info['display']}", open=False):
gr.Markdown(f"""
**{info['display']}**
**Binary Classification**
| Metric | Validation | Test (In-Domain) |
|--------|-----------|-----------------|
| Macro-F1 | {p['val_binary_macro_f1']:.4f} | {p['test_binary_macro_f1']:.4f} |
| Gas. Precision | {p['val_binary_gas_p']:.4f} | {p['test_binary_gas_p']:.4f} |
| Gas. Recall | {p['val_binary_gas_r']:.4f} | {p['test_binary_gas_r']:.4f} |
| Gas. F1 | {p['val_binary_gas_f1']:.4f} | {p['test_binary_gas_f1']:.4f} |
| ROC-AUC | {p['val_binary_roc_auc']:.4f} | {p['test_binary_roc_auc']:.4f} |
| Confusion matrix | - | confusion_matrix_test_id.csv (2x2) |
**Tactic Classification**
| Metric | Validation | Test (In-Domain) |
|--------|-----------|-----------------|
| Macro-F1 | {p['val_tactic_macro_f1']:.4f} | {p['test_tactic_macro_f1']:.4f} |
| F1 Distortion & Denial | {p['val_tactic_f1_dd']:.4f} | {p['test_tactic_f1_dd']:.4f} |
| F1 Trivialization & Min. | {p['val_tactic_f1_tm']:.4f} | {p['test_tactic_f1_tm']:.4f} |
| F1 Coercion & Intimidation | {p['val_tactic_f1_ci']:.4f} | {p['test_tactic_f1_ci']:.4f} |
| F1 Knowledge Invalidation | {p['val_tactic_f1_ki']:.4f} | {p['test_tactic_f1_ki']:.4f} |
| Confusion matrix | - | confusion_matrix_test_id.csv (5x5) |
**Strengths:** {strengths}
**Limitations:** {limitations}
""")
            # Tactics Guide tab
            with gr.Tab("Tactics Guide"):
                gr.Markdown("## Understanding the 4 Gaslighting Tactics")
                with gr.Accordion("Distortion & Denial", open=True):
                    gr.Markdown("""
**Definition:** Statements that reinterpret, rewrite, or deny reality,
particularly by reshaping past or present events to alter how they are perceived.

**Key linguistic cues:** temporal markers ("dati", "noon", "kanina"),
claims about how events "really" happened, false certainty about another's experience.

**Examples:**

- *"Hindi naman ganyan nangyari dati."*
- *"Walang nangyaring martial law abuses. Propaganda lang yan."*
- *"Binabago mo lang ang mga salita ko para lumabas akong masama."*
""")
                with gr.Accordion("Trivialization & Minimization", open=False):
                    gr.Markdown("""
**Definition:** Statements that downplay or mock concerns, framing them as
insignificant, exaggerated, or emotionally irrational.

**Key linguistic cues:** "OA/arte/joke lang", "di big deal", "move on ka na",
"kalma ka lang".

**Examples:**

- *"Ang liit na bagay, pinapalaki mo."*
- *"Drama mo naman, OA ka talaga."*
- *"Wala namang mangyayari kahit magreklamo ka."*
""")
                with gr.Accordion("Coercion & Intimidation", open=False):
                    gr.Markdown("""
**Definition:** Statements that pressure, threaten, or silence through
fear, aggression, name-calling, or social dominance.

**Key linguistic cues:** demeaning commands ("tumahimik ka na"),
shaming language, bullying phrases intended to suppress speech.

**Note:** Plain blackmail or threats without a gaslighting mechanism
are classified as Non-Gaslighting per the codebook.

**Examples:**

- *"Tumahimik ka na lang, wala kang alam."*
- *"Arte mo, nakakainis ka."*
- *"Mga katulad mo ang dahilan kung bakit hindi makatayo ang Pilipinas."*
""")
                with gr.Accordion("Knowledge Invalidation", open=False):
                    gr.Markdown("""
**Definition:** Statements that attack cognitive capacity specifically -
implying the target is incapable of understanding, reasoning, or making
valid judgments.

**Key linguistic cues:** intelligence insults ("bobo", "tanga",
"mahina umintindi"), claims the person cannot grasp "simple" ideas.

**Examples:**

- *"Hindi mo kasi naiintindihan, ang bobo mo."*
- *"Simple lang yan, di mo pa gets."*
- *"Ang tanga mo naman mag-analisa ng economic data."*
""")
                gr.Markdown("""
---

**Decision order (from the annotation codebook):**

1. If threats are central - **Coercion & Intimidation**
2. Else if denial/rewrite drives the move - **Distortion & Denial**
3. Else if downplaying drives the move - **Trivialization & Minimization**
4. Else if cognitive attack is specific - **Knowledge Invalidation**
""")
            # About tab
            with gr.Tab("About"):
                gr.Markdown(f"""
## About This System

### Research Context

Developed as part of a thesis on detecting manipulation in Taglish political discourse.
The system uses a **sequential classification pipeline**:

1. **Binary Classifier** - Gaslighting vs. Non-Gaslighting
2. **Tactic Classifier** - identifies the specific tactic (4 classes + Non-Gaslighting)

### Dataset

| Item | Value |
|------|-------|
| Total annotated rows | 2,134 |
| Inter-annotator kappa (binary) | 0.8133 |
| Inter-annotator kappa (tactic) | 0.8646 |
| Gold-standard rows used | 944 |
| Training / Val / Test split | 662 / 143 / 139 |
| Tactic balance (train) | 83 per tactic class (perfectly balanced) |
| Sentence extraction | Key-sentence heuristic (cue words + position) |
| Sources | r/Philippines, r/PhilippinesPolitics, r/31MillionRegrets |
| Language | Taglish (Tagalog-English code-switching) |

### Best Model Summary (in-domain test set - v2, balanced dataset)

| Task | Best Model | Primary Metric | ROC-AUC | Hardest Tactic |
|------|-----------|---------------|---------|---------------|
| Binary (Gas-F1) | mBERT | Gas-F1 = 0.7934 | 0.9252 | - |
| Binary (ROC-AUC) | mBERT | ROC-AUC = 0.9252 | 0.9252 | - |
| Tactic (Macro-F1) | RoBERTa-Tagalog | Macro-F1 = 0.6111 | - | T&M (F1 = 0.4615) |

### Evaluation Metrics (Section 4.5)

| Task | Metrics |
|------|---------|
| Binary | Gas. Precision, Gas. Recall, Gas. F1, Macro-F1, ROC-AUC, Confusion matrix (2x2) |
| Tactic | Per-class P / R / F1 (D&D, T&M, C&I, KI), Macro-F1, Confusion matrix (5x5) |

Confusion matrices are saved per model under `model_outputs/<task>_<model>/confusion_matrix_test_id.csv`.

### Technical Details

- Framework: PyTorch + Hugging Face Transformers
- Max sequence length: 128 tokens
- Epochs: up to 8 with early stopping (patience = 3)
- Optimizer: AdamW, lr = 1e-5, warmup ratio = 0.1
- Label smoothing: 0.1
- Checkpoint selection: Gas-F1 (binary), Macro-F1 (tactic)
- Class-weighted cross-entropy loss
- Hardware: {Config.DEVICE.upper()}

### Appropriate Uses

Research on online manipulation, content moderation assistance,
educational tool, supporting human moderators.

### Limitations

Trained on political discourse - performance may degrade on other domains.
Approximately 83 samples per tactic class limits fine-grained detection.
Not a substitute for human judgment. Cultural nuances may be missed.

### Citation

```
Gomez, Tugado (2026). Transformers for Taglish Political Gaslighting:
Binary Detection, Tactic Classification, and Zero-Shot Transfer.
Ateneo De Naga University.
```

---

**Version**: 2.0 - **Updated**: {pd.Timestamp.now().strftime('%B %Y')}
""")

        gr.Markdown("""
---

**Disclaimer:** Research prototype. Results should be interpreted carefully and in context.
Always apply human judgment, especially for content moderation decisions.
""")

    return app
# ---------------------------------------------------------------------------
# MAIN
# ---------------------------------------------------------------------------
if __name__ == "__main__":
print("\n" + "=" * 70)
print("TAGLISH POLITICAL GASLIGHTING DETECTION - APP")
print("=" * 70)
print(f"Device : {Config.DEVICE}")
print("Models will be loaded from Hugging Face Hub on first use.")
print("\nLaunching Gradio ...\n" + "=" * 70)
app = create_interface()
app.launch()
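    # On Spaces the bare launch() above is sufficient; for a local LAN demo
    # you could instead use e.g. app.launch(server_name="0.0.0.0", server_port=7860)
    # (optional local setup, not required by the Space).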