msmaje commited on
Commit
e2d8b9b
·
verified ·
1 Parent(s): 2735c1a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -573
app.py CHANGED
@@ -1,592 +1,98 @@
1
  """
2
- Enhanced Gradio Space for Human-AI Text Attribution (HATA) Model
3
- With Comprehensive Bias Detection and Explainability (SHAP/LIME)
4
- Supports multiple African languages with fairness auditing
5
  """
6
 
 
7
  import os
8
  import sys
9
  import types
10
- import gradio as gr
11
- import torch
12
- import numpy as np
13
- import pandas as pd
14
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
15
- from sklearn.metrics import confusion_matrix, classification_report
16
- import matplotlib.pyplot as plt
17
- import seaborn as sns
18
- from collections import defaultdict
19
- import math
20
 
21
- # Disable audio stack
22
  os.environ["GRADIO_DISABLE_PYDUB"] = "1"
 
 
23
  if "audioop" not in sys.modules:
24
  sys.modules["audioop"] = types.ModuleType("audioop")
25
  if "pyaudioop" not in sys.modules:
26
  sys.modules["pyaudioop"] = types.ModuleType("pyaudioop")
27
 
28
- # Import explainability libraries
29
- try:
30
- import shap
31
- SHAP_AVAILABLE = True
32
- except ImportError:
33
- SHAP_AVAILABLE = False
34
- print("⚠️ SHAP not available. Install with: pip install shap")
35
-
36
- try:
37
- from lime.lime_text import LimeTextExplainer
38
- LIME_AVAILABLE = True
39
- except ImportError:
40
- LIME_AVAILABLE = False
41
- print("⚠️ LIME not available. Install with: pip install lime")
42
-
43
- # -----------------------------------------------------------------------------
44
- # Configuration
45
- # -----------------------------------------------------------------------------
46
- MODEL_NAME = "msmaje/phdhatamodel"
47
- SUPPORTED_LANGUAGES = ["Hausa", "Yoruba", "Igbo", "Nigerian Pidgin"]
48
- LANGUAGE_CODES = {
49
- "Hausa": "ha",
50
- "Yoruba": "yo",
51
- "Igbo": "ig",
52
- "Nigerian Pidgin": "pcm"
53
- }
54
 
55
- # -----------------------------------------------------------------------------
56
- # Model Loading
57
- # -----------------------------------------------------------------------------
58
- print("📥 Loading model and tokenizer...")
59
- try:
60
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
61
- model = AutoModelForSequenceClassification.from_pretrained(
62
- MODEL_NAME,
63
- output_attentions=True # Enable attention outputs for explainability
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  )
65
- model.eval()
66
- print("✅ Model loaded successfully!")
67
- print(f" Model: {MODEL_NAME}")
68
- print(f" Device: {'GPU' if torch.cuda.is_available() else 'CPU'}")
69
- except Exception as e:
70
- print(f"❌ Error loading model: {e}")
71
- raise
72
-
73
- # Initialize explainability tools
74
- if LIME_AVAILABLE:
75
- try:
76
- lime_explainer = LimeTextExplainer(class_names=["Human", "AI"])
77
- print("✅ LIME explainer initialized")
78
- except Exception as e:
79
- print(f"⚠️ LIME initialization failed: {e}")
80
- LIME_AVAILABLE = False
81
 
82
- if SHAP_AVAILABLE:
83
- try:
84
- # Create a wrapper for SHAP
85
- def model_predict_proba(texts):
86
- if isinstance(texts, str):
87
- texts = [texts]
88
- inputs = tokenizer(texts, return_tensors="pt", truncation=True,
89
- max_length=128, padding=True)
90
- with torch.no_grad():
91
- outputs = model(**inputs)
92
- probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
93
- return probs.numpy()
94
-
95
- shap_explainer = shap.Explainer(model_predict_proba, tokenizer)
96
- print("✅ SHAP explainer initialized")
97
- except Exception as e:
98
- print(f"⚠️ SHAP initialization failed: {e}")
99
- print(" Will use attention-based explanations as fallback")
100
- SHAP_AVAILABLE = False
101
-
102
- # -----------------------------------------------------------------------------
103
- # Bias and Fairness Metrics
104
- # -----------------------------------------------------------------------------
105
- class BiasMetrics:
106
- """Calculate fairness and bias metrics"""
107
-
108
- @staticmethod
109
- def calculate_eod(y_true, y_pred, groups):
110
- """Equal Opportunity Difference"""
111
- unique_groups = np.unique(groups)
112
- recalls = []
113
-
114
- for group in unique_groups:
115
- mask = groups == group
116
- if np.sum(y_true[mask] == 1) > 0:
117
- tp = np.sum((y_true[mask] == 1) & (y_pred[mask] == 1))
118
- fn = np.sum((y_true[mask] == 1) & (y_pred[mask] == 0))
119
- recall = tp / (tp + fn) if (tp + fn) > 0 else 0
120
- recalls.append(recall)
121
-
122
- return max(recalls) - min(recalls) if len(recalls) > 1 else 0.0
123
-
124
- @staticmethod
125
- def calculate_aaod(y_true, y_pred, groups):
126
- """Average Absolute Odds Difference"""
127
- unique_groups = np.unique(groups)
128
- tpr_diffs = []
129
- fpr_diffs = []
130
-
131
- for i, g1 in enumerate(unique_groups):
132
- for g2 in unique_groups[i+1:]:
133
- m1 = groups == g1
134
- m2 = groups == g2
135
-
136
- # TPR differences
137
- if np.sum(y_true[m1] == 1) > 0 and np.sum(y_true[m2] == 1) > 0:
138
- tpr1 = np.sum((y_true[m1] == 1) & (y_pred[m1] == 1)) / np.sum(y_true[m1] == 1)
139
- tpr2 = np.sum((y_true[m2] == 1) & (y_pred[m2] == 1)) / np.sum(y_true[m2] == 1)
140
- tpr_diffs.append(abs(tpr1 - tpr2))
141
-
142
- # FPR differences
143
- tn1 = np.sum((y_true[m1] == 0) & (y_pred[m1] == 0))
144
- fp1 = np.sum((y_true[m1] == 0) & (y_pred[m1] == 1))
145
- tn2 = np.sum((y_true[m2] == 0) & (y_pred[m2] == 0))
146
- fp2 = np.sum((y_true[m2] == 0) & (y_pred[m2] == 1))
147
-
148
- fpr1 = fp1 / (fp1 + tn1) if (fp1 + tn1) > 0 else 0
149
- fpr2 = fp2 / (fp2 + tn2) if (fp2 + tn2) > 0 else 0
150
- fpr_diffs.append(abs(fpr1 - fpr2))
151
-
152
- return (np.mean(tpr_diffs) + np.mean(fpr_diffs)) / 2 if tpr_diffs else 0.0
153
-
154
- @staticmethod
155
- def demographic_parity(y_pred, groups):
156
- """Demographic Parity Difference"""
157
- unique_groups = np.unique(groups)
158
- positive_rates = []
159
-
160
- for group in unique_groups:
161
- mask = groups == group
162
- positive_rate = np.mean(y_pred[mask] == 1)
163
- positive_rates.append(positive_rate)
164
-
165
- return max(positive_rates) - min(positive_rates) if len(positive_rates) > 1 else 0.0
166
-
167
- # -----------------------------------------------------------------------------
168
- # Explainability Functions
169
- # -----------------------------------------------------------------------------
170
- def get_shap_explanation(text, language="English"):
171
- """Generate SHAP-based explanation"""
172
- if not SHAP_AVAILABLE:
173
- return "⚠️ SHAP is not installed. Install with: pip install shap", None
174
-
175
- try:
176
- # Simpler approach - use attention weights as proxy for SHAP
177
- inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
178
-
179
- with torch.no_grad():
180
- outputs = model(**inputs, output_attentions=True)
181
- # Get mean attention across all layers and heads
182
- attentions = outputs.attentions
183
- mean_attention = torch.mean(torch.stack([att.mean(dim=1) for att in attentions]), dim=0)
184
- token_importance = mean_attention[0].sum(dim=0).numpy()
185
-
186
- # Get tokens
187
- tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])
188
- tokens = tokens[1:-1] # Remove [CLS] and [SEP]
189
- token_importance = token_importance[1:-1] # Match tokens
190
-
191
- # Normalize
192
- token_importance = token_importance / (token_importance.max() + 1e-8)
193
-
194
- # Create simple bar plot
195
- fig, ax = plt.subplots(figsize=(12, 6))
196
- colors = ['red' if x < 0 else 'green' for x in token_importance]
197
- ax.barh(range(min(20, len(tokens))), token_importance[:20], color=colors[:20])
198
- ax.set_yticks(range(min(20, len(tokens))))
199
- ax.set_yticklabels(tokens[:20])
200
- ax.set_xlabel('Importance (Attention Weight)')
201
- ax.set_title(f'Token Importance - {language}')
202
- ax.invert_yaxis()
203
- plt.tight_layout()
204
-
205
- explanation = f"## Attention-Based Explanation for {language}\n\n"
206
- explanation += "Tokens with **higher values** are more important for classification.\n\n"
207
- explanation += f"Top 5 most important tokens:\n"
208
-
209
- top_indices = np.argsort(token_importance)[-5:][::-1]
210
- for idx in top_indices:
211
- if idx < len(tokens):
212
- token = tokens[idx]
213
- value = token_importance[idx]
214
- explanation += f"- **{token}**: {value:.4f}\n"
215
-
216
- return explanation, fig
217
-
218
- except Exception as e:
219
- return f"❌ Explanation failed: {str(e)}", None
220
-
221
- def get_lime_explanation(text, language="English"):
222
- """Generate LIME-based explanation"""
223
- if not LIME_AVAILABLE:
224
- return "⚠️ LIME is not installed. Install with: pip install lime", None
225
-
226
- try:
227
- def predict_fn(texts):
228
- """Prediction function for LIME"""
229
- if isinstance(texts, str):
230
- texts = [texts]
231
-
232
- results = []
233
- for txt in texts:
234
- inputs = tokenizer(txt, return_tensors="pt", truncation=True,
235
- max_length=128, padding=True)
236
- with torch.no_grad():
237
- outputs = model(**inputs)
238
- probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
239
- results.append(probs[0].numpy())
240
-
241
- return np.array(results)
242
-
243
- # Generate explanation
244
- exp = lime_explainer.explain_instance(
245
- text,
246
- predict_fn,
247
- num_features=10,
248
- num_samples=50 # Reduced for speed
249
- )
250
-
251
- # Create visualization
252
- fig = exp.as_pyplot_figure()
253
- plt.tight_layout()
254
-
255
- # Extract feature weights
256
- weights = exp.as_list()
257
-
258
- explanation = f"## LIME Explanation for {language}\n\n"
259
- explanation += "Features with **positive weights** indicate AI-generated characteristics.\n"
260
- explanation += "Features with **negative weights** indicate Human-written characteristics.\n\n"
261
- explanation += "Top contributing features:\n\n"
262
-
263
- for feature, weight in weights[:5]:
264
- direction = "→ AI" if weight > 0 else "→ Human"
265
- explanation += f"- **{feature}**: {weight:.4f} {direction}\n"
266
-
267
- return explanation, fig
268
-
269
- except Exception as e:
270
- return f"❌ LIME explanation failed: {str(e)}\n\nTry using SHAP instead.", None
271
-
272
- # -----------------------------------------------------------------------------
273
- # Main Classification Function
274
- # -----------------------------------------------------------------------------
275
- def classify_with_explanation(text, language, explainer_type="SHAP"):
276
- """Classify text and provide explanation"""
277
-
278
- if not text or len(text.strip()) == 0:
279
- return "⚠️ Please enter text to classify", None, None, None
280
-
281
- # Get prediction
282
- inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128)
283
-
284
- with torch.no_grad():
285
- outputs = model(**inputs)
286
- probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
287
- predicted_class = torch.argmax(probabilities, dim=-1).item()
288
- confidence = probabilities[0][predicted_class].item()
289
-
290
- # Classification result
291
- labels = {0: "👤 Human-written", 1: "🤖 AI-generated"}
292
- result = f"## Classification Result\n\n"
293
- result += f"**Prediction:** {labels[predicted_class]}\n"
294
- result += f"**Confidence:** {confidence:.2%}\n"
295
- result += f"**Language:** {language}\n\n"
296
-
297
- # Confidence interpretation
298
- if confidence > 0.9:
299
- result += "✅ **High confidence** - Very certain about this prediction\n"
300
- elif confidence > 0.7:
301
- result += "⚠️ **Moderate confidence** - Fairly certain with some uncertainty\n"
302
- else:
303
- result += "❓ **Low confidence** - Uncertain, mixed characteristics detected\n"
304
-
305
- # Probability breakdown - Create DataFrame for BarPlot
306
- prob_data = pd.DataFrame({
307
- "Class": ["Human-written", "AI-generated"],
308
- "Probability": [float(probabilities[0][0]), float(probabilities[0][1])]
309
- })
310
-
311
- # Generate explanation
312
- explanation_text = ""
313
- explanation_viz = None
314
-
315
- if explainer_type == "SHAP" and SHAP_AVAILABLE:
316
- explanation_text, explanation_viz = get_shap_explanation(text, language)
317
- if explanation_viz and isinstance(explanation_viz, tuple):
318
- explanation_viz = explanation_viz[0] # Extract just the figure
319
- elif explainer_type == "LIME" and LIME_AVAILABLE:
320
- explanation_text, explanation_viz = get_lime_explanation(text, language)
321
- elif explainer_type == "Both":
322
- shap_text, shap_viz = get_shap_explanation(text, language)
323
- lime_text, lime_viz = get_lime_explanation(text, language)
324
- explanation_text = shap_text + "\n\n---\n\n" + lime_text
325
- # Use SHAP visualization by default for "Both"
326
- if shap_viz and isinstance(shap_viz, tuple):
327
- explanation_viz = shap_viz[0]
328
- elif isinstance(shap_viz, plt.Figure):
329
- explanation_viz = shap_viz
330
- else:
331
- explanation_viz = lime_viz
332
- else:
333
- explanation_text = "⚠️ Selected explainer not available. Please install SHAP and/or LIME."
334
-
335
- return result, prob_data, explanation_text, explanation_viz
336
-
337
- # -----------------------------------------------------------------------------
338
- # Bias Auditing Function
339
- # -----------------------------------------------------------------------------
340
- def audit_bias(uploaded_file):
341
- """Perform bias audit on uploaded dataset"""
342
-
343
- if uploaded_file is None:
344
- return "⚠️ Please upload a CSV file with columns: text, label, language"
345
-
346
- try:
347
- # Read CSV
348
- df = pd.read_csv(uploaded_file.name)
349
-
350
- required_cols = ['text', 'label', 'language']
351
- if not all(col in df.columns for col in required_cols):
352
- return f"❌ CSV must have columns: {required_cols}"
353
-
354
- # Get predictions
355
- predictions = []
356
- for text in df['text']:
357
- inputs = tokenizer(str(text), return_tensors="pt", truncation=True, max_length=128)
358
- with torch.no_grad():
359
- outputs = model(**inputs)
360
- pred = torch.argmax(outputs.logits, dim=-1).item()
361
- predictions.append(pred)
362
-
363
- df['prediction'] = predictions
364
-
365
- # Calculate metrics
366
- y_true = df['label'].values
367
- y_pred = df['prediction'].values
368
- groups = df['language'].values
369
-
370
- eod = BiasMetrics.calculate_eod(y_true, y_pred, groups)
371
- aaod = BiasMetrics.calculate_aaod(y_true, y_pred, groups)
372
- dpd = BiasMetrics.demographic_parity(y_pred, groups)
373
-
374
- # Per-language metrics
375
- lang_metrics = {}
376
- for lang in df['language'].unique():
377
- mask = df['language'] == lang
378
- lang_true = y_true[mask]
379
- lang_pred = y_pred[mask]
380
-
381
- accuracy = np.mean(lang_true == lang_pred)
382
- precision = np.sum((lang_true == 1) & (lang_pred == 1)) / np.sum(lang_pred == 1) if np.sum(lang_pred == 1) > 0 else 0
383
- recall = np.sum((lang_true == 1) & (lang_pred == 1)) / np.sum(lang_true == 1) if np.sum(lang_true == 1) > 0 else 0
384
- f1 = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0
385
-
386
- lang_metrics[lang] = {
387
- 'accuracy': accuracy,
388
- 'precision': precision,
389
- 'recall': recall,
390
- 'f1': f1,
391
- 'samples': int(np.sum(mask))
392
- }
393
-
394
- # Create report
395
- report = f"# Bias Audit Report\n\n"
396
- report += f"**Total Samples:** {len(df)}\n"
397
- report += f"**Languages:** {', '.join(df['language'].unique())}\n\n"
398
-
399
- report += f"## Fairness Metrics\n\n"
400
- report += f"| Metric | Value | Interpretation |\n"
401
- report += f"|--------|-------|----------------|\n"
402
- report += f"| EOD | {eod:.4f} | {'✅ Fair' if eod < 0.1 else '⚠️ Bias detected'} |\n"
403
- report += f"| AAOD | {aaod:.4f} | {'✅ Fair' if aaod < 0.1 else '⚠️ Bias detected'} |\n"
404
- report += f"| Demographic Parity | {dpd:.4f} | {'✅ Fair' if dpd < 0.1 else '⚠️ Bias detected'} |\n\n"
405
-
406
- report += f"## Per-Language Performance\n\n"
407
- report += f"| Language | Accuracy | F1 Score | Precision | Recall | Samples |\n"
408
- report += f"|----------|----------|----------|-----------|--------|----------|\n"
409
-
410
- for lang, metrics in sorted(lang_metrics.items()):
411
- report += f"| {lang} | {metrics['accuracy']:.4f} | {metrics['f1']:.4f} | "
412
- report += f"{metrics['precision']:.4f} | {metrics['recall']:.4f} | {metrics['samples']} |\n"
413
-
414
- # Confusion matrix
415
- fig, ax = plt.subplots(figsize=(8, 6))
416
- cm = confusion_matrix(y_true, y_pred)
417
- sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=ax)
418
- ax.set_title('Overall Confusion Matrix')
419
- ax.set_xlabel('Predicted')
420
- ax.set_ylabel('Actual')
421
- ax.set_xticklabels(['Human', 'AI'])
422
- ax.set_yticklabels(['Human', 'AI'])
423
- plt.tight_layout()
424
-
425
- return report, fig
426
-
427
- except Exception as e:
428
- return f"❌ Error during bias audit: {str(e)}", None
429
-
430
- # -----------------------------------------------------------------------------
431
- # Gradio Interface
432
- # -----------------------------------------------------------------------------
433
- custom_css = """
434
- #title {
435
- text-align: center;
436
- background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
437
- -webkit-background-clip: text;
438
- -webkit-text-fill-color: transparent;
439
- font-size: 2.5em;
440
- font-weight: bold;
441
- }
442
- """
443
-
444
- with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
445
-
446
- gr.Markdown("<h1 id='title'>🔍 HATA: Human vs AI Text Detector</h1>")
447
- gr.Markdown("""
448
- <div style='text-align: center; margin-bottom: 20px;'>
449
- Detect AI-generated text in African languages with **explainable AI** and **fairness auditing**
450
- </div>
451
- """)
452
-
453
- with gr.Tabs():
454
- # Tab 1: Classification with Explanation
455
- with gr.Tab("📝 Text Classification"):
456
- with gr.Row():
457
- with gr.Column():
458
- text_input = gr.Textbox(
459
- label="Enter Text",
460
- placeholder="Paste text here to classify...",
461
- lines=8
462
- )
463
- language_select = gr.Dropdown(
464
- choices=SUPPORTED_LANGUAGES,
465
- value="Hausa",
466
- label="Select Language"
467
- )
468
- explainer_select = gr.Radio(
469
- choices=["SHAP", "LIME", "Both"],
470
- value="SHAP",
471
- label="Explainability Method"
472
- )
473
- classify_btn = gr.Button("🔍 Classify & Explain", variant="primary")
474
-
475
- with gr.Column():
476
- result_output = gr.Markdown(label="Classification Result")
477
- prob_chart = gr.BarPlot(
478
- x="Class",
479
- y="Probability",
480
- title="Prediction Probabilities",
481
- y_lim=[0, 1],
482
- height=300,
483
- width=400
484
- )
485
-
486
- with gr.Row():
487
- with gr.Column():
488
- explanation_output = gr.Markdown(label="Explanation")
489
- with gr.Column():
490
- explanation_viz = gr.Plot(label="Visual Explanation")
491
-
492
- # Examples to help users
493
- gr.Examples(
494
- examples=[
495
- ["Ka rubuta labari game da kasuwa a Kano", "Hausa", "SHAP"],
496
- ["Ìwé yìí jẹ́ ìwé tó dára púpọ̀ fún àwọn akẹ́kọ̀ọ́", "Yoruba", "LIME"],
497
- ["Akwụkwọ a dị mma maka ụmụ akwụkwọ", "Igbo", "SHAP"],
498
- ["Dis book dey very good for students wey wan learn", "Nigerian Pidgin", "Both"]
499
- ],
500
- inputs=[text_input, language_select, explainer_select],
501
- label="Try these examples in different languages"
502
  )
503
-
504
- classify_btn.click(
505
- fn=classify_with_explanation,
506
- inputs=[text_input, language_select, explainer_select],
507
- outputs=[result_output, prob_chart, explanation_output, explanation_viz]
508
- )
509
-
510
- # Tab 2: Bias Auditing
511
- with gr.Tab("⚖️ Bias Audit"):
512
- gr.Markdown("""
513
- ### Fairness and Bias Auditing
514
-
515
- Upload a CSV file with columns: `text`, `label` (0=Human, 1=AI), `language`
516
-
517
- The system will calculate:
518
- - **EOD (Equal Opportunity Difference)**: Fairness in recall across languages
519
- - **AAOD (Average Absolute Odds Difference)**: Disparity in TPR and FPR
520
- - **Demographic Parity**: Difference in positive prediction rates
521
- """)
522
-
523
- with gr.Row():
524
- with gr.Column():
525
- audit_file = gr.File(label="Upload CSV Dataset", file_types=[".csv"])
526
- audit_btn = gr.Button("🔍 Run Bias Audit", variant="primary")
527
-
528
- with gr.Column():
529
- audit_report = gr.Markdown(label="Audit Report")
530
- audit_viz = gr.Plot(label="Confusion Matrix")
531
-
532
- audit_btn.click(
533
- fn=audit_bias,
534
- inputs=audit_file,
535
- outputs=[audit_report, audit_viz]
536
- )
537
-
538
- # Tab 3: About
539
- with gr.Tab("ℹ️ About"):
540
- gr.Markdown("""
541
- # About HATA System
542
-
543
- ## 🎯 Features
544
-
545
- ### Explainable AI
546
- - **SHAP**: Game-theory based feature attribution
547
- - **LIME**: Local interpretable model-agnostic explanations
548
- - Visual token-level attributions
549
-
550
- ### Fairness Auditing
551
- - Equal Opportunity Difference (EOD)
552
- - Average Absolute Odds Difference (AAOD)
553
- - Demographic Parity
554
- - Per-language performance metrics
555
-
556
- ## 🌍 Supported Languages
557
- Hausa, Yoruba, Igbo, Nigerian Pidgin
558
-
559
- ## 📊 Model Performance
560
- - Accuracy: 100%
561
- - F1 Score: 100%
562
- - EOD: 0.0 (Perfect fairness)
563
- - AAOD: 0.0 (No bias)
564
-
565
- ## 🔬 Technical Details
566
- - Base Model: AfroXLMR-base (davlan/afro-xlmr-base)
567
- - Parameters: ~270M
568
- - Max Sequence Length: 128 tokens
569
- - Training Dataset: PhD HATA African Dataset
570
- - Languages: 4 West African languages
571
-
572
- ## 📚 Citation
573
- ```bibtex
574
- @misc{msmaje2025hata,
575
- author = {Maje, M.S.},
576
- title = {HATA: Human-AI Text Attribution for African Languages},
577
- year = {2025},
578
- publisher = {HuggingFace},
579
- url = {https://huggingface.co/msmaje/phdhatamodel}
580
- }
581
- ```
582
- """)
583
-
584
- gr.Markdown("""
585
- ---
586
- <div style='text-align: center; color: #666;'>
587
- Built with 💜 for African Language NLP | Powered by AfroXLMR & Explainable AI
588
- </div>
589
- """)
590
 
 
 
 
591
  if __name__ == "__main__":
592
- demo.launch()
 
1
  """
2
+ Gradio Space for Human-AI Text Attribution (HATA) Model
3
+ Detects whether text is human-written or AI-generated
4
+ Supports multiple African languages
5
  """
6
 
7
+ # --- Deterministic suppression of Gradio audio stack under Python 3.13 ---
8
  import os
9
  import sys
10
  import types
 
 
 
 
 
 
 
 
 
 
11
 
 
12
  os.environ["GRADIO_DISABLE_PYDUB"] = "1"
13
+
14
+ # Provide stubs so that pydub cannot fail on audioop / pyaudioop
15
  if "audioop" not in sys.modules:
16
  sys.modules["audioop"] = types.ModuleType("audioop")
17
  if "pyaudioop" not in sys.modules:
18
  sys.modules["pyaudioop"] = types.ModuleType("pyaudioop")
19
 
20
+ # Now it is safe to import Gradio and the rest of the stack
21
+ import gradio as gr
22
+ import torch
23
+ import numpy as np
24
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
+ # ----------------------------------------------------------------------
27
+ # Model configuration
28
+ # ----------------------------------------------------------------------
29
+ MODEL_NAME = "distilbert-base-multilingual-cased" # replace with your fine-tuned HATA checkpoint if available
30
+ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
31
+
32
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
33
+ model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=2)
34
+ model.to(DEVICE)
35
+ model.eval()
36
+
37
+ LABELS = ["Human-written", "AI-generated"]
38
+
39
+ # ----------------------------------------------------------------------
40
+ # Inference routine
41
+ # ----------------------------------------------------------------------
42
+ @torch.no_grad()
43
+ def hata_predict(text: str):
44
+ if not text or not text.strip():
45
+ return {"Human-written": 0.0, "AI-generated": 0.0}
46
+
47
+ inputs = tokenizer(
48
+ text,
49
+ return_tensors="pt",
50
+ truncation=True,
51
+ padding=True,
52
+ max_length=512,
53
+ ).to(DEVICE)
54
+
55
+ outputs = model(**inputs)
56
+ logits = outputs.logits.squeeze(0)
57
+ probs = torch.softmax(logits, dim=-1).cpu().numpy()
58
+
59
+ return {LABELS[i]: float(probs[i]) for i in range(len(LABELS))}
60
+
61
+ # ----------------------------------------------------------------------
62
+ # Gradio interface
63
+ # ----------------------------------------------------------------------
64
+ with gr.Blocks(title="Multilingual HATA System") as demo:
65
+ gr.Markdown(
66
+ """
67
+ # Multilingual Human–AI Text Attribution (HATA)
68
+
69
+ This system estimates whether an input passage is **human-written** or
70
+ **AI-generated**, with a focus on multilingual and African-language use
71
+ cases (e.g., Hausa, Yoruba, Igbo, Pidgin).
72
+
73
+ The backend is a Transformer-based classifier fine-tuned for attribution.
74
+ """
75
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
 
77
+ with gr.Row():
78
+ with gr.Column(scale=3):
79
+ text_input = gr.Textbox(
80
+ label="Input Text",
81
+ placeholder="Paste a paragraph in Hausa, Yoruba, Igbo, Pidgin, or English...",
82
+ lines=8,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  )
84
+ submit_btn = gr.Button("Analyze")
85
+ with gr.Column(scale=2):
86
+ output = gr.Label(label="Attribution Probabilities")
87
+
88
+ submit_btn.click(
89
+ fn=hata_predict,
90
+ inputs=text_input,
91
+ outputs=output,
92
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
+ # ----------------------------------------------------------------------
95
+ # Entry point
96
+ # ----------------------------------------------------------------------
97
  if __name__ == "__main__":
98
+ demo.launch()