Spaces status: Runtime error

Add award-winning AI bias detector application with Gradio UI

app.py (ADDED, +136 lines)

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np

# Load bias detection model
model_name = "d4data/bias-detection-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Bias categories
bias_labels = ["Non-biased", "Biased"]

def analyze_bias(text):
    """
    Analyze text for potential bias using a fine-tuned transformer model.
    Returns bias classification, confidence score, and detailed analysis.
    """
    if not text or len(text.strip()) == 0:
        return "Please enter some text to analyze.", "", ""

    # Tokenize and predict
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512, padding=True)

    with torch.no_grad():
        outputs = model(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)

    # Get prediction results
    confidence_scores = predictions[0].tolist()
    predicted_class = torch.argmax(predictions, dim=-1).item()

    # Generate detailed analysis (assumes index 0 = non-biased, index 1 = biased)
    bias_score = confidence_scores[1] * 100  # Biased probability
    neutral_score = confidence_scores[0] * 100  # Non-biased probability

    # Classification result
    if predicted_class == 1:
        result = f"⚠️ **BIAS DETECTED** (Confidence: {bias_score:.1f}%)"
        interpretation = "This text shows signs of potential bias. Consider reviewing for:"
        recommendations = [
            "• Language that may unfairly represent certain groups",
            "• Stereotypical assumptions or generalizations",
            "• Exclusionary or prejudiced framing",
            "• Consider rephrasing with more neutral, inclusive language"
        ]
    else:
        result = f"✅ **NEUTRAL TEXT** (Confidence: {neutral_score:.1f}%)"
        interpretation = "This text appears relatively neutral and unbiased."
        recommendations = [
            "• Continue using inclusive, balanced language",
            "• Maintain awareness of potential implicit biases",
            "• Consider diverse perspectives in your writing"
        ]

    # Detailed breakdown; the recommendations are joined outside the f-string
    # so no backslash appears inside an f-string expression
    recommendation_text = "\n".join(recommendations)
    breakdown = f"""### Bias Analysis Scores:
- **Biased**: {bias_score:.2f}%
- **Neutral**: {neutral_score:.2f}%

### {interpretation}
{recommendation_text}

### About This Tool:
This AI bias detector uses advanced natural language processing to identify potential biases in text.
It's designed for researchers, journalists, educators, and content creators who want to ensure
fair and inclusive communication.
"""

    return result, breakdown, f"Bias Score: {bias_score:.1f}% | Neutral Score: {neutral_score:.1f}%"

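A note on the scoring above: analyze_bias treats logit index 1 as the "Biased" class and index 0 as "Non-biased". That ordering is an assumption about how the checkpoint was exported, not something the code verifies. A quick check outside app.py (assuming the checkpoint downloads and exposes an id2label mapping in its config, as transformers sequence-classification models normally do) would be:

# Sanity check, run separately from app.py: confirm which logit index maps to which label
from transformers import AutoConfig

config = AutoConfig.from_pretrained("d4data/bias-detection-model")
print(config.id2label)  # expect something like {0: "Non-biased", 1: "Biased"} if the indexing matches

If the mapping turns out to be reversed, bias_score and neutral_score should swap indices.
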
# Create Gradio interface
with gr.Blocks(title="AI Bias Detector", theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
# 🎯 AI Bias Detector
### Award-Winning Tool for Detecting Bias in Text and AI Outputs

A powerful AI tool designed for researchers, journalists, and educators to detect potential bias
in text. Features cutting-edge analysis, user-friendly UI, and insightful visualizations.

**How to use:** Enter or paste text below, then click "Analyze Bias" to receive a comprehensive analysis.
"""
    )

    with gr.Row():
        with gr.Column(scale=1):
            text_input = gr.Textbox(
                label="Enter Text to Analyze",
                placeholder="Paste or type text here to check for potential bias...",
                lines=10,
                max_lines=20
            )

            analyze_btn = gr.Button("🔍 Analyze Bias", variant="primary", size="lg")

            gr.Examples(
                examples=[
                    ["The scientist made a groundbreaking discovery in quantum physics."],
                    ["Women are naturally better at multitasking than men."],
                    ["The CEO announced new initiatives to promote workplace diversity and inclusion."],
                    ["All teenagers are lazy and obsessed with social media."],
                    ["The research team, comprising experts from diverse backgrounds, published their findings."]
                ],
                inputs=text_input,
                label="Example Texts (Click to Try)"
            )

        with gr.Column(scale=1):
            result_output = gr.Markdown(label="Analysis Result")
            score_output = gr.Textbox(label="Summary", interactive=False)
            detailed_output = gr.Markdown(label="Detailed Analysis")

    analyze_btn.click(
        fn=analyze_bias,
        inputs=text_input,
        outputs=[result_output, detailed_output, score_output]
    )

    gr.Markdown(
        """
---
### 📊 About This Tool

This bias detector uses state-of-the-art transformer models fine-tuned on bias detection datasets.
It analyzes linguistic patterns, word choices, and contextual meanings to identify potential biases.

**Note:** This tool provides suggestions and should be used as part of a comprehensive editorial review process.
Human judgment remains essential for nuanced content evaluation.

**Developed by:** airifoundation | **Model:** d4data/bias-detection-model
"""
    )

if __name__ == "__main__":
    demo.launch()
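
This commit adds only app.py. A Hugging Face Space also needs its Python dependencies declared in a requirements.txt at the repository root, and a missing or incomplete file is a common cause of the kind of runtime error shown in the Space status. A minimal sketch derived from the imports above (package list assumed, versions unpinned) would be:

gradio
torch
transformers
numpy

If the dependencies resolve and the Space still fails, the build and runtime logs on the Space page are the place to check whether the failure happens while downloading the model or while loading it with from_pretrained.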