# AI Bias Detector — Hugging Face Space (Gradio app)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import numpy as np

# Load the bias-detection classifier once at import time so the Gradio
# handler does not reload the weights on every request.
model_name = "d4data/bias-detection-model"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Label order matches the model's logit indices: 0 = non-biased, 1 = biased
# (see how analyze_bias reads confidence_scores[0] / confidence_scores[1]).
bias_labels = ["Non-biased", "Biased"]
def analyze_bias(text):
    """
    Analyze text for potential bias using a fine-tuned transformer model.

    Args:
        text: Input string to classify. May be None or empty.

    Returns:
        A 3-tuple ``(result, breakdown, summary)``:
        - result: one-line markdown headline with the classification,
        - breakdown: detailed markdown analysis with scores and tips,
        - summary: compact "Bias Score ... | Neutral Score ..." string.
        For empty/whitespace input, returns a prompt message and two
        empty strings.
    """
    if not text or len(text.strip()) == 0:
        return "Please enter some text to analyze.", "", ""

    # Tokenize and run one forward pass; inference only, so no gradients.
    inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512, padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
        predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)

    # Class probabilities: index 0 = non-biased, index 1 = biased.
    confidence_scores = predictions[0].tolist()
    predicted_class = torch.argmax(predictions, dim=-1).item()

    bias_score = confidence_scores[1] * 100  # Biased probability, in percent
    neutral_score = confidence_scores[0] * 100  # Non-biased probability, in percent

    # Classification result
    if predicted_class == 1:
        result = f"⚠️ **BIAS DETECTED** (Confidence: {bias_score:.1f}%)"
        interpretation = "This text shows signs of potential bias. Consider reviewing for:"
        recommendations = [
            "• Language that may unfairly represent certain groups",
            "• Stereotypical assumptions or generalizations",
            "• Exclusionary or prejudiced framing",
            "• Consider rephrasing with more neutral, inclusive language",
        ]
    else:
        result = f"✅ **NEUTRAL TEXT** (Confidence: {neutral_score:.1f}%)"
        interpretation = "This text appears relatively neutral and unbiased."
        recommendations = [
            "• Continue using inclusive, balanced language",
            "• Maintain awareness of potential implicit biases",
            "• Consider diverse perspectives in your writing",
        ]

    # Join the bullet list OUTSIDE the f-string: a backslash inside an
    # f-string expression (e.g. {''.join(f'{rec}\n' ...)}) is a
    # SyntaxError on Python < 3.12 and was crashing this Space.
    recommendations_text = "\n".join(recommendations)

    # Detailed markdown breakdown shown in the UI.
    breakdown = f"""### Bias Analysis Scores:
- **Biased**: {bias_score:.2f}%
- **Neutral**: {neutral_score:.2f}%

### {interpretation}
{recommendations_text}

### About This Tool:
This AI bias detector uses advanced natural language processing to identify potential biases in text.
It's designed for researchers, journalists, educators, and content creators who want to ensure
fair and inclusive communication.
"""

    return result, breakdown, f"Bias Score: {bias_score:.1f}% | Neutral Score: {neutral_score:.1f}%"
# Create Gradio interface: input column (textbox + examples) on the left,
# analysis outputs on the right, wired together by the Analyze button.
with gr.Blocks(title="AI Bias Detector", theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🎯 AI Bias Detector
        ### Award-Winning Tool for Detecting Bias in Text and AI Outputs
        A powerful AI tool designed for researchers, journalists, and educators to detect potential bias
        in text. Features cutting-edge analysis, user-friendly UI, and insightful visualizations.
        **How to use:** Enter or paste text below, then click "Analyze Bias" to receive a comprehensive analysis.
        """
    )
    with gr.Row():
        with gr.Column(scale=1):
            text_input = gr.Textbox(
                label="Enter Text to Analyze",
                placeholder="Paste or type text here to check for potential bias...",
                lines=10,
                max_lines=20,
            )
            analyze_btn = gr.Button("🔍 Analyze Bias", variant="primary", size="lg")
            # Clickable sample inputs covering both neutral and biased text.
            gr.Examples(
                examples=[
                    ["The scientist made a groundbreaking discovery in quantum physics."],
                    ["Women are naturally better at multitasking than men."],
                    ["The CEO announced new initiatives to promote workplace diversity and inclusion."],
                    ["All teenagers are lazy and obsessed with social media."],
                    ["The research team, comprising experts from diverse backgrounds, published their findings."],
                ],
                inputs=text_input,
                label="Example Texts (Click to Try)",
            )
        with gr.Column(scale=1):
            result_output = gr.Markdown(label="Analysis Result")
            score_output = gr.Textbox(label="Summary", interactive=False)
            detailed_output = gr.Markdown(label="Detailed Analysis")

    # analyze_bias returns (result, breakdown, summary) — note the output
    # order below maps breakdown -> detailed_output, summary -> score_output.
    analyze_btn.click(
        fn=analyze_bias,
        inputs=text_input,
        outputs=[result_output, detailed_output, score_output],
    )

    gr.Markdown(
        """
        ---
        ### 📊 About This Tool
        This bias detector uses state-of-the-art transformer models fine-tuned on bias detection datasets.
        It analyzes linguistic patterns, word choices, and contextual meanings to identify potential biases.
        **Note:** This tool provides suggestions and should be used as part of a comprehensive editorial review process.
        Human judgment remains essential for nuanced content evaluation.
        **Developed by:** airifoundation | **Model:** d4data/bias-detection-model
        """
    )

if __name__ == "__main__":
    demo.launch()