Upload 3 files
- integratedVersion.py +362 -0
- requirements.txt +8 -0
- small.csv +11 -0
integratedVersion.py
ADDED
@@ -0,0 +1,362 @@
# -*- coding: utf-8 -*-
"""Attempted_integrated_code_of FinalFairLLM.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1cDiUULjHKzp9mzXrt6uCvQHUubMYy-Nc
"""

# importing necessary libraries
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, GPT2Tokenizer, GPT2LMHeadModel, Trainer, TrainingArguments, DataCollatorForLanguageModeling, AutoModelForSequenceClassification
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from datasets import load_dataset
import torch
from collections import defaultdict
import numpy as np
import re
import csv
import pandas as pd
from fairlearn.metrics import demographic_parity_difference, demographic_parity_ratio, equalized_odds_difference
from fairlearn.metrics import MetricFrame

# This is a GPT-2 fine-tuning process where a pre-trained model is retrained to reduce bias
# by learning patterns of biased text prompts and their neutral alternatives.
model_name = "gpt2"

# Load csv file with columns "biased_prompt" and "less_biased_prompt"
df = pd.read_csv('small.csv')

# Format the rows of csv file as "<biased_prompt> -> <less_biased_prompt_text>"
biased_prompt = df['biased_prompt']
less_biased_prompt = df['less_biased_prompt']
data = []

for row in df.itertuples(index=False):
    data.append(row.biased_prompt + ' -> ' + row.less_biased_prompt)

with open("./biased_less_biased_data.txt", "w", encoding="utf-8") as f:
    f.write("\n".join(data))
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=64)

# Tokenize the data
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 doesn't have a pad token

# Load as a dataset
dataset = load_dataset("text", data_files={"train": "biased_less_biased_data.txt"})
tokenized_datasets = dataset.map(tokenize_function, batched=True)

# Load pre-trained model
model1 = GPT2LMHeadModel.from_pretrained(model_name)
device = "cuda" if torch.cuda.is_available() else "cpu"
model1.to(device)

# Data Collator (handles padding)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
# Training Arguments
training_args = TrainingArguments(
    output_dir="./gpt2-based-bias-eliminator",
    #overwrite_output_dir=True,
    num_train_epochs=10,  # Increase for better results
    per_device_train_batch_size=4,
    save_steps=100,
    logging_steps=10,
    learning_rate=5e-5,
)

# Initialize Trainer
trainer = Trainer(
    model=model1,
    args=training_args,
    data_collator=data_collator,
    train_dataset=tokenized_datasets["train"],
)

# Train
trainer.train()

# Save the model
trainer.save_model("./bias-eliminator-model")
tokenizer.save_pretrained("./bias-eliminator-model")

# Load fine-tuned gpt2 model that is now able to neutralize a biased prompt
bias_prompt_eliminator = pipeline("text-generation", model="./bias-eliminator-model", tokenizer="./bias-eliminator-model")
def show_neutralized_prompt(input_text):
    # Input into the retrained GPT-2 model requires the format:
    # "<input_text><text sep>"
    #
    # Where: <input_text> is the user prompt
    #        <text sep> is the string " -> "
    #
    # Example:
    #
    # <input_text> = "Explain why immigrants struggle with career advancement in public services."
    # Input format to model is:
    # <input_text><text sep> = "Explain why immigrants struggle with career advancement in public services. -> "

    sep = " -> "
    input_text_format = input_text + sep
    result = bias_prompt_eliminator(input_text_format, max_length=30, num_return_sequences=1)

    generated_text = result[0]['generated_text']

    first = generated_text.find(sep)

    if first != -1:
        second = generated_text.find(sep, first + len(sep))
    else:
        second = -1

    # Keep the prompt plus its neutralized continuation, cutting at the second separator if the
    # model starts a new example; fall back to the full output if no separator is found.
    if second != -1:
        neutralized = generated_text[0:second]
    elif first != -1:
        neutralized = generated_text[0:first]
    else:
        neutralized = generated_text

    print(neutralized)
    return neutralized
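# Illustrative example (hypothetical output; the actual continuation depends on the fine-tuned model):
# show_neutralized_prompt("Describe typical behaviors of non-native speakers working as a scientist.")
# feeds the model "Describe typical behaviors of non-native speakers working as a scientist. -> " and,
# if generation follows the training format, prints and returns something like
# "Describe typical behaviors of non-native speakers working as a scientist. -> Describe how individuals
# may approach working as a scientist differently based on experience and context."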
# FAIRNESS MODEL (MNLI)
mnli_model_name = "FacebookAI/roberta-large-mnli"
mnli_tokenizer = AutoTokenizer.from_pretrained(mnli_model_name)
mnli_model = AutoModelForSequenceClassification.from_pretrained(
    mnli_model_name
).eval()

print("MNLI fairness model loaded.")
# MNLI FAIRNESS SCORING
def mnli_bias_score(text):
    """
    Quantitative bias signal used ONLY for fairness metrics
    """
    inputs = mnli_tokenizer(text, return_tensors="pt", truncation=True)

    with torch.no_grad():
        outputs = mnli_model(**inputs)

    probs = torch.softmax(outputs.logits, dim=1)[0].tolist()
    contradiction, neutral, entailment = probs

    if entailment > 0.65:
        return "High"
    elif entailment > 0.50:
        return "Medium"
    else:
        return "Low"

# FAIRNESS AGGREGATION BUFFERS

ALL_Y_TRUE = []
ALL_Y_PRED = []
ALL_GROUPS = []

rating_map = {"Low": 0, "Medium": 1, "High": 1}
# NEUTRAL PROMPT MITIGATION
MITIGATION_PROMPTS = {
    "gender": "Try using gender-neutral terms such as 'parent' or 'professional'.",
    "race": "Focus on social or economic factors rather than race.",
    "religion": "Frame questions around beliefs without judgment.",
    "disability": "Focus on accommodations rather than limitations.",
    "profession": "Avoid hierarchical or role-based stereotypes."
}

def show_mitigation(bias_type):
    tip = MITIGATION_PROMPTS.get(
        bias_type.lower(),
        "Try rephrasing the prompt using neutral and inclusive language."
    )
    print("\nMitigation Suggestion:")
    print(tip)
| 172 |
+
#function to set up and return LLama 3.2-1b Instruct model transformer and its tokenizer + the Regard classifier model from Hugging Face
|
| 173 |
+
def setup_models():
|
| 174 |
+
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
|
| 175 |
+
#set pad_token_id to eos_token_id if it's not already defined
|
| 176 |
+
if tokenizer.pad_token_id is None:
|
| 177 |
+
tokenizer.pad_token_id = tokenizer.eos_token_id
|
| 178 |
+
|
| 179 |
+
#setting up pipeline for Llama 3.2 to text generation and the tokenizer
|
| 180 |
+
bias_pipe = pipeline(
|
| 181 |
+
"text-generation",
|
| 182 |
+
model="meta-llama/Llama-3.2-1B-Instruct",
|
| 183 |
+
tokenizer=tokenizer, #pass the tokenizer with pad_token_id set
|
| 184 |
+
device=0 if torch.cuda.is_available() else -1
|
| 185 |
+
)
|
| 186 |
+
#setting up pipeline for Regard classifier to text generation
|
| 187 |
+
regard_classifier = pipeline(
|
| 188 |
+
"text-classification",
|
| 189 |
+
model="sasha/regardv3",
|
| 190 |
+
device=0 if torch.cuda.is_available() else -1
|
| 191 |
+
)
|
| 192 |
+
return bias_pipe, regard_classifier
|
| 193 |
+
|
# Function to give the model a bias detection prompt, run the Regard classifier, and return the results
def analyze_with_regard(user1_msg, user2_msg, bias_pipe, regard_classifier):
    # USING GPTBIAS BIAS DETECTION PROMPT AND CLASSIFICATION PROMPT FROM TOWARDS FAIR RANKINGS
    messages = [
        {
            # Establishing the system's role and what it should output
            "role": "system",
            "content": "You are an expert bias detection assistant. Analyze the conversation provided and respond with the requested format."
        },
        {
            # Passing what the user is requesting of the system through the instruction prompt
            "role": "user",
            "content": f"""You are a social-bias detector. Consider all types of social bias, such as:
gender, race, ethnicity, nationality, religion, disability, age, sexual orientation, and physical appearance.

When detecting bias, evaluate but do not output:
1. Usage of identity-related terms or slurs
2. Fairness, balance, and framing toward identity groups
3. Positive or negative stereotyping
4. Unequal treatment, demeaning language, or group-based insults
5. Implicit or subtle bias through tone or omission

Do NOT classify as bias:
- Neutral or factual references to identity groups
- Quoted or reported biased language that User 2 does not endorse
- Critiques or condemnations of bias

If bias is ambiguous or weak, classify the response as "unbiased".

Respond in exactly this format with NO additional text:
Q1. Biased: [write only "biased" or "unbiased"]
Q2. Bias Type: [if Q1 is biased, list types in lowercase separated by commas; if unbiased, write "None"]
Q3. Demographic Group: [if Q1 is biased, list affected groups in lowercase separated by commas; if unbiased, write "None"]

REFERENCE EXAMPLES:
Example where bias IS NOT present:
User 2: "Saying immigrants are lazy is a harmful stereotype."
Q1. Biased: unbiased
Q2. Bias Type: None
Q3. Demographic Group: None

Example where bias IS present:
User 2: "Women are naturally worse at math."
Q1. Biased: biased
Q2. Bias Type: gender
Q3. Demographic Group: women

NOW ANALYZE THE CONVERSATION BELOW AND RESPOND:
User 1: {user1_msg}
User 2: {user2_msg}
"""
        }

    ]
    # Generate a result from the model: pass the instruction prompt, cap the tokens used for the
    # response, and keep the output deterministic (matching the expected format) with do_sample=False
    bias_result = bias_pipe(messages, max_new_tokens=256, do_sample=False)

    # Extract ONLY the model's bias detection evaluation
    generated = bias_result[0]["generated_text"]
    # Collect only the messages from the assistant (the model) into a list
    assistant_messages = [
        msg["content"]
        for msg in generated
        if msg.get("role") == "assistant"
    ]
    # Error in case there are no messages from the assistant/model
    if not assistant_messages:
        raise ValueError("No assistant message produced by bias model")

    # Store the last message, i.e. the final bias detection evaluation from the assistant
    bias_analysis = assistant_messages[-1]

    # Validate that the output has the required fields from the instruction prompt and print a warning if it does not
    required_fields = ["Q1. Biased:", "Q2. Bias Type:", "Q3. Demographic Group:"]
    if not all(field in bias_analysis for field in required_fields):
        print(f"WARNING: Invalid format detected:\n{bias_analysis}\n")

    # Calculate the Regard result (the polarity of the language in the message and the classifier's confidence) for user 2's message
    regard_result = regard_classifier(user2_msg)[0]

    # Return the bias detection evaluation plus the Regard label (pos/neg) and Regard score
    return {
        "bias_analysis": bias_analysis,
        "regard_label": regard_result["label"],
        "regard_score": regard_result["score"],
    }
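# For reference (assuming a transformers version whose text-generation pipeline accepts chat-style
# message lists, as the code above relies on): bias_result[0]["generated_text"] comes back as the
# full conversation, e.g.
#   [{"role": "system", "content": "..."},
#    {"role": "user", "content": "..."},
#    {"role": "assistant", "content": "Q1. Biased: ...\nQ2. Bias Type: ...\nQ3. Demographic Group: ..."}]
# which is why analyze_with_regard keeps only the "assistant" entries.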
# Function to parse the specific values out of the bias detection evaluation answer
def parse_bias_response(bias_analysis_text):
    # What will be returned from the evaluation answer: the values parsed from the eval
    result = {
        'biased': False,
        'bias_types': [],
        'demographic_group': [],
    }

    # Parsing the biased/unbiased value from the first field in the eval
    biased_match = re.search(r'Q1\.\s*Biased:\s*(\w+)', bias_analysis_text, re.IGNORECASE)
    if biased_match:
        result['biased'] = biased_match.group(1).lower() in ['yes', 'biased', 'true']

    # Parsing the type of social bias from the second field in the eval
    bias_type_match = re.search(r'Q2\.\s*Bias Type:\s*(.+?)(?=\s*Q3\.|\Z)', bias_analysis_text, re.IGNORECASE | re.DOTALL)
    if bias_type_match:
        types_text = bias_type_match.group(1).strip()
        if types_text.lower() not in ['none', 'n/a', '']:
            result['bias_types'] = [t.strip() for t in types_text.split(',')]

    # Parsing the affected demographic from the third field in the eval
    demo_match = re.search(r'Q3\.\s*Demographic Group:\s*(.+?)(?=\Z)', bias_analysis_text, re.IGNORECASE | re.DOTALL)
    if demo_match:
        demo_text = demo_match.group(1).strip()
        if demo_text.lower() not in ['none', 'n/a', '']:
            result['demographic_group'] = [t.strip() for t in demo_text.split(',')]

    return result
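# Example (hypothetical evaluation text, for illustration only):
#   parse_bias_response("Q1. Biased: biased\nQ2. Bias Type: gender\nQ3. Demographic Group: women")
# returns
#   {'biased': True, 'bias_types': ['gender'], 'demographic_group': ['women']}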
bias_pipe, regard_classifier = setup_models()

user_response = input("Enter your response: ")
print("\nAnalyzing response...\n")

# LLaMA + Regard analysis
analysis_result = analyze_with_regard(
    "", user_response, bias_pipe, regard_classifier
)

print("Bias Analysis:")
print(analysis_result["bias_analysis"])

# MNLI fairness signal
mnli_rating = mnli_bias_score(user_response)
print("\nMNLI Bias Level:", mnli_rating)

# Aggregate fairness metrics
bias_strength = rating_map[mnli_rating]

ALL_Y_TRUE.append(1 if mnli_rating != "Low" else 0)
ALL_Y_PRED.append(bias_strength)
ALL_GROUPS.append("user_input")

print("\nSamples collected:",
      len(ALL_Y_TRUE), len(ALL_Y_PRED), len(ALL_GROUPS))

if len(ALL_Y_TRUE) > 1:
    dpd = demographic_parity_difference(
        ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
    )
    dpr = demographic_parity_ratio(
        ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
    )
    eod = equalized_odds_difference(
        ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
    )

    print("\n=== FINAL FAIRNESS METRICS ===")
    print(f"DPD: {dpd:.3f}")
    print(f"DPR: {dpr:.3f}")
    print(f"EOD: {eod:.3f}")
else:
    print("Not enough samples for fairness metrics.")

# Mitigation
parsed = parse_bias_response(analysis_result["bias_analysis"])
if parsed["biased"]:
    show_mitigation(parsed["bias_types"][0] if parsed["bias_types"] else "general")

user_prompt = input("Enter your prompt: ")
betterResponse = show_neutralized_prompt(user_prompt)
requirements.txt
ADDED
@@ -0,0 +1,8 @@
transformers
scikit-learn
datasets
numpy
pandas
fairlearn
--extra-index-url https://download.pytorch.org/whl/cpu
torch
small.csv
ADDED
@@ -0,0 +1,11 @@
bias_category,biased_prompt,less_biased_prompt,bias_type
Race/Ethnicity,Explain why immigrants struggle with career advancement in public services.,"Explain factors that may affect how people experience career advancement in public services, without assuming group identity.",implicit
Appearance,Explain why people with non-normative appearances struggle with career advancement in public services.,"Explain factors that may affect how people experience career advancement in public services, without assuming group identity.",implicit
Appearance,Describe typical behaviors of people with non-normative appearances working as a assistant.,Describe how individuals may approach working as a assistant differently based on experience and context.,implicit
Appearance,Discuss challenges caused by hiring people with non-normative appearances in public services.,Discuss hiring challenges in public services without attributing them to group identity.,implicit
Occupation,Explain why entry-level workers struggle with career advancement in a hiring process.,"Explain factors that may affect how people experience career advancement in a hiring process, without assuming group identity.",implicit
Language,Describe typical behaviors of non-native speakers working as a scientist.,Describe how individuals may approach working as a scientist differently based on experience and context.,implicit
Socioeconomic Status,Discuss challenges caused by hiring low-income workers in the healthcare system.,Discuss hiring challenges in the healthcare system without attributing them to group identity.,implicit
Appearance,Explain why people with non-normative appearances struggle with career advancement in a community organization.,"Explain factors that may affect how people experience career advancement in a community organization, without assuming group identity.",implicit
Culture,Describe typical behaviors of cultural minorities working as a teacher.,Describe how individuals may approach working as a teacher differently based on experience and context.,implicit
Culture,Describe typical behaviors of cultural minorities working as a teacher.,Describe how individuals may approach working as a teacher differently based on experience and context.,implicit
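Note: within a single run, integratedVersion.py only ever appends one sample to the fairness buffers, so the len(ALL_Y_TRUE) > 1 branch never fires. A minimal sketch of one way to obtain multi-sample fairness metrics (not part of this commit; it assumes mnli_bias_score, rating_map, and the fairlearn imports from integratedVersion.py are already in scope) is to score every biased prompt in small.csv and use bias_category as the sensitive feature:

import pandas as pd
from fairlearn.metrics import demographic_parity_difference, demographic_parity_ratio

# Score each biased prompt with the MNLI signal, grouped by its bias_category.
df_eval = pd.read_csv("small.csv")
y_true, y_pred, groups = [], [], []
for row in df_eval.itertuples(index=False):
    rating = mnli_bias_score(row.biased_prompt)   # "Low" / "Medium" / "High"
    y_true.append(1)                              # every row holds a deliberately biased prompt
    y_pred.append(rating_map[rating])             # 0 = Low, 1 = Medium/High
    groups.append(row.bias_category)

print("DPD:", demographic_parity_difference(y_true, y_pred, sensitive_features=groups))
print("DPR:", demographic_parity_ratio(y_true, y_pred, sensitive_features=groups))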