aryn25 committed on
Commit c5474bb · verified · 1 Parent(s): 042983c

Update app.py

Files changed (1)
  1. app.py +37 -14
app.py CHANGED
@@ -4,21 +4,21 @@ from transformers import BertTokenizer, BertForSequenceClassification
 import zipfile
 import os

-# ✅ Unzip the fine-tuned model if it's not already extracted
+#Unzip model if needed
 if not os.path.exists("fine_tuned_model"):
     with zipfile.ZipFile("fine_tuned_model.zip", 'r') as zip_ref:
         zip_ref.extractall("fine_tuned_model")

-# ✅ Load your fine-tuned model and tokenizer
+#Load model and tokenizer
 model_path = "./fine_tuned_model"
 tokenizer = BertTokenizer.from_pretrained(model_path)
 model = BertForSequenceClassification.from_pretrained(model_path)
 model.eval()

-# ✅ Define label mapping (adjust based on your labels)
-label_map = {0: "Original-like", 1: "Swapped-like"}
+#Define output labels
+label_map = {0: "Unbiased", 1: "Biased"}

-# ✅ Inference function
+#Bias classification function
 def detect_bias(text):
     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
     with torch.no_grad():
@@ -27,21 +27,44 @@ def detect_bias(text):
     probs = torch.softmax(logits, dim=1).squeeze()
     pred_label = torch.argmax(probs).item()
     confidence = round(probs[pred_label].item(), 2)
-
+
+    explanation = (
+        "⚠️ This text may contain stereotypical gender associations or role biases. "
+        "Consider rephrasing to ensure neutrality and inclusiveness."
+        if pred_label == 1
+        else "✅ This text appears neutral with no obvious gender bias based on the model's understanding."
+    )
+
     return {
-        "Predicted Class": label_map[pred_label],
-        "Confidence": confidence
+        "Bias Classification": label_map[pred_label],
+        "Confidence Score": confidence,
+        "Explanation": explanation
     }

-# ✅ Gradio UI
+#Gradio Interface
 with gr.Blocks() as demo:
-    gr.Markdown("# Bias Bin – Fine-Tuned BERT Version")
-    gr.Markdown("This interface uses a fine-tuned BERT model to classify gender bias in narrative text.")
-
-    text_input = gr.Textbox(label="Enter Narrative Text", lines=4, placeholder="Type here...")
+    gr.Markdown(
+        "# Bias Bin – Fine-Tuned BERT Version by Aryan, Gowtham & Manoj\n"
+        "This tool detects **gender bias** in narrative text using a BERT model fine-tuned on custom counterfactual data."
+    )
+
+    text_input = gr.Textbox(
+        label="Enter Narrative Text",
+        placeholder="E.g., 'She is a great leader and he takes care of the house.'",
+        lines=4
+    )
+
     submit_btn = gr.Button("Detect Bias")
-    output = gr.JSON(label="Output")
+    output = gr.JSON(label="Prediction Output")

     submit_btn.click(fn=detect_bias, inputs=text_input, outputs=output)

+    #Disclaimer
+    gr.Markdown(
+        "___\n"
+        "<span style='color: gray; font-style: italic;'>⚠️ Disclaimer: This model is trained on a small, synthetic dataset. "
+        "Its predictions may not always be accurate or generalizable. Use with caution and consider human review when necessary.</span>",
+        unsafe_allow_html=True
+    )
+
 demo.launch()
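For a quick sanity check of the new return format outside the web UI, the snippet below calls detect_bias() directly and prints the dictionary that the gr.JSON component displays. It is a minimal sketch, assuming the model loading and function definition from app.py above have already run in the same Python session (importing app.py as a module would also execute demo.launch()); the sample sentence is just the Textbox placeholder text, not a claim about model output.

# Minimal sketch: inspect detect_bias() output in a session where app.py's
# definitions (tokenizer, model, label_map, detect_bias) have already run.
sample = "She is a great leader and he takes care of the house."  # placeholder text from the Textbox

result = detect_bias(sample)

# Keys mirror the dict built in detect_bias:
#   "Bias Classification" -> "Biased" or "Unbiased" (from label_map)
#   "Confidence Score"    -> softmax probability of the predicted class, rounded to 2 decimals
#   "Explanation"         -> warning or neutral message selected by pred_label
for key, value in result.items():
    print(f"{key}: {value}")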
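One caveat on the disclaimer block: unsafe_allow_html is Streamlit's st.markdown keyword, and Gradio's gr.Markdown does not define such a parameter, so depending on the installed Gradio version the argument is either ignored with a warning or rejected as unexpected. A possible alternative, assuming a recent Gradio release, is to render the styled note with gr.HTML, which accepts raw HTML. The sketch below shows only that block; the rest of the interface stays as in app.py.

import gradio as gr

# Sketch of an alternative disclaimer block using gr.HTML instead of
# gr.Markdown(..., unsafe_allow_html=True); only the final block of the
# Blocks layout changes, everything else in app.py stays as-is.
disclaimer_html = (
    "<hr>"
    "<span style='color: gray; font-style: italic;'>⚠️ Disclaimer: This model is trained on a "
    "small, synthetic dataset. Its predictions may not always be accurate or generalizable. "
    "Use with caution and consider human review when necessary.</span>"
)

with gr.Blocks() as demo:
    # ... title, textbox, button, JSON output, and click handler as in app.py ...
    gr.HTML(disclaimer_html)  # renders the styled disclaimer as raw HTML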