mariamSoub committed (verified) commit 3920171 — 1 parent: 1e26638

Update app.py

Browse files
Files changed (1): app.py (+10, −4)
app.py CHANGED
@@ -197,12 +197,10 @@ def rule_based_bias_check(text):
197
 
198
 
199
  def show_mitigation(bias_type):
200
- tip = MITIGATION_PROMPTS.get(
201
  bias_type.lower(),
202
  "Try rephrasing the prompt using neutral and inclusive language."
203
  )
204
- print("\nMitigation Suggestion:")
205
- print(tip)
206
 
207
  #function to set up and return LLama 3.2-1b Instruct model transformer and its tokenizer + the Regard classifier model from Hugging Face
208
  def setup_models():
@@ -415,6 +413,8 @@ def analyze_response(user_response):
415
  dpr = demographic_parity_ratio(
416
  ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
417
  )
 
 
418
  eod = equalized_odds_difference(
419
  ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
420
  )
@@ -434,9 +434,15 @@ Samples collected: {len(ALL_Y_TRUE)}
434
 
435
  # 🔥 RULE-BASED OVERRIDE (THIS FIXES YOUR PROBLEM)
436
  rule_check = rule_based_bias_check(user_response)
437
- if rule_check:
 
438
  parsed = rule_check
439
 
 
 
 
 
 
440
  # ✅ Fix fairness group (prevents DPR = nan)
441
  group = parsed["bias_types"][0] if parsed["bias_types"] else "neutral"
442
  ALL_GROUPS[-1] = group
 
197
 
198
 
199
  def show_mitigation(bias_type):
200
+ return MITIGATION_PROMPTS.get(
201
  bias_type.lower(),
202
  "Try rephrasing the prompt using neutral and inclusive language."
203
  )
 
 
204
 
205
  #function to set up and return LLama 3.2-1b Instruct model transformer and its tokenizer + the Regard classifier model from Hugging Face
206
  def setup_models():
 
413
  dpr = demographic_parity_ratio(
414
  ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
415
  )
416
+ if np.isnan(dpr):
417
+ dpr = 0.0
418
  eod = equalized_odds_difference(
419
  ALL_Y_TRUE, ALL_Y_PRED, sensitive_features=ALL_GROUPS
420
  )
 
434
 
435
  # 🔥 RULE-BASED OVERRIDE (THIS FIXES YOUR PROBLEM)
436
  rule_check = rule_based_bias_check(user_response)
437
+
438
+ if rule_check is not None:
439
  parsed = rule_check
440
 
441
+ # 🔥 OVERRIDE DISPLAY OUTPUT (THIS IS THE MISSING PIECE)
442
+ bias_analysis = f"""Q1. Biased: biased
443
+ Q2. Bias Type: {parsed['bias_types'][0]}
444
+ Q3. Demographic Group: {parsed['demographic_group'][0]}"""
445
+
446
  # ✅ Fix fairness group (prevents DPR = nan)
447
  group = parsed["bias_types"][0] if parsed["bias_types"] else "neutral"
448
  ALL_GROUPS[-1] = group