mariamSoub commited on
Commit
cc8e999
·
verified ·
1 Parent(s): 65de8e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -8
app.py CHANGED
@@ -102,7 +102,7 @@ def show_neutralized_prompt(input_text):
102
  if sep in generated_text:
103
  output = generated_text.split(sep)[-1].strip()
104
 
105
- # ✅ CLEAN OUTPUT
106
  if len(output.split()) < 3:
107
  return "Rewrite using neutral, inclusive language."
108
 
@@ -425,7 +425,7 @@ def run_light_auto_test():
425
  ALL_Y_PRED = []
426
  ALL_GROUPS = []
427
 
428
- for sample in AUTO_TEST_SAMPLES[:7]: # 🔥 only 5 → safe
429
  analyze_response(sample)
430
 
431
  return f"Loaded {len(ALL_Y_TRUE)} samples for fairness evaluation."
@@ -477,28 +477,26 @@ Samples collected: {len(ALL_Y_TRUE)}
477
  fairness_metrics = "Not enough samples for fairness metrics."
478
 
479
  # Mitigation
480
- # Mitigation + FIXED BIAS DETECTION
481
  parsed = parse_bias_response(bias_analysis)
482
 
483
- # 🔥 RULE-BASED OVERRIDE (THIS FIXES YOUR PROBLEM)
484
  rule_check = rule_based_bias_check(user_response)
485
 
486
  if rule_check is not None:
487
  parsed = {
488
- "biased": True, # 🔥 FORCE TRUE
489
  "bias_types": rule_check["bias_types"],
490
  "demographic_group": rule_check["demographic_group"]
491
  }
492
- # 🔥 FORCE CONSISTENCY (THIS IS THE REAL FIX)
493
  if parsed["bias_types"]:
494
  parsed["biased"] = True
495
 
496
- # 🔥 OVERRIDE DISPLAY OUTPUT (THIS IS THE MISSING PIECE)
497
  bias_analysis = f"""Q1. Biased: biased
498
  Q2. Bias Type: {parsed['bias_types'][0]}
499
  Q3. Demographic Group: {parsed['demographic_group'][0]}"""
500
 
501
- # ✅ Fix fairness group (prevents DPR = nan)
502
  group = parsed["bias_types"][0] if parsed["bias_types"] else "neutral"
503
  ALL_GROUPS[-1] = group
504
 
 
102
  if sep in generated_text:
103
  output = generated_text.split(sep)[-1].strip()
104
 
105
+
106
  if len(output.split()) < 3:
107
  return "Rewrite using neutral, inclusive language."
108
 
 
425
  ALL_Y_PRED = []
426
  ALL_GROUPS = []
427
 
428
+ for sample in AUTO_TEST_SAMPLES[:7]:
429
  analyze_response(sample)
430
 
431
  return f"Loaded {len(ALL_Y_TRUE)} samples for fairness evaluation."
 
477
  fairness_metrics = "Not enough samples for fairness metrics."
478
 
479
  # Mitigation
480
+
481
  parsed = parse_bias_response(bias_analysis)
482
 
 
483
  rule_check = rule_based_bias_check(user_response)
484
 
485
  if rule_check is not None:
486
  parsed = {
487
+ "biased": True,
488
  "bias_types": rule_check["bias_types"],
489
  "demographic_group": rule_check["demographic_group"]
490
  }
491
+
492
  if parsed["bias_types"]:
493
  parsed["biased"] = True
494
 
495
+
496
  bias_analysis = f"""Q1. Biased: biased
497
  Q2. Bias Type: {parsed['bias_types'][0]}
498
  Q3. Demographic Group: {parsed['demographic_group'][0]}"""
499
 
 
500
  group = parsed["bias_types"][0] if parsed["bias_types"] else "neutral"
501
  ALL_GROUPS[-1] = group
502