sheltonmaharesh committed on
Commit
9f83410
·
verified ·
1 Parent(s): c6cb93b

Deploy backend Flask app

Browse files
Files changed (1) hide show
  1. bot_detector_api.py +32 -39
bot_detector_api.py CHANGED
@@ -13,15 +13,16 @@ app = Flask("Bot detector")
13
  def home():
14
  return "✅ Welcome to the Bot Prediction API!"
15
 
16
- # Load model artifacts
17
  model = joblib.load("model.joblib")
18
  encoders = joblib.load("encoders.joblib")
19
  scaler = joblib.load("scaler.joblib")
20
  if_model = joblib.load("best_if_model.joblib")
21
  svm_model = joblib.load("best_svm_model.joblib")
22
  feature_names = joblib.load("feature_names.joblib")
 
 
23
 
24
- # SHAP explainer
25
  explainer = shap.TreeExplainer(model)
26
 
27
  def parse_url_params(url):
@@ -38,6 +39,7 @@ def prepare_features(row_dict):
38
  'device': row_dict.get('device', 'unknown'),
39
  'd': row_dict.get('d', '')
40
  }
 
41
  query_params = parse_url_params(base['d'])
42
  combined = {**base, **query_params}
43
  combined.pop('d', None)
@@ -58,34 +60,32 @@ def prepare_features(row_dict):
58
  df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)
59
 
60
  df_scaled = scaler.transform(df)
61
- iso_score = if_model.decision_function(df_scaled)
62
- svm_score = svm_model.decision_function(df_scaled)
63
 
64
- # Compute anomaly scores directly
65
- iso_score = if_model.decision_function(df_scaled)[0]
66
- svm_score = svm_model.decision_function(df_scaled)[0]
 
 
67
 
68
- # Invert to get anomaly-like probability (higher = more anomalous)
69
- df['iso_anomaly_prob'] = round(float(-iso_score), 4)
70
- df['svm_anomaly_prob'] = round(float(-svm_score), 4)
71
 
72
  return df[feature_names]
73
 
74
  def generate_shap_explanation(index, shap_values, X, encoders=None, class_index=1, top_n=10):
75
- # Extract SHAP values
76
- if isinstance(shap_values, list):
77
  shap_vals = shap_values[class_index][index]
78
- base_val = float(explainer.expected_value[class_index])
79
  else:
80
  shap_vals = shap_values[index]
81
- base_val = float(explainer.expected_value if np.isscalar(explainer.expected_value) else explainer.expected_value[0])
82
-
83
- # Convert SHAP values to float scalars
84
- shap_scalar_vals = [float(s[0]) if isinstance(s, np.ndarray) else float(s) for s in shap_vals]
85
 
86
  x_vals = X.iloc[index]
87
  feature_names = X.columns
88
 
 
 
 
89
  decoded_vals = {}
90
  for col in feature_names:
91
  val = x_vals[col]
@@ -97,34 +97,28 @@ def generate_shap_explanation(index, shap_values, X, encoders=None, class_index=
97
  except:
98
  decoded_vals[col] = val
99
 
100
- # Top contributing features
101
  feature_contribs = list(zip(feature_names, decoded_vals.values(), shap_scalar_vals))
102
  feature_contribs = sorted(feature_contribs, key=lambda x: abs(x[2]), reverse=True)[:top_n]
103
 
104
- positive_impacts = []
105
- negative_impacts = []
106
-
107
  for fname, fval, sval in feature_contribs:
108
- line = f" - {fname:20} = {str(fval):<20} contributed {abs(sval):.4f}"
109
- if sval > 0:
110
- positive_impacts.append(line)
111
- elif sval < 0:
112
- negative_impacts.append(line)
113
 
114
- final_log_odds = float(base_val + np.sum(shap_scalar_vals))
115
 
116
- # Explanation text
117
- explanation = f"\n==== SHAP Explanation for Bot Attack Classification ====\n"
118
  explanation += f"Base value (log-odds for class 1) : {base_val:.4f}\n"
119
  explanation += f"Predicted log-odds (class 1) : {final_log_odds:.4f}\n\n"
120
 
121
- if positive_impacts:
122
- explanation += "🔺 Factors that INCREASED Bot Likelihood:\n" + "\n".join(positive_impacts) + "\n\n"
123
- if negative_impacts:
124
- explanation += "🔻 Factors that DECREASED Bot Likelihood:\n" + "\n".join(negative_impacts) + "\n\n"
125
 
126
  explanation += "📝 These features collectively explain the model's decision.\n"
127
- return base_val, final_log_odds, explanation
128
 
129
  @app.post('/v1/predict')
130
  def predict():
@@ -133,16 +127,15 @@ def predict():
133
  X = prepare_features(row)
134
 
135
  probs = model.predict_proba(X)[0]
136
- class_index = list(model.classes_).index(1)
137
- pred_label = model.predict(X)[0]
138
 
139
  shap_values = explainer.shap_values(X)
140
- base_val, final_log_odds, explanation = generate_shap_explanation(0, shap_values, X, encoders, class_index)
141
 
142
  return jsonify({
143
  "Prediction": "Bot Attack" if pred_label == 1 else "Legitimate",
144
- "SHAP Base Value": round(base_val, 4),
145
- "SHAP Predicted Value": round(final_log_odds, 4),
146
  "SHAP Explanation": explanation
147
  })
148
 
 
13
def home():
    """Root endpoint: confirm the API is up and reachable."""
    welcome_message = "✅ Welcome to the Bot Prediction API!"
    return welcome_message
15
 
16
# Load all model artifacts
# NOTE(review): artifacts are loaded at import time, so a missing/corrupt
# .joblib file fails the whole app at startup rather than per-request.
model = joblib.load("model.joblib")                 # primary classifier (tree-based; used by TreeExplainer below)
encoders = joblib.load("encoders.joblib")           # per-column label encoders, used to decode feature values for explanations
scaler = joblib.load("scaler.joblib")               # feature scaler applied before anomaly scoring
if_model = joblib.load("best_if_model.joblib")      # Isolation Forest anomaly model
svm_model = joblib.load("best_svm_model.joblib")    # one-class SVM anomaly model
feature_names = joblib.load("feature_names.joblib") # canonical feature order expected by `model`
iso_scaler = joblib.load("iso_scaler.joblib")       # scaler mapping Isolation Forest decision scores to [0, 1]
svm_scaler = joblib.load("svm_scaler.joblib")       # scaler mapping SVM decision scores to [0, 1]

# SHAP explainer over the primary classifier; `expected_value` and
# `shap_values` from this object drive the per-prediction explanations.
explainer = shap.TreeExplainer(model)
27
 
28
  def parse_url_params(url):
 
39
  'device': row_dict.get('device', 'unknown'),
40
  'd': row_dict.get('d', '')
41
  }
42
+
43
  query_params = parse_url_params(base['d'])
44
  combined = {**base, **query_params}
45
  combined.pop('d', None)
 
60
  df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)
61
 
62
  df_scaled = scaler.transform(df)
 
 
63
 
64
+ iso_score = if_model.decision_function(df_scaled).reshape(-1, 1)
65
+ iso_prob = 1 - iso_scaler.transform(iso_score)[0][0]
66
+
67
+ svm_score = svm_model.decision_function(df_scaled).reshape(-1, 1)
68
+ svm_prob = 1 - svm_scaler.transform(svm_score)[0][0]
69
 
70
+ df['iso_anomaly_prob'] = iso_prob
71
+ df['svm_anomaly_prob'] = svm_prob
 
72
 
73
  return df[feature_names]
74
 
75
  def generate_shap_explanation(index, shap_values, X, encoders=None, class_index=1, top_n=10):
76
+ if isinstance(shap_values, list): # Tree explainer returns list for multiclass
 
77
  shap_vals = shap_values[class_index][index]
78
+ base_val = explainer.expected_value[class_index]
79
  else:
80
  shap_vals = shap_values[index]
81
+ base_val = explainer.expected_value if np.isscalar(explainer.expected_value) else explainer.expected_value[class_index]
 
 
 
82
 
83
  x_vals = X.iloc[index]
84
  feature_names = X.columns
85
 
86
+ # Flatten scalar values
87
+ shap_scalar_vals = [float(s[0]) if isinstance(s, np.ndarray) else float(s) for s in shap_vals]
88
+
89
  decoded_vals = {}
90
  for col in feature_names:
91
  val = x_vals[col]
 
97
  except:
98
  decoded_vals[col] = val
99
 
 
100
  feature_contribs = list(zip(feature_names, decoded_vals.values(), shap_scalar_vals))
101
  feature_contribs = sorted(feature_contribs, key=lambda x: abs(x[2]), reverse=True)[:top_n]
102
 
103
+ pos, neg = [], []
 
 
104
  for fname, fval, sval in feature_contribs:
105
+ line = f" - {fname:20} = {str(fval):<20} contributed {sval:.4f}"
106
+ if sval > 0: pos.append(line)
107
+ elif sval < 0: neg.append(line)
 
 
108
 
109
+ final_log_odds = base_val + sum(shap_scalar_vals)
110
 
111
+ explanation = f"==== SHAP Explanation for Bot Attack Classification ====\n\n"
 
112
  explanation += f"Base value (log-odds for class 1) : {base_val:.4f}\n"
113
  explanation += f"Predicted log-odds (class 1) : {final_log_odds:.4f}\n\n"
114
 
115
+ if pos:
116
+ explanation += "🔺 Factors that INCREASED Bot Likelihood:\n" + "\n".join(pos) + "\n\n"
117
+ if neg:
118
+ explanation += "🔻 Factors that DECREASED Bot Likelihood:\n" + "\n".join(neg) + "\n\n"
119
 
120
  explanation += "📝 These features collectively explain the model's decision.\n"
121
+ return explanation, base_val, final_log_odds
122
 
123
  @app.post('/v1/predict')
124
  def predict():
 
127
  X = prepare_features(row)
128
 
129
  probs = model.predict_proba(X)[0]
130
+ pred_label = model.classes_[np.argmax(probs)]
 
131
 
132
  shap_values = explainer.shap_values(X)
133
+ explanation, base_val, final_log_odds = generate_shap_explanation(0, shap_values, X, encoders)
134
 
135
  return jsonify({
136
  "Prediction": "Bot Attack" if pred_label == 1 else "Legitimate",
137
+ "SHAP Base Value": round(float(base_val), 4),
138
+ "SHAP Predicted Value": round(float(final_log_odds), 4),
139
  "SHAP Explanation": explanation
140
  })
141