Update modules/machine_failure.py

#7
by pranshh - opened
Files changed (1)
  1. modules/machine_failure.py +108 -273
modules/machine_failure.py CHANGED
@@ -1,39 +1,26 @@
-from flask import Blueprint, render_template, request, jsonify, redirect, url_for, flash, session
+from flask import Blueprint, render_template, request, jsonify, redirect, url_for, flash
 import pandas as pd
 import numpy as np
-import plotly.express as px
-import plotly.utils
-import json
-import os
-import joblib
-from datetime import datetime
 from sklearn.model_selection import train_test_split
 from sklearn.preprocessing import StandardScaler, LabelEncoder
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
-import random
+import random

 machine_failure_bp = Blueprint('machine_failure', __name__, url_prefix='/predict/machine_failure')

-UPLOAD_FOLDER = 'temp_uploads'
-os.makedirs(UPLOAD_FOLDER, exist_ok=True)
+# --- Global variables to hold data and models (simple logic) ---
+_current_df_machine = None
+_model_machine = None
+_scaler_machine = None
+_encoders_machine = None
+_feature_names_machine = None
+_target_col_machine = None
+_original_cols_machine = None

-def get_current_df():
-    try:
-        csv_path = session.get('machine_csv_path')
-        print(f"Debug - CSV Path from session: {csv_path}")
-
-        if csv_path and os.path.exists(csv_path):
-            print(f"Debug - File exists at path: {csv_path}")
-            return pd.read_csv(csv_path)
-
-        print("Debug - No valid CSV path found")
-        return None
-    except Exception as e:
-        print(f"Debug - Error in get_current_df: {str(e)}")
-        return None

 def get_summary_stats(df):
+    """Helper function to get summary statistics from a dataframe."""
     return {
         'total_rows': len(df),
         'total_columns': len(df.columns),
@@ -44,67 +31,35 @@ def get_summary_stats(df):
     }

 def preprocess_data(df, for_prediction=False, label_encoders=None):
-    """Preprocess the dataframe for machine learning
-
-    Args:
-        df (pd.DataFrame): The input DataFrame.
-        for_prediction (bool): True if preprocessing for a single prediction, False for training.
-        label_encoders (dict): Dictionary of pre-fitted LabelEncoders for single prediction.
-    """
+    """Helper function to preprocess data for modeling."""
     df_processed = df.copy()
-
-    categorical_columns = []
-    numerical_columns = []

-    # Dynamically determine column types based on the current DataFrame
-    for column in df_processed.columns:
-        if column in ['timestamp', 'maintenance_timestamp']:
-            continue
-
-        # Check if column is numeric or can be converted to numeric (after dropping NaNs for check)
-        if pd.api.types.is_numeric_dtype(df_processed[column]):
-            numerical_columns.append(column)
-        else:
-            try:
-                # Attempt to convert to numeric, if successful, it's numeric
-                if pd.to_numeric(df_processed[column].dropna()).notna().all():
-                    numerical_columns.append(column)
-                else:
-                    categorical_columns.append(column)
-            except ValueError:
-                categorical_columns.append(column)
-
+    # Identify categorical columns before any modifications
+    categorical_columns = [col for col in df_processed.columns if df_processed[col].dtype == 'object' and col not in ['timestamp', 'maintenance_timestamp']]
+
     # Handle timestamps
     for time_col in ['timestamp', 'maintenance_timestamp']:
         if time_col in df_processed.columns:
-            # Convert to datetime, coercing errors
             df_processed[time_col] = pd.to_datetime(df_processed[time_col], errors='coerce')
-
-            # Extract features only if the column contains valid datetime values
-            if not df_processed[time_col].isnull().all():
-                df_processed[f'{time_col}_hour'] = df_processed[time_col].dt.hour.fillna(0) # Fill NaN with 0 if time part is missing
-                df_processed[f'{time_col}_day'] = df_processed[time_col].dt.day.fillna(0)
-                df_processed[f'{time_col}_month'] = df_processed[time_col].dt.month.fillna(0)
-            else:
-                df_processed[f'{time_col}_hour'] = 0
-                df_processed[f'{time_col}_day'] = 0
-                df_processed[f'{time_col}_month'] = 0
-
+            df_processed[f'{time_col}_hour'] = df_processed[time_col].dt.hour.fillna(0)
+            df_processed[f'{time_col}_day'] = df_processed[time_col].dt.day.fillna(0)
+            df_processed[f'{time_col}_month'] = df_processed[time_col].dt.month.fillna(0)
             df_processed = df_processed.drop(columns=[time_col])

     # Encode categorical variables
     current_label_encoders = {}
-    if not for_prediction: # During training, fit and save encoders
+    if not for_prediction: # Fit new encoders for training
+        current_label_encoders = {}
         for col in categorical_columns:
             if col in df_processed.columns:
                 le = LabelEncoder()
-                df_processed[col] = le.fit_transform(df_processed[col].astype(str))
+                df_processed[col] = le.fit_transform(df_processed[col].astype(str).fillna('missing'))
                 current_label_encoders[col] = le
-    else:
+    else: # Use existing encoders for prediction
        for col, le in label_encoders.items():
             if col in df_processed.columns:
-                # Use a lambda function to handle unseen labels: map to -1 or a default if not in classes
-                df_processed[col] = df_processed[col].astype(str).apply(
+                # Handle unseen labels during prediction by mapping them to -1
+                df_processed[col] = df_processed[col].astype(str).fillna('missing').apply(
                     lambda x: le.transform([x])[0] if x in le.classes_ else -1
                 )
     return df_processed, current_label_encoders
@@ -112,10 +67,14 @@ def preprocess_data(df, for_prediction=False, label_encoders=None):

 @machine_failure_bp.route('/', methods=['GET'])
 def show_machine_failure():
+    """Renders the main page for the machine failure tool."""
     return render_template('machine_failure.html', title="Machine Failure Prediction")

-@machine_failure_bp.route('/upload_file', methods=['POST'])
+
+@machine_failure_bp.route('/upload_machine', methods=['POST'])
 def upload_file_machine():
+    """Handles file upload and displays data preview."""
+    global _current_df_machine, _original_cols_machine
     if 'machine_file' not in request.files:
         flash('No file selected')
         return redirect(url_for('machine_failure.show_machine_failure'))
@@ -126,78 +85,59 @@ def upload_file_machine():
         return redirect(url_for('machine_failure.show_machine_failure'))

     try:
-        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
-        safe_filename = f"machine_data_{timestamp}.csv"
-        file_path = os.path.join(UPLOAD_FOLDER, safe_filename)
-
-        file.save(file_path)
-        session['machine_csv_path'] = file_path
-        print(f"Debug - Saved file to: {file_path}")
-
-        df = pd.read_csv(file_path)
-        preview_data = df.head().to_dict('records')
-        summary_stats = get_summary_stats(df)
-
-        session['original_columns'] = df.columns.tolist()
+        _current_df_machine = pd.read_csv(file)
+        _original_cols_machine = _current_df_machine.columns.tolist() # Save original column order
+        preview_data = _current_df_machine.head().to_dict('records')
+        summary_stats = get_summary_stats(_current_df_machine)

         return render_template('machine_failure.html',
                                title="Machine Failure Prediction",
                                preview_data=preview_data,
-                               columns=df.columns.tolist(),
+                               columns=_current_df_machine.columns.tolist(),
                                summary_stats=summary_stats)
-
     except Exception as e:
-        print(f"Debug - Upload error: {str(e)}")
         flash(f'Error processing file: {str(e)}')
         return redirect(url_for('machine_failure.show_machine_failure'))
-
+
+
 @machine_failure_bp.route('/run_prediction', methods=['POST'])
 def run_prediction():
-    try:
-        df = get_current_df()
-        if df is None:
-            return jsonify({'success': False, 'error': 'No data available. Please upload a CSV file first.'})
+    """Trains the model and returns performance metrics."""
+    global _current_df_machine, _model_machine, _scaler_machine, _encoders_machine, _feature_names_machine, _target_col_machine
+    if _current_df_machine is None:
+        return jsonify({'success': False, 'error': 'No data available. Please upload a CSV file first.'})

-        target_col = request.form.get('target_col')
-        if not target_col:
-            return jsonify({'success': False, 'error': 'Target column not selected.'})
+    target_col = request.form.get('target_col')
+    if not target_col:
+        return jsonify({'success': False, 'error': 'Target column not selected.'})

-        # Preprocess the data for training
-        df_processed, label_encoders = preprocess_data(df.copy(), for_prediction=False)
-
-        encoders_path = os.path.join(UPLOAD_FOLDER, f'encoders_{datetime.now().strftime("%Y%m%d_%H%M%S")}.joblib')
-        joblib.dump(label_encoders, encoders_path)
-        session['encoders_path'] = encoders_path
+    _target_col_machine = target_col
+
+    try:
+        df_processed, label_encoders = preprocess_data(_current_df_machine.copy())
+        _encoders_machine = label_encoders

-        # Prepare features and target
         if target_col not in df_processed.columns:
-            return jsonify({'success': False, 'error': f"Target column '{target_col}' not found after preprocessing. Check if it was dropped or transformed incorrectly."})
+            return jsonify({'success': False, 'error': f"Target column '{target_col}' not found after preprocessing."})

         X = df_processed.drop(columns=[target_col])
         y = df_processed[target_col]
+        _feature_names_machine = X.columns.tolist()

         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
-        scaler = StandardScaler()
-        X_train_scaled = scaler.fit_transform(X_train)
-        X_test_scaled = scaler.transform(X_test)
-
-        clf = RandomForestClassifier(random_state=42)
-        clf.fit(X_train_scaled, y_train)
-        y_pred = clf.predict(X_test_scaled)
-
-        importances = clf.feature_importances_
-        feature_names = X.columns
-        feature_importance = sorted(
-            zip(feature_names, importances),
-            key=lambda x: x[1],
-            reverse=True
-        )[:5]
+
+        _scaler_machine = StandardScaler()
+        X_train_scaled = _scaler_machine.fit_transform(X_train)
+        X_test_scaled = _scaler_machine.transform(X_test)

+        _model_machine = RandomForestClassifier(random_state=42)
+        _model_machine.fit(X_train_scaled, y_train)
+        y_pred = _model_machine.predict(X_test_scaled)
+
+        importances = _model_machine.feature_importances_
+        feature_importance = sorted(zip(_feature_names_machine, importances), key=lambda x: x[1], reverse=True)[:5]
         top_features = [{'feature': f, 'importance': float(imp)} for f, imp in feature_importance]
-
-        session['feature_names'] = X.columns.tolist()
-        session['target_column_name'] = target_col
-
+
         metrics = {
             'Accuracy': accuracy_score(y_test, y_pred),
             'Precision': precision_score(y_test, y_pred, average='weighted', zero_division=0),
@@ -205,172 +145,67 @@ def run_prediction():
             'F1 Score': f1_score(y_test, y_pred, average='weighted', zero_division=0)
         }

-        model_path = os.path.join(UPLOAD_FOLDER, f'model_{datetime.now().strftime("%Y%m%d_%H%M%S")}.joblib')
-        scaler_path = os.path.join(UPLOAD_FOLDER, f'scaler_{datetime.now().strftime("%Y%m%d_%H%M%S")}.joblib')
-
-        joblib.dump(clf, model_path)
-        joblib.dump(scaler, scaler_path)
-
-        session['model_path'] = model_path
-        session['scaler_path'] = scaler_path
-
-        return jsonify({
-            'success': True,
-            'metrics': metrics,
-            'top_features': top_features,
-        })
-
+        return jsonify({'success': True, 'metrics': metrics, 'top_features': top_features})
     except Exception as e:
-        print(f"Error in run_prediction: {e}")
-        return jsonify({'success': False, 'error': str(e)})
+        return jsonify({'success': False, 'error': f'An error occurred: {str(e)}'})
+

 @machine_failure_bp.route('/get_form_data', methods=['GET'])
 def get_form_data():
-    try:
-        df = get_current_df()
-        if df is None:
-            return jsonify({'success': False, 'error': 'No data available. Please upload a file first.'})
-
-        target_col = session.get('target_column_name')
-        if not target_col:
-            return jsonify({'success': False, 'error': 'Target column not found in session. Please run prediction first.'})
-
-        exclude_cols = ['error_severity', 'downtime_minutes', 'failure_type', target_col]
-
-        form_fields = []
-        for col in df.columns:
-            if col.lower() in [ec.lower() for ec in exclude_cols]:
-                continue
-
-            default_value = None
-            if not df[col].dropna().empty:
-                if pd.api.types.is_numeric_dtype(df[col]):
-                    min_val = df[col].min()
-                    max_val = df[col].max()
-                    if pd.isna(min_val) or pd.isna(max_val):
-                        default_value = 0.0
-                    else:
-                        default_value = round(random.uniform(float(min_val), float(max_val)), 2)
-                elif col in ['timestamp', 'maintenance_timestamp']:
-                    sample_date = random.choice(df[col].dropna().tolist())
-                    try:
-                        parsed_date = pd.to_datetime(sample_date)
-                        if pd.isna(parsed_date):
-                            default_value = "YYYY-MM-DD HH:MM:SS" # Fallback for invalid dates
-                        else:
-                            default_value = parsed_date.strftime('%Y-%m-%d %H:%M:%S')
-                    except Exception:
-                        default_value = "YYYY-MM-DD HH:MM:SS"
-                else:
-                    unique_vals_str = [str(x) for x in df[col].dropna().unique()]
-                    if unique_vals_str:
-                        default_value = random.choice(unique_vals_str)
-                    else:
-                        default_value = ""
-
-            if pd.api.types.is_numeric_dtype(df[col]):
-                form_fields.append({
-                    'name': col,
-                    'type': 'number',
-                    'default_value': default_value
-                })
-            elif col in ['timestamp', 'maintenance_timestamp']:
-                form_fields.append({
-                    'name': col,
-                    'type': 'text',
-                    'placeholder': 'YYYY-MM-DD HH:MM:SS (optional)',
-                    'default_value': default_value
-                })
-            else:
-                unique_values = [str(x) for x in df[col].dropna().unique().tolist()]
-                form_fields.append({
-                    'name': col,
-                    'type': 'select',
-                    'options': unique_values,
-                    'default_value': default_value
-                })
-
-        return jsonify({'success': True, 'form_fields': form_fields})
-
-    except Exception as e:
-        print(f"Error in get_form_data: {e}")
-        return jsonify({'success': False, 'error': str(e)})
+    """Generates the fields for the single prediction form."""
+    if _current_df_machine is None:
+        return jsonify({'success': False, 'error': 'No data available. Please upload a file first.'})
+    if _target_col_machine is None:
+        return jsonify({'success': False, 'error': 'Model not trained yet. Please run a prediction first.'})
+
+    df = _current_df_machine
+    exclude_cols = ['error_severity', 'downtime_minutes', 'failure_type', _target_col_machine]
+    form_fields = []
+
+    for col in df.columns:
+        if col.lower() in [ec.lower() for ec in exclude_cols]:
+            continue
+
+        field_info = {'name': col}
+        if pd.api.types.is_numeric_dtype(df[col]):
+            field_info['type'] = 'number'
+            field_info['default_value'] = round(df[col].mean(), 2) if not df[col].empty else 0
+        elif col in ['timestamp', 'maintenance_timestamp']:
+            field_info['type'] = 'text'
+            field_info['placeholder'] = 'YYYY-MM-DD HH:MM:SS'
+            field_info['default_value'] = pd.to_datetime(df[col].mode()[0]).strftime('%Y-%m-%d %H:%M:%S') if not df[col].mode().empty else ''
+        else:
+            field_info['type'] = 'select'
+            field_info['options'] = [str(x) for x in df[col].dropna().unique().tolist()]
+            field_info['default_value'] = df[col].mode()[0] if not df[col].mode().empty else ''
+        form_fields.append(field_info)
+
+    return jsonify({'success': True, 'form_fields': form_fields})


 @machine_failure_bp.route('/predict_single', methods=['POST'])
 def predict_single():
+    """Makes a prediction for a single instance of data."""
+    if not all([_model_machine, _scaler_machine, _encoders_machine, _feature_names_machine, _original_cols_machine]):
+        return jsonify({'success': False, 'error': 'Model or configuration not ready. Please run a prediction first.'})
+
     try:
-        model_path = session.get('model_path')
-        scaler_path = session.get('scaler_path')
-        encoders_path = session.get('encoders_path')
-        feature_names = session.get('feature_names')
-        target_col = session.get('target_column_name')
-
-        if not all([model_path, scaler_path, encoders_path, feature_names, target_col]):
-            return jsonify({'success': False, 'error': 'Model or preprocessing artifacts not found. Please train a model first.'})
-
-        model = joblib.load(model_path)
-        scaler = joblib.load(scaler_path)
-        label_encoders = joblib.load(encoders_path)
-
         input_data = request.json
-        if not input_data:
-            return jsonify({'success': False, 'error': 'No input data provided.'})
+        input_df = pd.DataFrame([input_data], columns=_original_cols_machine) # Ensure correct column order

-        original_uploaded_columns = session.get('original_columns')
+        preprocessed_df, _ = preprocess_data(input_df.copy(), for_prediction=True, label_encoders=_encoders_machine)

-        if not original_uploaded_columns:
-            return jsonify({'success': False, 'error': 'Original dataset column names not found in session. Please upload a file.'})
+        # Ensure all feature names are present
+        final_features = pd.DataFrame(columns=_feature_names_machine)
+        final_features = pd.concat([final_features, preprocessed_df], ignore_index=True).fillna(0)

-        full_input_df = pd.DataFrame(columns=original_uploaded_columns)
+        input_scaled = _scaler_machine.transform(final_features[_feature_names_machine])

-        single_row_input_df = pd.DataFrame([input_data])
+        prediction = _model_machine.predict(input_scaled)[0]
+        prediction_display = "Failure" if prediction == 1 else "No Failure"

-        for col in original_uploaded_columns:
-            if col in single_row_input_df.columns:
-                full_input_df.loc[0, col] = single_row_input_df.loc[0, col]
-            else:
-                full_input_df.loc[0, col] = np.nan
-
+        probability = _model_machine.predict_proba(input_scaled)[0].tolist()

-        preprocessed_input_df, _ = preprocess_data(full_input_df.copy(), for_prediction=True, label_encoders=label_encoders)
-
-        final_input_features = pd.DataFrame(columns=feature_names)
-
-        for col in feature_names:
-            if col in preprocessed_input_df.columns:
-                final_input_features[col] = pd.to_numeric(preprocessed_input_df[col], errors='coerce').values
-            else:
-                final_input_features[col] = 0.0
-
-        final_input_features = final_input_features.fillna(0.0)
-
-        input_scaled = scaler.transform(final_input_features)
-
-        prediction_value = model.predict(input_scaled)[0]
-
-        prediction_display = prediction_value
-        if target_col in label_encoders:
-            if isinstance(prediction_value, (int, np.integer)) and prediction_value < len(label_encoders[target_col].classes_):
-                prediction_display = str(label_encoders[target_col].inverse_transform([prediction_value])[0])
-            else:
-                prediction_display = str(prediction_value) + " (Unknown Class)"
-        else:
-            if isinstance(prediction_value, np.number):
-                prediction_display = float(prediction_value)
-            else:
-                prediction_display = prediction_value
-
-        probability = None
-        if hasattr(model, 'predict_proba'):
-            probability = model.predict_proba(input_scaled)[0].tolist()
-            probability = [float(p) for p in probability]
-
-        return jsonify({
-            'success': True,
-            'prediction': prediction_display,
-            'probability': probability
-        })
+        return jsonify({'success': True, 'prediction': prediction_display, 'probability': probability})
     except Exception as e:
-        print(f"Error in predict_single: {e}")
-        return jsonify({'success': False, 'error': str(e)})
+        return jsonify({'success': False, 'error': f'An error occurred during prediction: {str(e)}'})
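
For reviewers who want to smoke-test the reworked flow locally, here is a minimal sketch using Flask's test client. The app factory name (create_app), the sample CSV, and the example column names (temperature, vibration, failure) are assumptions for illustration only; the routes, field names (machine_file, target_col), and response shapes come from the diff above.

# Sketch only: create_app, the CSV file, and the column names are hypothetical;
# routes and field names match machine_failure_bp in this PR.
from app import create_app  # hypothetical application factory that registers machine_failure_bp

app = create_app()
client = app.test_client()

# 1) Upload a CSV via the 'machine_file' multipart field (new /upload_machine route).
with open("sample_machine_data.csv", "rb") as f:
    client.post("/predict/machine_failure/upload_machine",
                data={"machine_file": (f, "sample_machine_data.csv")},
                content_type="multipart/form-data")

# 2) Train on an assumed target column, sent as the 'target_col' form field.
train_resp = client.post("/predict/machine_failure/run_prediction",
                         data={"target_col": "failure"})
print(train_resp.get_json())  # expected: {'success': True, 'metrics': {...}, 'top_features': [...]}

# 3) Single prediction: JSON body keyed by the original column names.
pred_resp = client.post("/predict/machine_failure/predict_single",
                        json={"temperature": 71.3, "vibration": 0.42})
print(pred_resp.get_json())  # expected: {'success': True, 'prediction': ..., 'probability': [...]}

Note that, because this rework keeps the dataframe, model, scaler, and encoders in module-level globals rather than in the session or on disk, all three calls have to hit the same worker process for the flow to succeed.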