Mehak-Mazhar committed on
Commit
b911157
·
verified ·
1 Parent(s): a335cea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +79 -118
app.py CHANGED
@@ -1,7 +1,3 @@
1
- # Gradio app: CSV -> Preprocessing -> Logistic Regression with hyperparameter tuning
2
- # Save this file as gradio_logreg_app.py and run: python gradio_logreg_app.py
3
-
4
- import io
5
  import pandas as pd
6
  import numpy as np
7
  import matplotlib.pyplot as plt
@@ -11,9 +7,11 @@ from sklearn.impute import SimpleImputer
11
  from sklearn.compose import ColumnTransformer
12
  from sklearn.pipeline import Pipeline
13
  from sklearn.linear_model import LogisticRegression
14
- from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_auc_score
15
  import gradio as gr
16
 
 
 
17
  def load_csv(file_path):
18
  try:
19
  df = pd.read_csv(file_path)
@@ -24,16 +22,23 @@ def load_csv(file_path):
24
  return None, f"Failed to read file: {e} / {e2}"
25
  return df, None
26
 
 
 
27
  def on_upload(file):
28
  if file is None:
29
- return gr.Dropdown.update(choices=[]), "No file uploaded", None
30
- df, err = load_csv(file.name) # file.name = temp file path
 
31
  if err:
32
- return gr.Dropdown.update(choices=[]), f"Error: {err}", None
 
33
  cols = df.columns.tolist()
34
- return gr.Dropdown.update(choices=cols, value=cols[-1] if len(cols) > 0 else None), f"Loaded {len(df)} rows, {len(cols)} columns", df
 
 
 
35
 
36
- # Helper: build preprocessing + model pipeline
37
  def build_pipeline(df, target_col, impute_strategy, apply_scaling, encode_categorical):
38
  X = df.drop(columns=[target_col])
39
  numeric_cols = X.select_dtypes(include=[np.number]).columns.tolist()
@@ -46,175 +51,131 @@ def build_pipeline(df, target_col, impute_strategy, apply_scaling, encode_catego
46
  num_transformers.append(('imputer', SimpleImputer(strategy=impute_strategy)))
47
  if apply_scaling:
48
  num_transformers.append(('scaler', StandardScaler()))
49
- if num_transformers:
50
- from sklearn.pipeline import make_pipeline
51
- transformers.append(('num', make_pipeline(*[t[1] for t in num_transformers]), numeric_cols))
52
 
53
  if categorical_cols and encode_categorical:
54
- cat_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')),
55
- ('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False))])
 
 
56
  transformers.append(('cat', cat_transformer, categorical_cols))
57
 
58
- if transformers:
59
- preprocessor = ColumnTransformer(transformers=transformers, remainder='passthrough')
60
- else:
61
- preprocessor = 'passthrough'
62
 
63
  pipe = Pipeline(steps=[('preproc', preprocessor), ('clf', LogisticRegression(max_iter=200))])
64
  return pipe
65
 
66
 
67
- # Training function
68
  def train_model(df, target_col, test_size, random_state, impute_strategy, apply_scaling, encode_categorical,
69
  use_grid, c_min, c_max, c_steps, penalties, solver, cv_folds, max_iter, n_jobs):
70
- # Basic checks
71
  if df is None:
72
  return "No data loaded", None, None, None
73
  if target_col not in df.columns:
74
  return f"Target column '{target_col}' not found", None, None, None
75
 
76
- # Drop rows where target is missing
77
- data = df.copy()
78
- data = data.dropna(subset=[target_col])
79
-
80
- # If target is not numeric, try to encode it
81
  y = data[target_col]
82
  if y.dtype == object or y.dtype.name == 'category' or y.dtype == bool:
83
  y = pd.factorize(y)[0]
84
 
85
  X = data.drop(columns=[target_col])
86
 
87
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state, stratify=y if len(np.unique(y))>1 else None)
 
 
 
88
 
89
  pipe = build_pipeline(pd.concat([X_train, y_train], axis=1), target_col, impute_strategy, apply_scaling, encode_categorical)
90
  pipe.named_steps['clf'].max_iter = max_iter
91
 
92
  if use_grid:
93
- # build param grid for C and penalty
94
  C_values = np.linspace(c_min, c_max, int(max(1, c_steps)))
95
- param_grid = {}
96
- # penalty and solver interaction needs care
97
- selected_penalties = penalties if len(penalties)>0 else ['l2']
98
- param_grid['clf__C'] = C_values
99
- param_grid['clf__penalty'] = selected_penalties
100
- param_grid['clf__solver'] = [solver]
101
-
102
  gs = GridSearchCV(pipe, param_grid, cv=cv_folds, n_jobs=n_jobs, scoring='accuracy')
103
  gs.fit(X_train, y_train)
104
- best = gs.best_estimator_
105
- best_params = gs.best_params_
106
- model = best
107
- train_pred = model.predict(X_train)
108
- test_pred = model.predict(X_test)
109
- acc = accuracy_score(y_test, test_pred)
110
- report = classification_report(y_test, test_pred)
111
- cm = confusion_matrix(y_test, test_pred)
112
- extra = f"Best params: {best_params}"
113
  else:
114
- # set hyperparams from UI
115
  clf = pipe.named_steps['clf']
116
- try:
117
- clf.set_params(C=float((c_min+c_max)/2), penalty=penalties[0] if penalties else 'l2', solver=solver)
118
- except Exception:
119
- # fallback: set only C
120
- clf.set_params(C=float((c_min+c_max)/2))
121
-
122
  pipe.fit(X_train, y_train)
123
  model = pipe
124
- train_pred = model.predict(X_train)
125
- test_pred = model.predict(X_test)
126
- acc = accuracy_score(y_test, test_pred)
127
- report = classification_report(y_test, test_pred)
128
- cm = confusion_matrix(y_test, test_pred)
129
  extra = "Trained with provided hyperparameters"
130
 
131
- # Plot confusion matrix
132
- fig, ax = plt.subplots(figsize=(4,4))
 
 
 
 
 
133
  ax.imshow(cm, interpolation='nearest')
134
  ax.set_title('Confusion matrix')
135
  ax.set_xlabel('Predicted')
136
  ax.set_ylabel('Actual')
137
  for i in range(cm.shape[0]):
138
  for j in range(cm.shape[1]):
139
- ax.text(j, i, str(cm[i, j]), ha='center', va='center', color='white' if cm[i,j]>cm.max()/2 else 'black')
 
140
  plt.tight_layout()
141
 
142
- return f"Accuracy: {acc:.4f}\n{extra}", fig, report, model
143
 
144
 
145
- # Build Gradio interface
146
- with gr.Blocks(title="CSV -> Logistic Regression (with tuning)") as demo:
147
- gr.Markdown("""
148
- # CSV → Preprocessing → Logistic Regression
149
- 1. Upload a CSV or Excel file.
150
- 2. Select the target (label) column.
151
- 3. Choose preprocessing options and hyperparameters.
152
- 4. Train model and view accuracy, confusion matrix and classification report.
153
- """)
154
 
155
  with gr.Row():
156
- with gr.Column(scale=1):
157
  file_input = gr.File(label="Upload CSV/Excel file", file_types=['.csv', '.xls', '.xlsx'])
158
  load_status = gr.Textbox(label="File status", interactive=False)
159
- target_dropdown = gr.Dropdown(label="Select target column", choices=[], value=None)
160
- preview_button = gr.Button("Preview data")
161
- preview_output = gr.Dataframe(headers=None, interactive=False)
162
 
163
- with gr.Column(scale=1):
164
- gr.Markdown("**Preprocessing**")
165
- impute_radio = gr.Radio(['mean','median','most_frequent','constant','none'], value='mean', label='Numeric imputation (if needed)')
166
  scaler_checkbox = gr.Checkbox(label='Apply Standard Scaling', value=True)
167
  encode_checkbox = gr.Checkbox(label='One-Hot Encode categorical', value=True)
168
-
169
- gr.Markdown("**Train / Test & Randomness**")
170
  test_size = gr.Slider(0.05, 0.5, value=0.2, step=0.05, label='Test size')
171
- random_state = gr.Number(value=42, precision=0, label='Random state (int)')
172
 
173
- gr.Markdown("**Logistic Regression hyperparams**")
174
- use_grid = gr.Checkbox(label='Use GridSearchCV for hyperparameter tuning', value=True)
175
- c_min = gr.Number(value=0.01, label='C (min)')
176
- c_max = gr.Number(value=10.0, label='C (max)')
177
- c_steps = gr.Slider(1, 20, value=5, step=1, label='C steps (grid size)')
178
- penalties = gr.CheckboxGroup(['l1','l2','elasticnet','none'], label='Penalties to try (Grid only / or choose first)', value=['l2'])
179
- solver = gr.Dropdown(['lbfgs','liblinear','saga','sag','newton-cg'], value='lbfgs', label='Solver')
180
- max_iter = gr.Slider(50,1000,value=200,step=10,label='Max iterations')
181
- cv_folds = gr.Slider(2,10,value=5,step=1,label='CV folds for GridSearch')
182
- n_jobs = gr.Slider(1,8,value=1,step=1,label='n_jobs for GridSearch')
183
 
184
- train_btn = gr.Button("Train model")
185
 
186
  with gr.Row():
187
- with gr.Column():
188
- accuracy_text = gr.Textbox(label='Accuracy & notes', interactive=False)
189
- conf_plot = gr.Plot(label='Confusion Matrix')
190
- with gr.Column():
191
- class_report = gr.Textbox(label='Classification report', interactive=False)
192
- model_obj = gr.JSON(label='Trained model (sklearn pipeline as repr)')
193
 
194
- # State to keep dataframe
195
  df_state = gr.State()
196
 
197
- # Wire upload -> get columns
198
- file_input.change(fn=on_upload, inputs=[file_input], outputs=[target_dropdown, load_status, df_state])
199
-
200
- def preview(df):
201
- if df is None:
202
- return pd.DataFrame()
203
- return df.head(20)
204
-
205
- preview_button.click(fn=preview, inputs=[df_state], outputs=[preview_output])
206
-
207
- def do_train(df, target, test_size_val, rand_state, impute_s, scale_flag, encode_flag,
208
- use_grid_flag, cmin, cmax, csteps, penalties_sel, solver_sel, cv_f, max_it, n_jobs_val):
209
- msg, fig, report, model = train_model(df, target, test_size_val, int(rand_state), impute_s, scale_flag, encode_flag,
210
- use_grid_flag, float(cmin), float(cmax), int(csteps), penalties_sel, solver_sel, int(cv_f), int(max_it), int(n_jobs_val))
211
- model_repr = str(model)
212
- return msg, fig, report, model_repr
213
-
214
- train_btn.click(fn=do_train, inputs=[df_state, target_dropdown, test_size, random_state, impute_radio, scaler_checkbox, encode_checkbox,
215
- use_grid, c_min, c_max, c_steps, penalties, solver, cv_folds, max_iter, n_jobs],
216
- outputs=[accuracy_text, conf_plot, class_report, model_obj])
217
 
 
 
 
 
 
 
218
 
219
  if __name__ == '__main__':
220
- demo.launch(server_name='0.0.0.0', share=False)
 
 
 
 
 
1
  import pandas as pd
2
  import numpy as np
3
  import matplotlib.pyplot as plt
 
7
  from sklearn.compose import ColumnTransformer
8
  from sklearn.pipeline import Pipeline
9
  from sklearn.linear_model import LogisticRegression
10
+ from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
11
  import gradio as gr
12
 
13
+
14
+ # Function to load CSV/Excel
15
  def load_csv(file_path):
16
  try:
17
  df = pd.read_csv(file_path)
 
22
  return None, f"Failed to read file: {e} / {e2}"
23
  return df, None
24
 
25
+
26
+ # File upload handler
27
# File upload handler
def on_upload(file):
    """Handle a file upload: load it, report status, and expose column choices.

    Returns a 4-tuple matching the Gradio outputs:
    (dropdown update, status message, dataframe for State, preview dataframe).
    On failure the dataframe state is None and the preview is empty.
    """
    if file is None:
        # gr.update(...) is the version-portable form; gr.Dropdown.update
        # was removed in Gradio 4.x.
        return gr.update(choices=[]), "No file uploaded", None, pd.DataFrame()

    df, err = load_csv(file.name)  # file.name holds the uploaded temp-file path
    if err:
        return gr.update(choices=[]), f"Error: {err}", None, pd.DataFrame()

    cols = df.columns.tolist()
    status_msg = f"Loaded {len(df)} rows, {len(cols)} columns"
    preview_df = df.head(20)
    # Default the target to the last column, the common CSV convention.
    return gr.update(choices=cols, value=cols[-1] if cols else None), status_msg, df, preview_df
39
+
40
 
41
+ # Build preprocessing pipeline
42
  def build_pipeline(df, target_col, impute_strategy, apply_scaling, encode_categorical):
43
  X = df.drop(columns=[target_col])
44
  numeric_cols = X.select_dtypes(include=[np.number]).columns.tolist()
 
51
  num_transformers.append(('imputer', SimpleImputer(strategy=impute_strategy)))
52
  if apply_scaling:
53
  num_transformers.append(('scaler', StandardScaler()))
54
+ from sklearn.pipeline import make_pipeline
55
+ transformers.append(('num', make_pipeline(*[t[1] for t in num_transformers]), numeric_cols))
 
56
 
57
  if categorical_cols and encode_categorical:
58
+ cat_transformer = Pipeline(steps=[
59
+ ('imputer', SimpleImputer(strategy='most_frequent')),
60
+ ('ohe', OneHotEncoder(handle_unknown='ignore', sparse=False))
61
+ ])
62
  transformers.append(('cat', cat_transformer, categorical_cols))
63
 
64
+ preprocessor = ColumnTransformer(transformers=transformers, remainder='passthrough') if transformers else 'passthrough'
 
 
 
65
 
66
  pipe = Pipeline(steps=[('preproc', preprocessor), ('clf', LogisticRegression(max_iter=200))])
67
  return pipe
68
 
69
 
70
+ # Train model
71
# Penalty options each solver actually supports; used to filter the search grid
# so GridSearchCV is not handed combinations LogisticRegression rejects
# (e.g. penalty='l1' with solver='lbfgs').
_SOLVER_PENALTIES = {
    'lbfgs': {'l2', 'none'},
    'liblinear': {'l1', 'l2'},
    'saga': {'l1', 'l2', 'elasticnet', 'none'},
    'sag': {'l2', 'none'},
    'newton-cg': {'l2', 'none'},
}


# Train model
def train_model(df, target_col, test_size, random_state, impute_strategy, apply_scaling, encode_categorical,
                use_grid, c_min, c_max, c_steps, penalties, solver, cv_folds, max_iter, n_jobs):
    """Train a logistic-regression pipeline on *df* and report test metrics.

    Returns a 4-tuple ``(message, figure, report, model_repr)``: accuracy text
    plus notes, a confusion-matrix matplotlib figure, the sklearn
    classification report, and ``str()`` of the fitted pipeline.
    On validation failure the last three elements are ``None``.
    """
    if df is None:
        return "No data loaded", None, None, None
    if target_col not in df.columns:
        return f"Target column '{target_col}' not found", None, None, None

    # Rows without a label cannot be used for supervised training.
    data = df.copy().dropna(subset=[target_col])

    y = data[target_col]
    # Factorize non-numeric targets into integer class codes.
    if y.dtype == object or y.dtype.name == 'category' or y.dtype == bool:
        y = pd.factorize(y)[0]

    X = data.drop(columns=[target_col])

    # Stratify only when there is more than one class; sklearn raises otherwise.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state,
        stratify=y if len(np.unique(y)) > 1 else None
    )

    pipe = build_pipeline(pd.concat([X_train, y_train], axis=1), target_col,
                          impute_strategy, apply_scaling, encode_categorical)
    pipe.named_steps['clf'].max_iter = max_iter

    if use_grid:
        C_values = np.linspace(c_min, c_max, int(max(1, c_steps)))
        # Drop penalties the chosen solver cannot optimise; fall back to l2.
        allowed = _SOLVER_PENALTIES.get(solver, {'l2'})
        selected = [p for p in (penalties or ['l2']) if p in allowed] or ['l2']
        param_grid = {
            'clf__C': C_values,
            # sklearn >= 1.2 expects penalty=None instead of the string 'none'.
            'clf__penalty': [None if p == 'none' else p for p in selected],
            'clf__solver': [solver],
        }
        if 'elasticnet' in selected:
            # elasticnet additionally requires an l1_ratio.
            param_grid['clf__l1_ratio'] = [0.5]

        gs = GridSearchCV(pipe, param_grid, cv=cv_folds, n_jobs=n_jobs, scoring='accuracy')
        gs.fit(X_train, y_train)
        model = gs.best_estimator_
        extra = f"Best params: {gs.best_params_}"
    else:
        clf = pipe.named_steps['clf']
        penalty = penalties[0] if penalties else 'l2'
        try:
            clf.set_params(C=float((c_min + c_max) / 2),
                           penalty=None if penalty == 'none' else penalty,
                           solver=solver)
        except Exception:
            # Incompatible penalty/solver pair: keep defaults, set only C.
            clf.set_params(C=float((c_min + c_max) / 2))
        pipe.fit(X_train, y_train)
        model = pipe
        extra = "Trained with provided hyperparameters"

    test_pred = model.predict(X_test)
    acc = accuracy_score(y_test, test_pred)
    report = classification_report(y_test, test_pred)
    cm = confusion_matrix(y_test, test_pred)

    # Confusion matrix plot
    fig, ax = plt.subplots(figsize=(4, 4))
    ax.imshow(cm, interpolation='nearest')
    ax.set_title('Confusion matrix')
    ax.set_xlabel('Predicted')
    ax.set_ylabel('Actual')
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # Flip label colour past the midpoint so text stays readable.
            ax.text(j, i, str(cm[i, j]), ha='center', va='center',
                    color='white' if cm[i, j] > cm.max() / 2 else 'black')
    plt.tight_layout()

    return f"Accuracy: {acc:.4f}\n{extra}", fig, report, str(model)
130
 
131
 
132
+ # Gradio Interface
133
# Gradio interface: upload -> configure -> train -> inspect results.
with gr.Blocks(title="CSV -> Logistic Regression") as demo:
    gr.Markdown("## CSV → Logistic Regression with Hyperparameter Tuning")

    # --- Data loading ---
    with gr.Row():
        with gr.Column():
            file_input = gr.File(label="Upload CSV/Excel file", file_types=['.csv', '.xls', '.xlsx'])
            load_status = gr.Textbox(label="File status", interactive=False)
            target_dropdown = gr.Dropdown(label="Select target column", choices=[])
            preview_output = gr.Dataframe(label="Data Preview", interactive=False)

    # --- Preprocessing and hyperparameter controls ---
    with gr.Row():
        with gr.Column():
            impute_radio = gr.Radio(['mean', 'median', 'most_frequent', 'none'], value='mean', label='Numeric imputation')
            scaler_checkbox = gr.Checkbox(label='Apply Standard Scaling', value=True)
            encode_checkbox = gr.Checkbox(label='One-Hot Encode categorical', value=True)
            test_size = gr.Slider(0.05, 0.5, value=0.2, step=0.05, label='Test size')
            random_state = gr.Number(value=42, precision=0, label='Random state')

            use_grid = gr.Checkbox(label='Use GridSearchCV', value=True)
            c_min = gr.Number(value=0.01, label='C min')
            c_max = gr.Number(value=10.0, label='C max')
            c_steps = gr.Slider(1, 20, value=5, step=1, label='C steps')
            penalties = gr.CheckboxGroup(['l1', 'l2', 'elasticnet', 'none'], value=['l2'], label='Penalties')
            solver = gr.Dropdown(['lbfgs', 'liblinear', 'saga'], value='lbfgs', label='Solver')
            max_iter = gr.Slider(50, 1000, value=200, step=10, label='Max iterations')
            cv_folds = gr.Slider(2, 10, value=5, step=1, label='CV folds')
            n_jobs = gr.Slider(1, 8, value=1, step=1, label='n_jobs')

    train_btn = gr.Button("Train Model")

    # --- Results ---
    with gr.Row():
        accuracy_text = gr.Textbox(label='Accuracy & Notes', interactive=False)
        conf_plot = gr.Plot(label='Confusion Matrix')
        class_report = gr.Textbox(label='Classification Report', interactive=False)
        model_obj = gr.Textbox(label='Model', interactive=False)

    # Holds the loaded dataframe between events.
    df_state = gr.State()

    # Upload populates the target dropdown, status text, state, and preview.
    file_input.change(fn=on_upload, inputs=file_input,
                      outputs=[target_dropdown, load_status, df_state, preview_output])

    # Training reads every control plus the stored dataframe.
    training_inputs = [df_state, target_dropdown, test_size, random_state, impute_radio,
                       scaler_checkbox, encode_checkbox, use_grid, c_min, c_max, c_steps,
                       penalties, solver, cv_folds, max_iter, n_jobs]
    train_btn.click(fn=train_model, inputs=training_inputs,
                    outputs=[accuracy_text, conf_plot, class_report, model_obj])


if __name__ == '__main__':
    demo.launch()