saherPervaiz committed on
Commit
43d6671
·
verified ·
1 Parent(s): d6bf5be

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -190
app.py CHANGED
@@ -14,31 +14,33 @@ import matplotlib.pyplot as plt
14
  import seaborn as sns
15
  from io import BytesIO
16
 
17
- # File uploader
18
  st.title("Model Training with Metrics and Correlation Heatmap")
 
 
19
  uploaded_file = st.file_uploader("Choose a CSV file", type=["csv"])
20
 
21
  if uploaded_file is not None:
 
22
  df = pd.read_csv(uploaded_file)
23
-
24
- # Show the dataset
25
  st.write("Dataset:")
26
  st.dataframe(df)
27
 
28
  # Convert categorical (str) data to numerical
29
  st.write("Converting Categorical Columns to Numerical Values:")
30
  label_encoder = LabelEncoder()
31
-
32
  for col in df.columns:
33
  if df[col].dtype == 'object' or len(df[col].unique()) <= 10:
34
  st.write(f"Encoding Column: **{col}**")
35
  df[col] = label_encoder.fit_transform(df[col])
36
-
37
  # Display the dataset after conversion
38
  st.write("Dataset After Conversion:")
39
  st.dataframe(df)
40
-
41
- # Handle Null Values (Missing Data)
42
  st.write("Handling Missing (Null) Values:")
43
  fill_method = st.selectbox("Choose how to handle missing values", ["Drop rows", "Fill with mean/median"])
44
  if fill_method == "Drop rows":
@@ -49,201 +51,85 @@ if uploaded_file is not None:
49
  df[col].fillna(df[col].mean(), inplace=True)
50
  else:
51
  df[col].fillna(df[col].mode()[0], inplace=True)
52
-
53
- # Handle Outliers using IQR method
54
- st.write("Handling Outliers:")
55
- def remove_outliers_iqr(dataframe):
56
- Q1 = dataframe.quantile(0.25)
57
- Q3 = dataframe.quantile(0.75)
58
- IQR = Q3 - Q1
59
- return dataframe[~((dataframe < (Q1 - 1.5 * IQR)) | (dataframe > (Q3 + 1.5 * IQR))).any(axis=1)]
60
-
61
- df = remove_outliers_iqr(df)
62
-
63
- # Cap Extreme Values
64
- st.write("Handling Extreme Values (Capping):")
65
- def cap_extreme_values(dataframe):
66
- for col in dataframe.select_dtypes(include=[np.number]).columns:
67
- lower_limit = dataframe[col].quantile(0.05)
68
- upper_limit = dataframe[col].quantile(0.95)
69
- dataframe[col] = np.clip(dataframe[col], lower_limit, upper_limit)
70
- return dataframe
71
-
72
- df = cap_extreme_values(df)
73
-
74
  # Show cleaned dataset
75
  st.write("Cleaned Dataset:")
76
  st.dataframe(df)
77
 
78
- # Add clean data download option
79
- st.subheader("Download Cleaned Dataset")
80
- st.download_button(
81
- label="Download Cleaned Dataset (CSV)",
82
- data=df.to_csv(index=False),
83
- file_name="cleaned_dataset.csv",
84
- mime="text/csv"
85
- )
86
-
87
  # Correlation Heatmap
88
  st.subheader("Correlation Heatmap")
89
  corr = df.corr()
90
  plt.figure(figsize=(10, 8))
91
  sns.heatmap(corr, annot=True, cmap="coolwarm", fmt=".2f", cbar=True)
92
  st.pyplot(plt)
93
-
94
- # Save heatmap as PNG
95
- buf = BytesIO()
96
- plt.savefig(buf, format="png")
97
- buf.seek(0)
98
- st.download_button(
99
- label="Download Correlation Heatmap as PNG",
100
- data=buf,
101
- file_name="correlation_heatmap.png",
102
- mime="image/png"
103
- )
104
-
105
- # Highlight highly correlated pairs
106
- st.subheader("Highly Correlated Features")
107
- high_corr = corr.abs().unstack().sort_values(ascending=False).drop_duplicates()
108
- high_corr = high_corr[high_corr.index.get_level_values(0) != high_corr.index.get_level_values(1)]
109
- high_corr_df = pd.DataFrame(high_corr)
110
- st.write(high_corr_df)
111
 
 
112
  target = st.selectbox("Select Target Variable", df.columns)
113
  features = [col for col in df.columns if col != target]
114
  X = df[features]
115
  y = df[target]
116
-
117
- if y.dtype == 'object' or len(y.unique()) <= 10: # Categorical target (classification)
118
- st.subheader("Classification Model Training")
119
- classifiers = {
120
- 'Logistic Regression': LogisticRegression(max_iter=5000, solver='saga', penalty='l1'),
121
- 'Decision Tree': DecisionTreeClassifier(),
122
- 'Random Forest': RandomForestClassifier(),
123
- 'Support Vector Machine (SVM)': SVC(),
124
- 'K-Nearest Neighbors (k-NN)': KNeighborsClassifier(),
125
- 'Naive Bayes': GaussianNB()
126
- }
127
-
128
- metrics = []
129
- train_size = st.slider("Select Training Size", min_value=0.1, max_value=0.9, value=0.8)
130
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1-train_size, random_state=42)
131
-
132
- for name, classifier in classifiers.items():
133
- classifier.fit(X_train, y_train)
134
- y_pred = classifier.predict(X_test)
135
- metrics.append({
136
- 'Model': name,
137
- 'Accuracy': round(accuracy_score(y_test, y_pred), 2),
138
- 'Precision': round(precision_score(y_test, y_pred, zero_division=1, average='macro'), 2),
139
- 'Recall': round(recall_score(y_test, y_pred, zero_division=1, average='macro'), 2),
140
- 'F1-Score': round(f1_score(y_test, y_pred, zero_division=1, average='macro'), 2)
141
- })
142
-
143
- metrics_df = pd.DataFrame(metrics)
144
- st.subheader("Classification Model Performance Metrics")
145
- st.dataframe(metrics_df)
146
-
147
- # Save metrics as PNG (table form)
148
- fig, ax = plt.subplots(figsize=(8, 4))
149
- ax.axis('tight')
150
- ax.axis('off')
151
- table = plt.table(cellText=metrics_df.values, colLabels=metrics_df.columns, cellLoc='center', loc='center')
152
- table.auto_set_font_size(False)
153
- table.set_fontsize(10)
154
- table.auto_set_column_width(col=list(range(len(metrics_df.columns))))
155
- buf = BytesIO()
156
- fig.savefig(buf, format="png")
157
- buf.seek(0)
158
- st.download_button(
159
- label="Download Classification Metrics Table as PNG",
160
- data=buf,
161
- file_name="classification_metrics_table.png",
162
- mime="image/png"
163
- )
164
-
165
- # Visualization (Bar Graphs for Classification)
166
- st.subheader("Classification Model Performance Metrics Graph")
167
- metrics_df.set_index('Model', inplace=True)
168
- ax = metrics_df.plot(kind='bar', figsize=(10, 6), colormap='coolwarm', rot=45)
169
- plt.title("Classification Models - Performance Metrics")
170
- plt.ylabel("Scores")
171
- plt.xlabel("Models")
172
- st.pyplot(plt)
173
-
174
- # Download button for the bar graph
175
- buf = BytesIO()
176
- ax.figure.savefig(buf, format="png")
177
- buf.seek(0)
178
- st.download_button(
179
- label="Download Classification Performance Graph as PNG",
180
- data=buf,
181
- file_name="classification_performance_graph.png",
182
- mime="image/png"
183
- )
184
-
185
- else: # Continuous target (regression)
186
- st.subheader("Regression Model Training")
187
- regressors = {
188
- 'Linear Regression': LinearRegression(),
189
- 'Decision Tree Regressor': DecisionTreeRegressor(),
190
- 'Random Forest Regressor': RandomForestRegressor(),
191
- 'Support Vector Regressor (SVR)': SVR(),
192
- 'K-Nearest Neighbors Regressor (k-NN)': KNeighborsRegressor()
193
- }
194
-
195
- regression_metrics = []
196
- train_size = st.slider("Select Training Size", min_value=0.1, max_value=0.9, value=0.8)
197
- X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1-train_size, random_state=42)
198
-
199
- for name, regressor in regressors.items():
200
- regressor.fit(X_train, y_train)
201
- y_pred = regressor.predict(X_test)
202
- regression_metrics.append({
203
- 'Model': name,
204
- 'Mean Squared Error (MSE)': round(mean_squared_error(y_test, y_pred), 2),
205
- 'Mean Absolute Error (MAE)': round(mean_absolute_error(y_test, y_pred), 2),
206
- 'R² Score': round(r2_score(y_test, y_pred), 2)
207
- })
208
-
209
- regression_metrics_df = pd.DataFrame(regression_metrics)
210
- st.subheader("Regression Model Performance Metrics")
211
- st.dataframe(regression_metrics_df)
212
-
213
- # Save metrics as PNG (table form)
214
- fig, ax = plt.subplots(figsize=(8, 4))
215
- ax.axis('tight')
216
- ax.axis('off')
217
- table = plt.table(cellText=regression_metrics_df.values, colLabels=regression_metrics_df.columns, cellLoc='center', loc='center')
218
- table.auto_set_font_size(False)
219
- table.set_fontsize(10)
220
- table.auto_set_column_width(col=list(range(len(regression_metrics_df.columns))))
221
- buf = BytesIO()
222
- fig.savefig(buf, format="png")
223
- buf.seek(0)
224
- st.download_button(
225
- label="Download Regression Metrics Table as PNG",
226
- data=buf,
227
- file_name="regression_metrics_table.png",
228
- mime="image/png"
229
- )
230
-
231
- # Visualization (Bar Graphs for Regression)
232
- st.subheader("Regression Model Performance Metrics Graph")
233
- regression_metrics_df.set_index('Model', inplace=True)
234
- regression_metrics_df.plot(kind='bar', figsize=(10, 6), colormap='coolwarm', rot=45)
235
- plt.title("Regression Models - Performance Metrics")
236
- plt.ylabel("Scores")
237
- plt.xlabel("Models")
238
- st.pyplot(plt)
239
-
240
- # Download button for the bar graph
241
- buf = BytesIO()
242
- plt.savefig(buf, format="png")
243
- buf.seek(0)
244
- st.download_button(
245
- label="Download Regression Performance Graph as PNG",
246
- data=buf,
247
- file_name="regression_performance_graph.png",
248
- mime="image/png"
249
- )
 
14
  import seaborn as sns
15
  from io import BytesIO
16
 
17
# App title shown at the top of the Streamlit page.
st.title("Model Training with Metrics and Correlation Heatmap")

# Let the user supply a CSV dataset.
uploaded_file = st.file_uploader("Choose a CSV file", type=["csv"])

if uploaded_file is not None:
    # Parse the uploaded CSV into a DataFrame and echo it back so the
    # user can confirm the data loaded as expected.
    df = pd.read_csv(uploaded_file)

    st.write("Dataset:")
    st.dataframe(df)
30
 
31
  # Convert categorical (str) data to numerical
32
  st.write("Converting Categorical Columns to Numerical Values:")
33
  label_encoder = LabelEncoder()
 
34
  for col in df.columns:
35
  if df[col].dtype == 'object' or len(df[col].unique()) <= 10:
36
  st.write(f"Encoding Column: **{col}**")
37
  df[col] = label_encoder.fit_transform(df[col])
38
+
39
  # Display the dataset after conversion
40
  st.write("Dataset After Conversion:")
41
  st.dataframe(df)
42
+
43
+ # Handle missing values
44
  st.write("Handling Missing (Null) Values:")
45
  fill_method = st.selectbox("Choose how to handle missing values", ["Drop rows", "Fill with mean/median"])
46
  if fill_method == "Drop rows":
 
51
  df[col].fillna(df[col].mean(), inplace=True)
52
  else:
53
  df[col].fillna(df[col].mode()[0], inplace=True)
54
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  # Show cleaned dataset
56
  st.write("Cleaned Dataset:")
57
  st.dataframe(df)
58
 
 
 
 
 
 
 
 
 
 
59
  # Correlation Heatmap
60
  st.subheader("Correlation Heatmap")
61
  corr = df.corr()
62
  plt.figure(figsize=(10, 8))
63
  sns.heatmap(corr, annot=True, cmap="coolwarm", fmt=".2f", cbar=True)
64
  st.pyplot(plt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
 
66
+ # Select target variable
67
  target = st.selectbox("Select Target Variable", df.columns)
68
  features = [col for col in df.columns if col != target]
69
  X = df[features]
70
  y = df[target]
71
+
72
+ if len(y.unique()) > 1: # Ensure the target variable has at least two unique classes/values
73
+ if y.dtype == 'object' or len(y.unique()) <= 10: # Classification
74
+ st.subheader("Classification Model Training")
75
+ classifiers = {
76
+ 'Logistic Regression': LogisticRegression(max_iter=5000),
77
+ 'Decision Tree': DecisionTreeClassifier(),
78
+ 'Random Forest': RandomForestClassifier(),
79
+ 'Support Vector Machine (SVM)': SVC(),
80
+ 'K-Nearest Neighbors (k-NN)': KNeighborsClassifier(),
81
+ 'Naive Bayes': GaussianNB()
82
+ }
83
+
84
+ metrics = []
85
+ train_size = st.slider("Select Training Size", min_value=0.1, max_value=0.9, value=0.8)
86
+ X_train, X_test, y_train, y_test = train_test_split(
87
+ X, y, test_size=1-train_size, stratify=y, random_state=42
88
+ )
89
+
90
+ for name, classifier in classifiers.items():
91
+ classifier.fit(X_train, y_train)
92
+ y_pred = classifier.predict(X_test)
93
+ metrics.append({
94
+ 'Model': name,
95
+ 'Accuracy': round(accuracy_score(y_test, y_pred), 2),
96
+ 'Precision': round(precision_score(y_test, y_pred, zero_division=1, average='macro'), 2),
97
+ 'Recall': round(recall_score(y_test, y_pred, zero_division=1, average='macro'), 2),
98
+ 'F1-Score': round(f1_score(y_test, y_pred, zero_division=1, average='macro'), 2)
99
+ })
100
+
101
+ metrics_df = pd.DataFrame(metrics)
102
+ st.subheader("Classification Model Performance Metrics")
103
+ st.dataframe(metrics_df)
104
+
105
+ else: # Regression
106
+ st.subheader("Regression Model Training")
107
+ regressors = {
108
+ 'Linear Regression': LinearRegression(),
109
+ 'Decision Tree Regressor': DecisionTreeRegressor(),
110
+ 'Random Forest Regressor': RandomForestRegressor(),
111
+ 'Support Vector Regressor (SVR)': SVR(),
112
+ 'K-Nearest Neighbors Regressor (k-NN)': KNeighborsRegressor()
113
+ }
114
+
115
+ regression_metrics = []
116
+ train_size = st.slider("Select Training Size", min_value=0.1, max_value=0.9, value=0.8)
117
+ X_train, X_test, y_train, y_test = train_test_split(
118
+ X, y, test_size=1-train_size, random_state=42
119
+ )
120
+
121
+ for name, regressor in regressors.items():
122
+ regressor.fit(X_train, y_train)
123
+ y_pred = regressor.predict(X_test)
124
+ regression_metrics.append({
125
+ 'Model': name,
126
+ 'Mean Squared Error (MSE)': round(mean_squared_error(y_test, y_pred), 2),
127
+ 'Mean Absolute Error (MAE)': round(mean_absolute_error(y_test, y_pred), 2),
128
+ 'R² Score': round(r2_score(y_test, y_pred), 2)
129
+ })
130
+
131
+ regression_metrics_df = pd.DataFrame(regression_metrics)
132
+ st.subheader("Regression Model Performance Metrics")
133
+ st.dataframe(regression_metrics_df)
134
+ else:
135
+ st.error("The target variable must contain at least two unique values for classification or regression. Please check your dataset.")