NNEngine committed on
Commit 136b539 · 1 Parent(s): 7b04a6d

Initial commit

__pycache__/helper_functions.cpython-310.pyc ADDED
Binary file (12.8 kB).
 
__pycache__/plotly.cpython-310.pyc ADDED
Binary file (9.66 kB).
 
__pycache__/report_generation.cpython-310.pyc ADDED
Binary file (1.21 kB).
 
app.py ADDED
@@ -0,0 +1,257 @@
+ #==============================================================
+ # Dependencies
+ #==============================================================
+
+ import gradio as gr
+ import pandas as pd
+ from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
+ import io
+ import numpy as np
+ import tempfile
+ import os
+
+
+ #==============================================================
+ # Other Dependencies
+ #==============================================================
+
+ from helper_functions import file_summary, load_csv
+ from helper_functions import check_duplicate_columns, remove_duplicate_columns, check_duplicate_rows, remove_duplicate_rows, check_missing_columns, drop_high_missing, delete_column
+ from helper_functions import get_missing_columns, detect_column_type, apply_missing_value
+ from helper_functions import show_value_counts, encode_column
+ from helper_functions import normalize_column_names, rename_single_column
+ from helper_functions import get_numeric_columns, show_current_dtype, change_column_dtype
+ from helper_functions import get_continuous_columns, show_column_stats, handle_outliers
+ from helper_functions import make_csv_download
+
+ from report_generation import generate_profile_report
+
+
+
+ # ===========================================================
+ # Gradio Layout
+ # ===========================================================
+
+ with gr.Blocks(theme="soft") as demo:
+     gr.Markdown("# <div align='center'> **Clean Data Dashboard** </div>")
+     gr.Markdown("<div align='center'>In every machine learning workflow, data cleaning is one of the most time-consuming and repetitive tasks. Yet, as ML engineers, our true focus should be on building models, crafting architectures, and solving real problems - not on spending endless hours handling missing values, formatting inconsistencies, and unwanted noise in CSV files.</div>")
+     gr.Markdown("<div align='center'>That's exactly why I built this CSV Data Cleaning App. It helps you clean your data in a few steps: click the button for each operation you want to apply to the file, and once all operations are applied, download the final cleaned CSV file.</div>")
+     gr.Markdown("---")
+
+     with gr.Row():
+         with gr.Column(scale=1, min_width=400):
+             gr.HTML("<div style='max-height: 90vh; overflow-y: auto; padding-right: 10px;'>")
+
+             gr.Markdown("# ⚙️ Tools Panel")
+
+             file_input = gr.File(label="Choose CSV", file_types=[".csv"])
+             load_btn = gr.Button("📂 Load CSV")
+             status_box = gr.Textbox(label="Status", interactive=False)
+             gr.Markdown("---")
+
+             delete_col = gr.Dropdown(label="Select Column to Delete")
+             gr.Markdown("Delete columns you don't need!")
+             delete_btn = gr.Button("🗑️ Delete Column")
+             delete_status = gr.Textbox(label="Delete Status", interactive=False)
+             gr.Markdown("---")
+
+             dup_col_status = gr.Textbox(label="Duplicate Columns", interactive=False)
+             dup_col_check = gr.Button("🔍 Check Duplicate Columns")
+             dup_col_btn = gr.Button("🧬 Remove Duplicate Columns")
+             gr.Markdown("---")
+
+             dup_row_status = gr.Textbox(label="Duplicate Rows", interactive=False)
+             dup_row_check = gr.Button("🔍 Check Duplicate Rows")
+             dup_row_btn = gr.Button("📄 Remove Duplicate Rows")
+             gr.Markdown("---")
+
+             missing_status = gr.Textbox(label="Missing Columns Check", interactive=False)
+             check_missing_btn = gr.Button("🔍 Check Columns with Missing Values")
+             drop_high_missing_btn = gr.Button("🧮 Drop Columns with >50% Missing Values")
+             gr.Markdown("---")
+
+             gr.Markdown("### 🧩 Handle Missing Values")
+             missing_col = gr.Dropdown(label="Select Column with Missing Values")
+             detect_type_box = gr.Textbox(label="Column Type", interactive=False)
+             fill_method = gr.Dropdown(label="Select Fill Method", choices=[])
+             apply_fill_btn = gr.Button("✨ Apply Fill Method")
+             fill_status = gr.Textbox(label="Fill Operation Status", interactive=False)
+             gr.Markdown("---")
+
+             gr.Markdown("### 🔤 Encoding Section")
+             encode_col = gr.Dropdown(label="Select Column to Encode")
+             encode_method = gr.Radio(["Label Encoding", "Ordinal Encoding"], label="Encoding Type", value="Label Encoding")
+             value_counts_box = gr.Textbox(label="Value Counts (for Ordinal Encoding)", interactive=False, lines=8)
+             encode_order = gr.Textbox(label="If Ordinal, Enter Order (comma-separated)")
+             encode_status = gr.Textbox(label="Encoding Status", interactive=False)
+             encode_btn = gr.Button("⚙️ Apply Encoding")
+             gr.Markdown("---")
+
+             gr.Markdown("### 🏷️ Column Name Normalization & Renaming")
+             normalize_btn = gr.Button("🔡 Normalize Column Names")
+             normalize_status = gr.Textbox(label="Normalization Status", interactive=False)
+             rename_col = gr.Dropdown(label="Select Column to Rename")
+             new_col_name = gr.Textbox(label="Enter New Column Name")
+             rename_btn = gr.Button("✏️ Rename Column")
+             rename_status = gr.Textbox(label="Rename Status", interactive=False)
+
+             gr.Markdown("---")
+             gr.Markdown("### 🔢 Change Data Type of Columns")
+             numeric_detect_btn = gr.Button("🔍 Detect Numeric Columns")
+             numeric_detect_status = gr.Textbox(label="Numeric Column Detection", interactive=False)
+             dtype_col = gr.Dropdown(label="Select Numeric Column")
+             current_dtype_box = gr.Textbox(label="Current Data Type", interactive=False)
+
+             # Target dtype selection
+             dtype_choices = [
+                 "int8", "int16", "int32", "int64",
+                 "float16", "float32", "float64",
+                 "complex64", "complex128"
+             ]
+             new_dtype = gr.Dropdown(label="Select New Data Type", choices=dtype_choices)
+             convert_dtype_btn = gr.Button("🔁 Convert Data Type")
+             convert_dtype_status = gr.Textbox(label="Data Type Conversion Status", interactive=False)
+             gr.Markdown("---")
+
+             gr.Markdown("### 🚨 Outlier Detection & Handling")
+             detect_cont_col_btn = gr.Button("🔍 Detect Continuous Columns")
+             cont_col_status = gr.Textbox(label="Continuous Columns Detection", interactive=False)
+             outlier_col = gr.Dropdown(label="Select Continuous Column")
+             col_stats_box = gr.Textbox(label="Column Statistics", interactive=False)
+
+             # Technique + threshold
+             outlier_method = gr.Radio(
+                 ["IQR", "Z-score", "Winsorization", "MinMax"],
+                 label="Select Outlier Handling Technique",
+                 value="IQR"
+             )
+             threshold_value = gr.Textbox(label="Enter Threshold Value (e.g., 1.5 for IQR, 3 for Z-score, etc.)")
+
+             # Apply technique
+             apply_outlier_btn = gr.Button("🧮 Apply Technique")
+             outlier_status = gr.Textbox(label="Outlier Handling Status", interactive=False)
+
+             gr.Markdown("---")
+             reset_btn = gr.Button("♻️ Reset to Original")
+             download_trigger = gr.Button("📥 Generate & Download Cleaned CSV")
+             download_file = gr.File(label="Your Cleaned CSV File Will Appear Below 👇")
+             gr.HTML("</div>")
+
+
+
+         with gr.Column(scale=3):
+             gr.Markdown("# Data Panel")
+             summary_table = gr.DataFrame(label="📊 File Summary", interactive=True, wrap=True)
+             gr.Markdown("---")
+             gr.Markdown("## 🧾 Data Preview")
+             original_df = gr.DataFrame(label="📘 Original Dataset", wrap=True, interactive=False)
+             working_df = gr.DataFrame(label="🧪 Working Dataset", wrap=True)
+
+             gr.Markdown("---")
+             gr.Markdown("### 🧾 Generate Detailed Data Report")
+
+             generate_report_btn = gr.Button("📈 Create Data Report (It might take time)")
+             report_status = gr.HTML(label="Report Status")
+             report_file = gr.File(label="Download or View Report")
+
+
+     # ===========================================================
+     # Event Bindings
+     # ===========================================================
+
+     load_btn.click(load_csv,
+                    inputs=file_input,
+                    outputs=[original_df, working_df, summary_table, delete_col, encode_col, status_box]
+                    )
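+     # load_csv returns both the original dataset and a working copy, so the
+     # "Reset to Original" button can always restore the untouched data.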
+
+     delete_btn.click(delete_column, inputs=[working_df, delete_col], outputs=[working_df, delete_status])
+     dup_col_check.click(check_duplicate_columns, inputs=working_df, outputs=dup_col_status)
+     dup_col_btn.click(remove_duplicate_columns, inputs=working_df, outputs=[working_df, dup_col_status])
+     dup_row_check.click(check_duplicate_rows, inputs=working_df, outputs=dup_row_status)
+     dup_row_btn.click(remove_duplicate_rows, inputs=working_df, outputs=[working_df, dup_row_status])
+     check_missing_btn.click(check_missing_columns, inputs=working_df, outputs=missing_status)
+     drop_high_missing_btn.click(drop_high_missing, inputs=working_df, outputs=[working_df, missing_status])
+
+     # Missing values section (this second binding on check_missing_btn also
+     # refreshes the dropdown of columns that still contain missing values)
+     check_missing_btn.click(get_missing_columns, inputs=working_df, outputs=[missing_col, missing_status])
+     missing_col.change(detect_column_type, inputs=[working_df, missing_col], outputs=[detect_type_box, fill_method])
+     apply_fill_btn.click(apply_missing_value, inputs=[working_df, missing_col, fill_method], outputs=[working_df, fill_status])
+
+     # Encoding section
+     encode_col.change(show_value_counts, inputs=[working_df, encode_col, encode_method], outputs=value_counts_box)
+     encode_method.change(show_value_counts, inputs=[working_df, encode_col, encode_method], outputs=value_counts_box)
+     encode_btn.click(
+         lambda df, col, method, order_str: encode_column(df, col, method, [x.strip() for x in order_str.split(",")] if order_str else None),
+         inputs=[working_df, encode_col, encode_method, encode_order],
+         outputs=[working_df, encode_status]
+     )
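+     # The lambda above turns the comma-separated order string into a list
+     # (e.g. "low, medium, high" -> ["low", "medium", "high"]) before calling
+     # encode_column, which expects a list of category names or None.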
+
+
+     # Normalize column names
+     def normalize_and_update(df):
+         df, msg = normalize_column_names(df)
+         if df is None:
+             return df, gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]), msg
+         cols = df.columns.tolist()
+         return df, gr.update(choices=cols), gr.update(choices=cols), gr.update(choices=cols), msg
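+     # normalize_and_update (above) and rename_and_update (below) refresh every
+     # column dropdown (delete, rename, encode), so stale column names cannot
+     # be selected after the headers change.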
+
+     normalize_btn.click(
+         normalize_and_update,
+         inputs=working_df,
+         outputs=[working_df, delete_col, rename_col, encode_col, normalize_status]
+     )
+
+     # Rename columns
+     def rename_and_update(df, old_col, new_col):
+         df, msg = rename_single_column(df, old_col, new_col)
+         if df is None:
+             return df, gr.update(choices=[]), gr.update(choices=[]), gr.update(choices=[]), msg
+         cols = df.columns.tolist()
+         return df, gr.update(choices=cols), gr.update(choices=cols), gr.update(choices=cols), msg
+
+     rename_btn.click(
+         rename_and_update,
+         inputs=[working_df, rename_col, new_col_name],
+         outputs=[working_df, delete_col, rename_col, encode_col, rename_status]
+     )
+
+     # ====================== Data Type Change Section ======================
+
+     # Detect numeric columns
+     numeric_detect_btn.click(get_numeric_columns, inputs=working_df, outputs=[dtype_col, numeric_detect_status])
+
+     # Show current dtype when a column is selected
+     dtype_col.change(show_current_dtype, inputs=[working_df, dtype_col], outputs=current_dtype_box)
+
+     # Apply dtype change
+     convert_dtype_btn.click(change_column_dtype, inputs=[working_df, dtype_col, new_dtype], outputs=[working_df, convert_dtype_status])
+
+     # ===================== Outlier Detection Section =====================
+
+     # Detect continuous columns
+     detect_cont_col_btn.click(get_continuous_columns, inputs=working_df, outputs=[outlier_col, cont_col_status])
+
+     # Show stats when a column is selected
+     outlier_col.change(show_column_stats, inputs=[working_df, outlier_col], outputs=col_stats_box)
+
+     # Apply selected outlier handling technique
+     apply_outlier_btn.click(
+         handle_outliers,
+         inputs=[working_df, outlier_col, outlier_method, threshold_value],
+         outputs=[working_df, outlier_status]
+     )
+
+
+     reset_btn.click(lambda df_orig: (df_orig.copy(), "✅ Reset to original dataset."),
+                     inputs=original_df,
+                     outputs=[working_df, status_box]
+                     )
+
+     download_trigger.click(make_csv_download, inputs=working_df, outputs=download_file)
+
+     generate_report_btn.click(
+         generate_profile_report,
+         inputs=working_df,
+         outputs=[report_file, report_status]
+     )
+
+ demo.launch()
data_profile_report.html ADDED
The diff for this file is too large to render.
 
helper_functions.py ADDED
@@ -0,0 +1,405 @@
+ import gradio as gr
+ import pandas as pd
+ from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
+ import io
+ import numpy as np
+ import tempfile
+ import os
+
+
+ # ===========================================================
+ # Helper Functions
+ # ===========================================================
+
+ def file_summary(df):
+     if df is None:
+         return pd.DataFrame(), "⚠️ No data loaded."
+     column_types = []
+     for col in df.columns:
+         dtype = df[col].dtype
+         if pd.api.types.is_numeric_dtype(dtype):
+             # Heuristic: few unique values relative to the row count suggests
+             # a categorical column that happens to be encoded as numbers
+             unique_ratio = df[col].nunique() / len(df) if len(df) > 0 else 0
+             if unique_ratio < 0.05 or df[col].nunique() < 20:
+                 column_types.append("Categorical (Numerical)")
+             else:
+                 column_types.append("Continuous")
+         elif pd.api.types.is_object_dtype(dtype) or pd.api.types.is_categorical_dtype(dtype):
+             column_types.append("Categorical (String/Object)")
+         elif pd.api.types.is_bool_dtype(dtype):
+             column_types.append("Categorical (Boolean)")
+         else:
+             column_types.append("Other")
+
+     mem_vals = [round(df[c].memory_usage(deep=True) / 1024, 2) for c in df.columns]
+     summary_df = pd.DataFrame({
+         "Column": df.columns,
+         "Data Type": df.dtypes.values,
+         "Column Type": column_types,
+         "NULL Values": df.isnull().sum().values,
+         "Memory Size (KB)": mem_vals
+     })
+     return summary_df, f"📊 Summary Generated: {df.shape[1]} columns, {df.shape[0]} rows"
+
+
+ # ===========================================================
+ # Loading CSV + UI helpers
+ # ===========================================================
+
+ def load_csv(file):
+     if file is None:
+         return None, None, pd.DataFrame(), gr.update(choices=[]), gr.update(choices=[]), "⚠️ Please upload a CSV file."
+     try:
+         df = pd.read_csv(file.name)
+         cols = df.columns.tolist()
+         # Detect only encodable columns
+         encodable_cols = df.select_dtypes(include=["object", "category", "bool"]).columns.tolist()
+         summary, _ = file_summary(df)
+         return df, df.copy(), summary, gr.update(choices=cols), gr.update(choices=encodable_cols), f"✅ File loaded successfully! Shape: {df.shape}"
+     except Exception as e:
+         return None, None, pd.DataFrame(), gr.update(choices=[]), gr.update(choices=[]), f"❌ Error: {e}"
+
+
+ # ===========================================================
+ # Duplicate, Missing & Deletion
+ # ===========================================================
+
+ def check_duplicate_columns(df):
+     if df is None:
+         return "⚠️ Please load a dataset first."
+     dup_cols = df.columns[df.columns.duplicated()]
+     if len(dup_cols) == 0:
+         return "✅ No duplicate columns found."
+     return f"⚠️ Found duplicate columns: {', '.join(dup_cols)}"
+
+ def remove_duplicate_columns(df):
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     dup_cols = df.columns[df.columns.duplicated()]
+     if len(dup_cols) == 0:
+         return df, "✅ No duplicate columns to remove."
+     df = df.loc[:, ~df.columns.duplicated()]
+     return df, f"✅ Removed duplicate columns: {', '.join(dup_cols)}"
+
+ def check_duplicate_rows(df):
+     if df is None:
+         return "⚠️ Please load a dataset first."
+     dup_rows = df.duplicated().sum()
+     if dup_rows == 0:
+         return "✅ No duplicate rows found."
+     return f"⚠️ Found {dup_rows} duplicate rows."
+
+ def remove_duplicate_rows(df):
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     dup_rows = df.duplicated().sum()
+     if dup_rows == 0:
+         return df, "✅ No duplicate rows to remove."
+     df = df.drop_duplicates()
+     return df, f"✅ Removed {dup_rows} duplicate rows successfully."
+
+ def check_missing_columns(df):
+     if df is None:
+         return "⚠️ Please load a dataset first."
+     missing = df.isnull().sum()
+     cols_with_missing = missing[missing > 0]
+     if cols_with_missing.empty:
+         return "✅ No missing values found."
+     return f"⚠️ Columns with missing values: {', '.join(cols_with_missing.index)}"
+
+ def drop_high_missing(df):
+     if df is None:
+         return df, "⚠️ No data loaded."
+     missing_pct = df.isnull().mean() * 100
+     to_drop = missing_pct[missing_pct > 50].index.tolist()
+     if not to_drop:
+         return df, "✅ No columns with >50% missing values."
+     df = df.drop(columns=to_drop)
+     return df, f"✅ Dropped columns with >50% missing values: {', '.join(to_drop)}"
+
+ def delete_column(df, col):
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     if col not in df.columns:
+         return df, f"⚠️ Column '{col}' not found."
+     df = df.drop(columns=[col])
+     return df, f"✅ Column '{col}' deleted."
+
+
+ # ===========================================================
+ # Missing Value Handler (Column-Type Based Logic)
+ # ===========================================================
+
+ def get_missing_columns(df):
+     if df is None:
+         return gr.update(choices=[]), "⚠️ Please load a dataset first."
+     cols = df.columns[df.isnull().any()].tolist()
+     if not cols:
+         return gr.update(choices=[]), "✅ No columns with missing values."
+     return gr.update(choices=cols), f"⚠️ Columns with missing values: {', '.join(cols)}"
+
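+ # Shared heuristic: a numeric column is treated as categorical when fewer
+ # than 5% of its values are unique or it has fewer than 20 distinct values.
+ # Categorical columns only get Mode as a fill option, while continuous
+ # columns can also take Mean or Median.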
+ def detect_column_type(df, column):
+     if df is None or column not in df.columns:
+         return "⚠️ Invalid column.", gr.update(choices=[])
+     dtype = df[column].dtype
+     if pd.api.types.is_numeric_dtype(dtype):
+         unique_ratio = df[column].nunique() / len(df) if len(df) > 0 else 0
+         if unique_ratio < 0.05 or df[column].nunique() < 20:
+             col_type = "Categorical (Numerical)"
+             options = ["Mode"]
+         else:
+             col_type = "Continuous (Numerical)"
+             options = ["Mean", "Median", "Mode"]
+     else:
+         col_type = "Categorical (String/Object)"
+         options = ["Mode"]
+     return f"🧩 Column Type: {col_type}", gr.update(choices=options, value=options[0])
+
+ def apply_missing_value(df, column, method):
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     if column not in df.columns:
+         return df, f"⚠️ Column '{column}' not found."
+     if df[column].isnull().sum() == 0:
+         return df, f"✅ Column '{column}' has no missing values."
+
+     # Plain assignment instead of inplace fillna avoids pandas
+     # chained-assignment warnings
+     if pd.api.types.is_numeric_dtype(df[column]):
+         if method == "Mean":
+             df[column] = df[column].fillna(df[column].mean())
+         elif method == "Median":
+             df[column] = df[column].fillna(df[column].median())
+         elif method == "Mode":
+             df[column] = df[column].fillna(df[column].mode().iloc[0])
+     else:
+         df[column] = df[column].fillna(df[column].mode().iloc[0])
+     return df, f"✅ Missing values in '{column}' filled using {method}."
+
+
+ # ===========================================================
+ # Encoding + Download Functions
+ # ===========================================================
+
+ def show_value_counts(df, col, method):
+     """Show value counts (as text) only if Ordinal Encoding is selected."""
+     if df is None or col not in df.columns:
+         return "⚠️ Please select a valid column."
+     if method != "Ordinal Encoding":
+         return "ℹ️ Value counts visible only for Ordinal Encoding."
+     counts = df[col].value_counts(dropna=False).reset_index()
+     counts.columns = [col, "Count"]
+     # The UI shows this in a Textbox, so return a plain string
+     return counts.to_string(index=False)
+
+ def encode_column(df, col, method, order):
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     if col not in df.columns:
+         return df, "⚠️ Column not found."
+
+     if method == "Label Encoding":
+         le = LabelEncoder()
+         df[col] = le.fit_transform(df[col].astype(str))
+         return df, f"✅ Label Encoding applied on '{col}'."
+
+     elif method == "Ordinal Encoding":
+         if not order:
+             return df, "⚠️ Please provide an order for Ordinal Encoding."
+
+         # Normalize both the column values and the user-provided order for comparison
+         df[col] = df[col].astype(str).str.strip()
+         user_order = [x.strip() for x in order if x.strip()]
+         col_values = sorted(df[col].dropna().unique().tolist())
+
+         # Check whether the user provided valid categories
+         missing_from_col = [x for x in user_order if x not in col_values]
+         extra_in_col = [x for x in col_values if x not in user_order]
+
+         if missing_from_col:
+             return df, f"❌ Invalid category(s): {missing_from_col}. Please check spelling/case. Existing values: {col_values}"
+
+         if extra_in_col:
+             msg = f"⚠️ Warning: Some values in the column were not in the provided order and were encoded as NaN: {extra_in_col}"
+         else:
+             msg = ""
+
+         try:
+             # handle_unknown="use_encoded_value" makes values outside the
+             # provided order come out as NaN instead of raising an error
+             oe = OrdinalEncoder(categories=[user_order], handle_unknown="use_encoded_value", unknown_value=np.nan)
+             df[col] = oe.fit_transform(df[[col]])
+             return df, f"✅ Ordinal Encoding applied on '{col}' with order {user_order}. {msg}"
+         except Exception as e:
+             return df, f"❌ Error during encoding: {e}"
+
+     return df, "⚠️ Invalid encoding method."
+
+
+
+ # ===========================================================
+ # Column Normalization & Renaming Functions
+ # ===========================================================
+
+ def normalize_column_names(df):
+     """Convert all column names to lowercase, strip spaces, and replace internal spaces with underscores."""
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+
+     original_cols = df.columns.tolist()
+     new_cols = [col.strip().lower().replace(" ", "_") for col in original_cols]
+     rename_map = {old: new for old, new in zip(original_cols, new_cols) if old != new}
+     df.columns = new_cols
+
+     if not rename_map:
+         return df, "✅ All column names were already normalized."
+     return df, f"✅ Column names normalized: {rename_map}"
+
+
+ def rename_single_column(df, old_col, new_col):
+     """Rename one specific column."""
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     if old_col not in df.columns:
+         return df, f"⚠️ Column '{old_col}' not found."
+     if not new_col.strip():
+         return df, "⚠️ Please enter a valid new column name."
+
+     df = df.rename(columns={old_col: new_col.strip()})
+     return df, f"✅ Column '{old_col}' renamed to '{new_col.strip()}'."
+
+
+ # ===========================================================
+ # Data Type Conversion (Numerical Columns)
+ # ===========================================================
+
+ def get_numeric_columns(df):
+     """Return a list of numeric columns for dtype conversion."""
+     if df is None:
+         return gr.update(choices=[]), "⚠️ Please load a dataset first."
+     num_cols = df.select_dtypes(include=["int", "float", "complex"]).columns.tolist()
+     if not num_cols:
+         return gr.update(choices=[]), "✅ No numeric columns available for conversion."
+     return gr.update(choices=num_cols), f"🔢 Numeric columns available: {', '.join(num_cols)}"
+
+
+ def show_current_dtype(df, col):
+     """Display the current dtype of the selected numeric column."""
+     if df is None or col not in df.columns:
+         return "⚠️ Please select a valid column."
+     dtype = str(df[col].dtype)
+     return f"📘 Current Data Type: {dtype}"
+
+
+ def change_column_dtype(df, col, new_dtype):
+     """Change the data type of a numeric column using pandas .astype()."""
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     if col not in df.columns:
+         return df, f"⚠️ Column '{col}' not found."
+     if not new_dtype:
+         return df, "⚠️ Please select a new data type."
+
+     try:
+         df[col] = df[col].astype(new_dtype)
+         return df, f"✅ Column '{col}' converted to type '{new_dtype}'."
+     except Exception as e:
+         return df, f"❌ Conversion failed: {e}"
+
+
+
+ # ===========================================================
+ # Outlier Detection & Handling Functions
+ # ===========================================================
+
+
+ def get_continuous_columns(df):
+     """Detect all numerical columns (int and float) for outlier handling."""
+     if df is None:
+         return gr.update(choices=[]), "⚠️ Please load a dataset first."
+
+     numeric_cols = df.select_dtypes(include=["int", "float"]).columns.tolist()
+
+     if not numeric_cols:
+         return gr.update(choices=[]), "✅ No numerical columns found."
+
+     return gr.update(choices=numeric_cols), f"📊 Numerical columns detected: {', '.join(numeric_cols)}"
+
+
+ def show_column_stats(df, col):
+     """Display basic stats for the selected continuous column."""
+     if df is None or col not in df.columns:
+         return "⚠️ Please select a valid column."
+     stats = df[col].describe().to_dict()
+     return (
+         f"📈 Column: {col}\n"
+         f"Mean: {stats['mean']:.3f}, Std: {stats['std']:.3f}, Min: {stats['min']:.3f}, Max: {stats['max']:.3f}"
+     )
+
+
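+ # The single threshold box in the UI is interpreted differently per method:
+ #   IQR           -> multiplier k: clip to [Q1 - k*IQR, Q3 + k*IQR] (k = 1.5 is typical)
+ #   Z-score       -> cutoff z: values with |x - mean| / std > z are replaced by the mean
+ #   Winsorization -> percent p: cap the lowest and highest p% of values
+ #   MinMax        -> fraction f: clip to [min + f*range, max - f*range]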
+ def handle_outliers(df, col, method, threshold):
+     """Apply the chosen outlier handling technique."""
+     if df is None:
+         return df, "⚠️ Please load a dataset first."
+     if col not in df.columns:
+         return df, f"⚠️ Column '{col}' not found."
+     if not pd.api.types.is_numeric_dtype(df[col]):
+         return df, f"⚠️ Column '{col}' is not numeric."
+     if threshold is None or str(threshold).strip() == "":
+         return df, "⚠️ Please enter a valid threshold value."
+
+     try:
+         threshold = float(threshold)
+     except ValueError:
+         return df, "⚠️ Threshold value must be numeric."
+
+     series = df[col]
+
+     # IQR method
+     if method == "IQR":
+         Q1, Q3 = series.quantile(0.25), series.quantile(0.75)
+         IQR = Q3 - Q1
+         lower = Q1 - threshold * IQR
+         upper = Q3 + threshold * IQR
+         before = series.copy()
+         df[col] = np.clip(series, lower, upper)
+         return df, f"✅ IQR method applied with threshold={threshold}. Clipped {(before != df[col]).sum()} outliers."
+
+     # Z-score method
+     elif method == "Z-score":
+         mean, std = series.mean(), series.std()
+         if std == 0:
+             return df, "⚠️ Standard deviation is zero; no outliers to handle."
+         z_scores = (series - mean) / std
+         mask = np.abs(z_scores) > threshold
+         df.loc[mask, col] = mean  # replace outliers with the mean
+         return df, f"✅ Z-score method applied (|Z| > {threshold}). Replaced {mask.sum()} outliers with the mean."
+
+     # Winsorization
+     elif method == "Winsorization":
+         lower = series.quantile(threshold / 100)
+         upper = series.quantile(1 - threshold / 100)
+         df[col] = np.clip(series, lower, upper)
+         return df, f"✅ Winsorization applied with the {threshold}% tails capped."
+
+     # Min-Max clipping
+     elif method == "MinMax":
+         min_val = series.min()
+         max_val = series.max()
+         lower = min_val + threshold * (max_val - min_val)
+         upper = max_val - threshold * (max_val - min_val)
+         before = series.copy()
+         df[col] = np.clip(series, lower, upper)
+         return df, f"✅ Min-Max clipping applied with threshold={threshold}. Clipped {(before != df[col]).sum()} values."
+
+     else:
+         return df, "⚠️ Invalid outlier handling method selected."
+
+ # ===========================================================
+ # Downloading the Cleaned CSV File
+ # ===========================================================
+
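+ # Note: this writes to a fixed filename in the system temp directory, so two
+ # concurrent sessions would overwrite each other's download; a per-session
+ # tempfile.NamedTemporaryFile(delete=False, suffix=".csv") would avoid that.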
+ def make_csv_download(df):
+     if df is None or df.empty:
+         return None
+     # Create a temporary file for Gradio's File component to serve
+     temp_dir = tempfile.gettempdir()
+     temp_path = os.path.join(temp_dir, "cleaned_data.csv")
+     df.to_csv(temp_path, index=False)
+     return temp_path
report_generation.py ADDED
@@ -0,0 +1,38 @@
+ # ProfileReport is imported lazily inside the function below, so the app can
+ # still start when ydata-profiling is not installed.
+
+
+ # ===========================================================
+ # Detailed Data Report using ydata-profiling
+ # ===========================================================
+
+ def generate_profile_report(df):
+     """Generate a ydata-profiling HTML report and return it for download."""
+     if df is None or df.empty:
+         return None, "⚠️ Please load a valid dataset first."
+
+     try:
+         from ydata_profiling import ProfileReport
+     except ImportError:
+         return None, "❌ Missing dependency: please install it using 'pip install ydata-profiling'."
+
+     try:
+         profile = ProfileReport(df, title="📊 Detailed Data Report", explorative=True)
+         output_path = "data_profile_report.html"
+         profile.to_file(output_path)
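+         # Note: a relative path drops the report into the app's working
+         # directory (which is how data_profile_report.html ended up in this
+         # commit); writing to a temp directory would keep the repo clean.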
+
+         # Status message shown in the gr.HTML component (the report file
+         # itself is returned through the gr.File component for download)
+         html_link = "✅ Report generated successfully! Download the report (HTML format) below and open it in your browser.<br>"
+         return output_path, html_link
+     except Exception as e:
+         return None, f"❌ Failed to generate report: {e}"
+
+
requirements.txt ADDED
@@ -0,0 +1,15 @@
+ # Core libraries
+ pandas
+ numpy
+ gradio>=5.0.0
+
+ # Machine learning preprocessing tools
+ scikit-learn
+
+ # Optional, but used for detailed data report generation
+ ydata-profiling
+
+ # Note: io, tempfile, and os are standard-library modules that ship with
+ # Python. They must not be listed as requirements, because pip would fail
+ # trying to install them from PyPI.
+