szili2011 committed on
Commit f945b6a · verified · 1 Parent(s): aeee79c

Create app.py

Files changed (1)
  1. app.py +1010 -0
app.py ADDED
@@ -0,0 +1,1010 @@
1
+ import gradio as gr
2
+ import pandas as pd
3
+ import numpy as np
4
+ import sklearn
5
+ from sklearn.model_selection import train_test_split
6
+ from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
7
+ from sklearn.impute import SimpleImputer
8
+ from sklearn.compose import ColumnTransformer
9
+ from sklearn.pipeline import Pipeline
10
+ # Scikit-learn Models
11
+ from sklearn.linear_model import LogisticRegression, LinearRegression
12
+ from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
13
+ from sklearn.svm import SVC, SVR
14
+ # Metrics
15
+ from sklearn.metrics import accuracy_score, classification_report, mean_squared_error, r2_score
16
+ # Dataset generators
17
+ from sklearn.datasets import make_classification, make_regression
18
+
19
+ import joblib
20
+ import os
21
+ import time
22
+ import torch
23
+ import torch.nn as nn
24
+ import torch.optim as optim
25
+ from torch.utils.data import TensorDataset, DataLoader
26
+ import torchvision # For transforms, even if data is basic
27
+ import torchvision.transforms as T
28
+
29
+ # ONNX specific imports
30
+ import skl2onnx
31
+ from skl2onnx import convert_sklearn
32
+ from skl2onnx.common.data_types import FloatTensorType, Int64TensorType, StringTensorType
33
+ import onnxruntime as rt
34
+
35
+ import traceback
36
+ import tempfile
37
+ import json
38
+ import math
39
+ import collections.abc # For Gradio issue with new Python versions
40
+
41
+ # --- Global Variables / Constants ---
42
+ TEMP_DIR = "temp_outputs"
43
+ os.makedirs(TEMP_DIR, exist_ok=True)
44
+ MAX_DATASET_ROWS_WARN = 30000 # Reduced slightly due to increased complexity
45
+ MAX_GENERATED_ROWS = 50000 # Max rows for generation
46
+ MAX_GENERATED_COLS = 100 # Max cols for generation
47
+
48
+ # --- Helper Functions ---
49
+ def count_sklearn_parameters(model):
50
+ if hasattr(model, 'coef_'):
51
+ return model.coef_.size + (model.intercept_.size if hasattr(model, 'intercept_') else 0)
52
+ if hasattr(model, 'support_vectors_'):
53
+ return model.support_vectors_.size
54
+ if isinstance(model, (RandomForestClassifier, RandomForestRegressor)):
55
+ try:
56
+ return sum(tree.tree_.node_count for tree in model.estimators_)
57
+ except Exception: return "N/A (Complex Ensemble)"
58
+ return "N/A"
59
+
60
+ def count_pytorch_parameters(model):
61
+ return sum(p.numel() for p in model.parameters() if p.requires_grad)
62
+
63
+ def get_temp_filepath(filename_base, extension):
64
+ # Ensure extension does not start with a dot if it's passed with one
65
+ clean_extension = extension.lstrip('.')
66
+ return os.path.join(TEMP_DIR, f"{filename_base}_{time.strftime('%Y%m%d-%H%M%S')}.{clean_extension}")
67
+
68
+
69
+ # --- PyTorch Model Definitions ---
70
+ class SimpleMLP(nn.Module):
71
+ def __init__(self, input_dim, hidden_layers_str, output_dim, activation_fn_str="relu", task_type="classification"):
72
+ super(SimpleMLP, self).__init__()
73
+ layers = []
74
+ if not isinstance(input_dim, int) or input_dim <= 0:
75
+ raise ValueError(f"Input dimension must be a positive integer, got {input_dim}")
76
+
77
+ hidden_units_list = []
78
+ if hidden_layers_str and isinstance(hidden_layers_str, str) and hidden_layers_str.strip():
79
+ try:
80
+ hidden_units_list = [int(x.strip()) for x in hidden_layers_str.split(',') if x.strip()]
81
+ if any(h_units <= 0 for h_units in hidden_units_list):
82
+ raise ValueError("Hidden layer units must be positive integers.")
83
+ except ValueError as e:
84
+ raise ValueError(f"Invalid hidden layer string '{hidden_layers_str}'. Error: {e}")
85
+
86
+ current_dim = input_dim
87
+ for h_units in hidden_units_list:
88
+ layers.append(nn.Linear(current_dim, h_units))
89
+ if activation_fn_str.lower() == "relu": layers.append(nn.ReLU())
90
+ elif activation_fn_str.lower() == "tanh": layers.append(nn.Tanh())
91
+ elif activation_fn_str.lower() == "sigmoid": layers.append(nn.Sigmoid())
92
+ else: layers.append(nn.ReLU())
93
+ current_dim = h_units
94
+
95
+ layers.append(nn.Linear(current_dim, output_dim))
96
+
97
+ if task_type == "classification":
98
+ if output_dim == 1: layers.append(nn.Sigmoid()) # Binary
99
+ elif output_dim > 1: layers.append(nn.Softmax(dim=-1)) # Multi-class
100
+ self.network = nn.Sequential(*layers)
101
+ def forward(self, x): return self.network(x)
102
+
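# Editor's note: the helper below is an illustrative sketch added for clarity; it is not part of
# the committed file and is never called. It shows how the comma-separated hidden-layer string
# expands into layers and how the parameter count works out for one concrete case:
# Linear(20,64)+ReLU, Linear(64,32)+ReLU, Linear(32,3)+Softmax
# -> (20*64+64) + (64*32+32) + (32*3+3) = 1344 + 2080 + 99 = 3523 trainable parameters.
def _sketch_mlp_param_count():
    mlp = SimpleMLP(input_dim=20, hidden_layers_str="64,32", output_dim=3,
                    activation_fn_str="relu", task_type="classification")
    assert count_pytorch_parameters(mlp) == 3523
    return mlp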
103
+ class SimpleCNN(nn.Module):
104
+ def __init__(self, input_channels, img_size_wh, num_classes=10,
105
+ c_out1=16, k1=3, s1=1, p1=1, pool1_k=2, pool1_s=2,
106
+ c_out2=32, k2=3, s2=1, p2=1, pool2_k=2, pool2_s=2,
107
+ fc_hidden=128, task_type="classification"):
108
+ super(SimpleCNN, self).__init__()
109
+ self.input_channels = input_channels
110
+ self.img_h, self.img_w = img_size_wh
111
+ self.num_classes = num_classes
112
+
113
+ self.conv1 = nn.Conv2d(self.input_channels, c_out1, kernel_size=k1, stride=s1, padding=p1)
114
+ self.relu1 = nn.ReLU()
115
+ self.pool1 = nn.MaxPool2d(kernel_size=pool1_k, stride=pool1_s)
116
+
117
+ h_out_conv1 = (self.img_h - k1 + 2 * p1) // s1 + 1
118
+ w_out_conv1 = (self.img_w - k1 + 2 * p1) // s1 + 1
119
+ h_pool1 = (h_out_conv1 - pool1_k) // pool1_s + 1
120
+ w_pool1 = (w_out_conv1 - pool1_k) // pool1_s + 1
121
+
122
+ self.conv2 = nn.Conv2d(c_out1, c_out2, kernel_size=k2, stride=s2, padding=p2)
123
+ self.relu2 = nn.ReLU()
124
+ self.pool2 = nn.MaxPool2d(kernel_size=pool2_k, stride=pool2_s)
125
+
126
+ h_out_conv2 = (h_pool1 - k2 + 2 * p2) // s2 + 1
127
+ w_out_conv2 = (w_pool1 - k2 + 2 * p2) // s2 + 1
128
+ h_pool2 = (h_out_conv2 - pool2_k) // pool2_s + 1
129
+ w_pool2 = (w_out_conv2 - pool2_k) // pool2_s + 1
130
+
131
+ self.flattened_size = c_out2 * h_pool2 * w_pool2
132
+ if self.flattened_size <= 0:
133
+ raise ValueError(f"Calculated flattened size is {self.flattened_size}. Check CNN params and image size. Conv1_out:({h_out_conv1},{w_out_conv1}), Pool1_out:({h_pool1},{w_pool1}), Conv2_out:({h_out_conv2},{w_out_conv2}), Pool2_out:({h_pool2},{w_pool2})")
134
+
135
+ self.fc1 = nn.Linear(self.flattened_size, fc_hidden)
136
+ self.relu3 = nn.ReLU()
137
+ self.fc2 = nn.Linear(fc_hidden, num_classes)
138
+
139
+ if num_classes > 1 or (num_classes == 1 and task_type=="classification"): # Adapt for binary vs regression
140
+ self.final_activation = nn.Softmax(dim=1) if num_classes > 1 else nn.Sigmoid()
141
+ else: # Regression output from fc2
142
+ self.final_activation = nn.Identity()
143
+
144
+
145
+ def forward(self, x):
146
+ x = self.pool1(self.relu1(self.conv1(x)))
147
+ x = self.pool2(self.relu2(self.conv2(x)))
148
+ x = x.view(-1, self.flattened_size)
149
+ x = self.relu3(self.fc1(x))
150
+ x = self.fc2(x)
151
+ x = self.final_activation(x)
152
+ return x
153
+
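# Editor's note: illustrative sketch (not part of the committed file, never called) tracing the
# feature-map arithmetic in SimpleCNN for its defaults on a 28x28 single-channel input:
# conv1 (k=3, s=1, p=1): 28 -> 28, pool1 (2,2): 28 -> 14,
# conv2 (k=3, s=1, p=1): 14 -> 14, pool2 (2,2): 14 -> 7,
# so flattened_size = 32 * 7 * 7 = 1568 feeds fc1.
def _sketch_cnn_shapes():
    cnn = SimpleCNN(input_channels=1, img_size_wh=(28, 28), num_classes=10)
    assert cnn.flattened_size == 32 * 7 * 7
    dummy = torch.zeros(1, 1, 28, 28)
    assert cnn(dummy).shape == (1, 10)
    return cnn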
154
+ # --- Parameter Target Helpers ---
155
+ PARAM_RANGES = collections.OrderedDict([ # Ordered for consistent UI
156
+ ("Tiny (<10k)", (0, 10000)),
157
+ ("Small (10k-50k)", (10000, 50000)),
158
+ ("Medium (50k-250k)", (50000, 250000)),
159
+ ("Large (250k-1M)", (250000, 1000000)),
160
+ ])
161
+
162
+ def suggest_mlp_layers_for_range(input_dim, output_dim, target_range_str, current_logs=""):
163
+ logs = current_logs
164
+ if not target_range_str or target_range_str not in PARAM_RANGES:
165
+ logs += "Invalid parameter range selected for MLP suggestion.\n"; return "", logs
166
+ min_p, max_p = PARAM_RANGES[target_range_str]
167
+ target_p_avg = (min_p + max_p) // 2
168
+ suggested_layers_str = ""
169
+ if input_dim <=0 or output_dim <=0:
170
+ logs += "Input/Output dims must be positive for MLP suggestion.\n"; return "", logs
171
+
172
+ h1_candidate = max(1, int(target_p_avg / (input_dim + output_dim + 1e-6)))
173
+ params_1_layer = (input_dim * h1_candidate + h1_candidate) + (h1_candidate * output_dim + output_dim)
174
+ if min_p <= params_1_layer <= max_p and h1_candidate > 0:
175
+ suggested_layers_str = str(h1_candidate)
176
+ logs += f"Suggested 1 hidden layer: {h1_candidate} units (Est. Params: {params_1_layer})\n"
177
+ else:
178
+ h_base = max(1, int(math.sqrt(target_p_avg / 2.0)))
179
+ h1 = min(2048, max(1, int(h_base * (input_dim / (input_dim + output_dim + 1e-6)) * 2 + h_base / 2)))
180
+ h2 = min(2048, max(1, int(h_base * (output_dim / (input_dim + output_dim + 1e-6)) * 2 + h_base / 2)))
181
+ params_2_layers = (input_dim * h1 + h1) + (h1 * h2 + h2) + (h2 * output_dim + output_dim)
182
+ if min_p <= params_2_layers <= max_p and h1 > 0 and h2 > 0:
183
+ suggested_layers_str = f"{h1},{h2}"
184
+ logs += f"Suggested 2 hidden layers: {h1},{h2} units (Est. Params: {params_2_layers})\n"
185
+ else:
186
+ if target_p_avg < 50000: suggested_layers_str = str(max(1, int(target_p_avg / (input_dim + output_dim + 100)))) or "32"
187
+ elif target_p_avg < 250000: h = max(1,int(math.sqrt(target_p_avg/1.5))); suggested_layers_str=f"{h},{h//2}" if h>0 and h//2 >0 else "128,64"
188
+ else: h = max(1,int(math.sqrt(target_p_avg/2.0))); suggested_layers_str=f"{h},{h},{h//2}" if h>0 and h//2 >0 else "256,256,128"
189
+ logs += f"Fallback suggestion: {suggested_layers_str} (Verify params).\n"
190
+ if not suggested_layers_str: suggested_layers_str = "64"; logs += "Defaulting to '64'.\n"
191
+ return suggested_layers_str, logs
192
+
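# Editor's note: illustrative sketch (not part of the committed file, never called) of the helper
# above. For input_dim=10, output_dim=1 and the "Small (10k-50k)" range, target_p_avg is 30000,
# the single-hidden-layer candidate is int(30000 / 11) = 2727 units, and its estimated parameter
# count (10*2727 + 2727) + (2727*1 + 1) = 32725 lies inside the range, so "2727" is returned.
def _sketch_layer_suggestion():
    layers_str, log_text = suggest_mlp_layers_for_range(10, 1, "Small (10k-50k)")
    assert layers_str == "2727"
    return layers_str, log_text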
193
+ def estimate_current_mlp_params(input_dim_str, hidden_layers_str, output_dim_str, current_logs=""):
194
+ logs = current_logs
195
+ try:
196
+ input_dim = int(input_dim_str); output_dim = int(output_dim_str)
197
+ if input_dim <= 0 or output_dim <= 0: return "Input/Output dims must be > 0", logs
198
+ temp_mlp = SimpleMLP(input_dim, hidden_layers_str, output_dim)
199
+ params = count_pytorch_parameters(temp_mlp); del temp_mlp
200
+ return f"{params:,}", logs
201
+ except Exception as e: logs += f"Error estimating MLP params: {e}\n"; return "Error", logs
202
+
203
+ def estimate_cnn_params(img_h_str, img_w_str, num_classes_str, current_logs=""):
204
+ logs = current_logs
205
+ try:
206
+ img_h, img_w, num_classes = int(img_h_str), int(img_w_str), int(num_classes_str)
207
+ if not (img_h > 0 and img_w > 0 and num_classes > 0): return "Image dims/classes must be > 0", logs
208
+ # Using default SimpleCNN params here. A real app would pass them.
209
+ temp_cnn = SimpleCNN(input_channels=1, img_size_wh=(img_h, img_w), num_classes=num_classes)
210
+ params = count_pytorch_parameters(temp_cnn); del temp_cnn
211
+ return f"{params:,}", logs
212
+ except Exception as e: logs += f"Error estimating CNN params: {e}\n"; return "Error", logs
213
+
214
+ # --- Dataset and Preprocessing ---
215
+ def generate_dataset_backend(task_type, n_samples_str, n_features_str,
216
+ n_classes_or_informative_str, dataset_format,
217
+ ai_suggest_ds_shape, target_param_range_str, model_type_selection,
218
+ current_logs=""):
219
+ logs = current_logs + "\n--- Generating Dataset ---\n"
220
+ try:
221
+ n_samples = int(n_samples_str); n_features = int(n_features_str); n_classes_or_informative = int(n_classes_or_informative_str)
222
+ except ValueError: logs += "Invalid numbers for dataset generation.\n"; return None, "Error", logs, None
223
+
224
+ if ai_suggest_ds_shape:
225
+ n_samples_sugg, n_features_sugg, n_classes_or_informative_sugg = 5000, 10, 2
226
+ if task_type == "Tabular Regression": n_classes_or_informative_sugg = min(n_features_sugg // 2, 5)
227
+ elif task_type == "Basic Image Classification": n_samples_sugg, n_features_sugg = 500, 0 # features not tabular
228
+
229
+ is_nn = "Network" in model_type_selection
230
+ if is_nn and target_param_range_str in PARAM_RANGES:
231
+ min_p, max_p = PARAM_RANGES[target_param_range_str]; avg_p = (min_p + max_p) / 2
232
+ if avg_p > 200000: n_samples_sugg = min(MAX_GENERATED_ROWS, n_samples_sugg * 2); n_features_sugg = min(MAX_GENERATED_COLS, n_features_sugg * 2) if task_type.startswith("Tabular") else n_features_sugg
233
+ elif avg_p < 50000: n_samples_sugg = max(100, n_samples_sugg // 2); n_features_sugg = max(3, n_features_sugg // 2) if task_type.startswith("Tabular") else n_features_sugg
234
+
235
+ n_samples, n_features, n_classes_or_informative = n_samples_sugg, n_features_sugg, n_classes_or_informative_sugg
236
+ logs += f"AI Suggested Dataset: Samples={n_samples}, Feats={n_features}, Classes/Informative={n_classes_or_informative}\n"
237
+
238
+ n_samples = max(10, min(n_samples, MAX_GENERATED_ROWS))
239
+ if task_type.startswith("Tabular"): n_features = max(1, min(n_features, MAX_GENERATED_COLS))
240
+ if n_samples > MAX_DATASET_ROWS_WARN: logs += f"Warning: Generating {n_samples} rows. May be slow.\n"
241
+
242
+ df = None; X_data=None; y_data=None # Init X_data, y_data
243
+ try:
244
+ if task_type == "Tabular Classification":
245
+ n_cls = max(2, n_classes_or_informative)
246
+ n_inf = max(1, min(n_features, n_classes_or_informative if n_classes_or_informative > n_cls else n_features // 2))
247
+ X_data, y_data = make_classification(n_samples=n_samples, n_features=n_features, n_informative=n_inf,
248
+ n_redundant=max(0,n_features - n_inf)//2, n_classes=n_cls, flip_y=0.05, random_state=42)
249
+ df = pd.DataFrame(X_data, columns=[f'feature_{i}' for i in range(n_features)]); df['target'] = y_data
250
+ elif task_type == "Tabular Regression":
251
+ n_inf = max(1, min(n_features, n_classes_or_informative))
252
+ X_data, y_data = make_regression(n_samples=n_samples, n_features=n_features, n_informative=n_inf, noise=10, random_state=42)
253
+ df = pd.DataFrame(X_data, columns=[f'feature_{i}' for i in range(n_features)]); df['target'] = y_data
254
+ elif task_type == "Basic Image Classification":
255
+ # For SimpleCNN, let's generate 28x28 "images" (random noise)
256
+ img_h, img_w = 28, 28
257
+ num_pixels = img_h * img_w
258
+ X_data = np.random.randint(0, 256, size=(n_samples, num_pixels), dtype=np.uint8)
259
+ y_data = np.random.randint(0, max(2, n_classes_or_informative), n_samples)
260
+ df = pd.DataFrame(X_data, columns=[f'pixel_{i}' for i in range(num_pixels)]); df['target'] = y_data
261
+ logs += f"Generated {img_h}x{img_w} Image placeholder data.\n"
262
+ else: logs += f"Dataset generation for '{task_type}' not fully implemented.\n"; return None, "Task not implemented", logs, None
263
+
264
+ logs += f"Generated data: {df.shape if df is not None else (X_data.shape, y_data.shape)}\n"
265
+ file_path = get_temp_filepath("generated_dataset", dataset_format)
266
+ if df is not None: # Save if DataFrame was created
267
+ if dataset_format == ".csv": df.to_csv(file_path, index=False)
268
+ elif dataset_format == ".json": df.to_json(file_path, orient='records', lines=True)
269
+ elif dataset_format == ".parquet": df.to_parquet(file_path, index=False)
270
+ else: logs += f"Unsupported format {dataset_format}. Defaulting to CSV.\n"; file_path=get_temp_filepath("generated_dataset","csv"); df.to_csv(file_path, index=False)
271
+ logs += f"Dataset saved to {file_path}\n"
272
+ return df.head(), df, logs, file_path # Return DataFrame for sklearn
273
+ else: # Case where df might not be created (though current logic does)
274
+ logs += "Dataset generated as numpy arrays. No file saved directly by this part of function.\n"
275
+ # This branch needs more thought if we don't always make a df
276
+ return pd.DataFrame(X_data[:5]), (X_data, y_data), logs, None # Return numpy arrays for PyTorch image case
277
+
278
+
279
+ except Exception as e: error_msg=f"Error generating dataset: {traceback.format_exc()}"; logs+=error_msg+"\n"; return None, error_msg, logs, None
280
+
281
+ def preprocess_tabular_data(df_or_X, y_if_X_is_numpy, target_column_name, task_type, current_logs=""):
282
+ logs = current_logs
283
+ if isinstance(df_or_X, pd.DataFrame):
284
+ df = df_or_X
285
+ if target_column_name not in df.columns: raise ValueError(f"Target column '{target_column_name}' not found.")
286
+ X_df = df.drop(target_column_name, axis=1)
287
+ y_series = df[target_column_name]
288
+ elif isinstance(df_or_X, np.ndarray) and y_if_X_is_numpy is not None: # If X,y are numpy
289
+ X_df = pd.DataFrame(df_or_X, columns=[f'feature_{i}' for i in range(df_or_X.shape[1])]) # Temp DF for pipeline
290
+ y_series = pd.Series(y_if_X_is_numpy)
291
+ else: raise ValueError("Invalid input for preprocess_tabular_data.")
292
+
293
+ numerical_features = X_df.select_dtypes(include=np.number).columns.tolist()
294
+ categorical_features = X_df.select_dtypes(include='object').columns.tolist()
295
+ logs += f"Numerical: {numerical_features}, Categorical: {categorical_features}\n"
296
+
297
+ preprocessor = ColumnTransformer(transformers=[
298
+ ('num', Pipeline([('imputer', SimpleImputer(strategy='mean')), ('scaler', StandardScaler())]), numerical_features),
299
+ ('cat', Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore', sparse_output=False))]), categorical_features) # sparse_output=False for easier handling
300
+ ], remainder='passthrough') # passthrough to keep unhandled columns if any
301
+
302
+ X_processed_np = preprocessor.fit_transform(X_df)
303
+
304
+ try: feature_names_out = preprocessor.get_feature_names_out()
305
+ except AttributeError: # Older sklearn
306
+ cat_encoder = preprocessor.named_transformers_['cat'].named_steps['onehot']
307
+ if hasattr(cat_encoder, 'get_feature_names_out'):
308
+ cat_feature_names = cat_encoder.get_feature_names_out(categorical_features)
309
+ elif hasattr(cat_encoder, 'get_feature_names'): # even older
310
+ cat_feature_names = cat_encoder.get_feature_names(categorical_features)
311
+ else: cat_feature_names = [f"cat_feat_{i}" for i in range(X_processed_np.shape[1] - len(numerical_features))] # Fallback
312
+ feature_names_out = numerical_features + list(cat_feature_names)
313
+
314
+ processed_input_dim = X_processed_np.shape[1]
315
+ logs += f"Tabular data preprocessed. X shape: {X_processed_np.shape}, Processed input dim: {processed_input_dim}\n"
316
+
317
+ if task_type.endswith("Classification"):
318
+ le = LabelEncoder()
319
+ y_processed_np = le.fit_transform(y_series)
320
+ num_classes = len(le.classes_)
321
+ logs += f"Target encoded. Classes: {num_classes} ({le.classes_})\n"
322
+ # For binary classification with PyTorch, often output 1 neuron with Sigmoid or BCEWithLogitsLoss
323
+ # If num_classes is 2, some PyTorch setups expect output_dim=1.
324
+ # Scikit-learn handles this internally.
325
+ output_dim_nn = 1 if num_classes == 2 else num_classes
326
+ else: # Regression
327
+ y_processed_np = y_series.astype(float).values
328
+ num_classes = 1 # Output dim for regression for NN
329
+ output_dim_nn = 1
330
+
331
+ return X_processed_np, y_processed_np, preprocessor, logs, processed_input_dim, output_dim_nn, feature_names_out
332
+
333
+
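# Editor's note: illustrative sketch (not part of the committed file, never called) of what
# preprocess_tabular_data produces for a tiny mixed-type frame: numeric columns are imputed and
# scaled, object columns are imputed and one-hot encoded, and the target is label-encoded.
def _sketch_preprocess_tabular():
    toy = pd.DataFrame({
        "age": [25.0, 32.0, np.nan, 40.0],
        "city": ["A", "B", "A", "B"],
        "target": ["yes", "no", "yes", "no"],
    })
    X, y, prep, _logs, in_dim, out_dim, _names = preprocess_tabular_data(
        toy, None, "target", "Tabular Classification")
    # "age" -> 1 scaled column; "city" -> one one-hot column per category ("A", "B"),
    # so in_dim == 3, and out_dim == 1 because the binary target maps to a single NN output.
    assert X.shape == (4, 3) and out_dim == 1
    return prep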
334
+ # --- Training Functions ---
335
+ def train_model_sklearn(data_input_obj, target_column, task_type, model_name, model_output_format, current_logs=""):
336
+ logs = current_logs + f"\n--- Training Scikit-learn Model: {model_name} ---\n"
337
+ model_path_out, metrics_out, model_params_out = None, "Training failed.", "N/A"
338
+
339
+ df = None
340
+ if isinstance(data_input_obj, str): # Filepath
341
+ try:
342
+ if data_input_obj.endswith('.csv'): df = pd.read_csv(data_input_obj)
343
+ elif data_input_obj.endswith('.json'): df = pd.read_json(data_input_obj, lines=True)
344
+ elif data_input_obj.endswith('.parquet'): df = pd.read_parquet(data_input_obj)
345
+ else: logs += f"Unsupported file: {data_input_obj}\n"; return logs, "Error: Unsupported file.", None, "N/A"
346
+ except Exception as e: logs += f"Error reading {data_input_obj}: {e}\n"; return logs, f"Error reading: {e}", None, "N/A"
347
+ elif isinstance(data_input_obj, pd.DataFrame): df = data_input_obj
348
+ else: logs += "Invalid data for training.\n"; return logs, "Error: Invalid data.", None, "N/A"
349
+
350
+ if target_column not in df.columns:
351
+ logs += f"Target '{target_column}' not found.\n"; return logs, f"Error: Target '{target_column}' not found.", None, "N/A"
352
+
353
+ try:
354
+ X_processed_np, y_processed_np, preprocessor, logs, _, _, feature_names = preprocess_tabular_data(df, None, target_column, task_type, logs)
355
+ except ValueError as e: logs += f"Preprocessing error: {e}\n"; return logs, f"Error: {e}", None, "N/A"
356
+
357
+ X_train, X_test, y_train, y_test = train_test_split(X_processed_np, y_processed_np, test_size=0.2, random_state=42)
358
+ logs += f"Train/Test split. Train: {X_train.shape}, Test: {X_test.shape}\n"
359
+
360
+ model = None
361
+ if task_type == "Tabular Classification":
362
+ if model_name == "Logistic Regression": model = LogisticRegression(max_iter=1000, random_state=42)
363
+ elif model_name == "Random Forest Classifier": model = RandomForestClassifier(random_state=42)
364
+ elif model_name == "Support Vector Machine (SVM) Classifier": model = SVC(random_state=42, probability=True) # probability=True for ONNX if it needs predict_proba
365
+ elif task_type == "Tabular Regression":
366
+ if model_name == "Linear Regression": model = LinearRegression()
367
+ elif model_name == "Random Forest Regressor": model = RandomForestRegressor(random_state=42)
368
+ elif model_name == "Support Vector Machine (SVR) Regressor": model = SVR()
369
+ if model is None: logs += f"Model {model_name} or task {task_type} not supported.\n"; return logs, "Model/Task Error", None, "N/A"
370
+
371
+ try:
372
+ logs += f"Starting training for {model_name}...\n"; start_time = time.time()
373
+ model.fit(X_train, y_train)
374
+ logs += f"Training completed in {time.time() - start_time:.2f}s.\n"
375
+ model_params_out = str(count_sklearn_parameters(model))
376
+ logs += f"Est. Model Params: {model_params_out}\n"
377
+ y_pred = model.predict(X_test)
378
+
379
+ if task_type == "Tabular Classification":
380
+ acc = accuracy_score(y_test, y_pred)
381
+ report = classification_report(y_test, y_pred, zero_division=0)
382
+ metrics_out = f"Accuracy: {acc:.4f}\n\nClassification Report:\n{report}"
383
+ elif task_type == "Tabular Regression":
384
+ mse = mean_squared_error(y_test, y_pred)
385
+ r2 = r2_score(y_test, y_pred)
386
+ metrics_out = f"Mean Squared Error: {mse:.4f}\nR2 Score: {r2:.4f}"
387
+ logs += "\n--- Evaluation Metrics ---\n" + metrics_out + "\n"
388
+
389
+ # Full pipeline for inference: preprocessor + model
390
+ full_pipeline_for_saving = Pipeline([('preprocessor', preprocessor), ('model', model)])
391
+ model_filename_base = f"sklearn_{model_name.replace(' ', '_').lower()}"
392
+
393
+ if model_output_format == ".pkl (Scikit-learn)":
394
+ model_path_out = get_temp_filepath(model_filename_base, "pkl")
395
+ joblib.dump(full_pipeline_for_saving, model_path_out)
396
+ logs += f"Model (with preprocessor) saved to {model_path_out} as PKL.\n"
397
+
398
+ elif model_output_format == ".onnx (ONNX)":
399
+ model_path_out = get_temp_filepath(model_filename_base, "onnx")
400
+
401
+ # Define initial types for ONNX conversion based on preprocessed input
402
+ # The preprocessor converts all to numerical. Shape is (batch_size, num_processed_features)
403
+ # num_processed_features = X_train.shape[1]
404
+ initial_type = [('float_input', FloatTensorType([None, X_train.shape[1]]))] # None for batch size
405
+
406
+ # For models with string inputs *before* preprocessing, it's more complex.
407
+ # Here, we assume the `full_pipeline_for_saving` takes the raw DataFrame structure as input.
408
+ # So, we need to define initial_types based on the *original* DataFrame features.
409
+
410
+ # Re-create initial types based on the *original* df structure, before preprocessing
411
+ # This is complex because ColumnTransformer input spec is not trivial for skl2onnx for mixed types.
412
+ # The EASIEST way for skl2onnx with ColumnTransformer is to convert the *fitted preprocessor separately*
413
+ # OR, provide initial types that match the *input to the preprocessor*.
414
+
415
+ # Let's try providing initial types for the raw input to the preprocessor
416
+ raw_X_for_types = df.drop(target_column, axis=1).infer_objects() # Normalize column dtypes before declaring ONNX input types
417
+ onnx_initial_types = []
418
+ for col_name in raw_X_for_types.columns:
419
+ col_dtype = raw_X_for_types[col_name].dtype
420
+ if pd.api.types.is_numeric_dtype(col_dtype):
421
+ # Forcing float32 for ONNX compatibility
422
+ onnx_initial_types.append((col_name, FloatTensorType([None, 1])))
423
+ elif pd.api.types.is_string_dtype(col_dtype) or col_dtype == 'object':
424
+ onnx_initial_types.append((col_name, StringTensorType([None, 1])))
425
+ else:
426
+ logs += f"Warning: Unsupported dtype {col_dtype} for column {col_name} in ONNX conversion. Skipping.\n"
427
+
428
+ if not onnx_initial_types:
429
+ logs += "Error: Could not determine ONNX initial types for raw input. Aborting ONNX export.\n"
430
+ raise ValueError("ONNX initial types failed.")
431
+
432
+ try:
433
+ options = {id(full_pipeline_for_saving): {'zipmap': False}} # Disable zipmap for classifier output
434
+ onnx_model = convert_sklearn(full_pipeline_for_saving, initial_types=onnx_initial_types,
435
+ target_opset=12, options=options) # Target opset can be important
436
+ with open(model_path_out, "wb") as f:
437
+ f.write(onnx_model.SerializeToString())
438
+ logs += f"Model (with preprocessor) saved to {model_path_out} as ONNX.\n"
439
+
440
+ # Optional: Verify ONNX model
441
+ sess = rt.InferenceSession(model_path_out, providers=rt.get_available_providers())
442
+ logs += f"ONNX model loaded successfully with ONNX Runtime. Input names: {[inp.name for inp in sess.get_inputs()]}\n"
443
+ except Exception as onnx_e:
444
+ logs += f"Error during ONNX conversion/saving: {traceback.format_exc()}\n"
445
+ model_path_out = None # Clear path if saving failed
446
+ metrics_out += "\nONNX EXPORT FAILED."
447
+
448
+ else:
449
+ logs += f"Unsupported format '{model_output_format}'. Saving as .pkl\n"
450
+ model_path_out = get_temp_filepath(model_filename_base, "pkl")
451
+ joblib.dump(full_pipeline_for_saving, model_path_out)
452
+
453
+ except Exception as e:
454
+ error_msg = f"Error during sklearn training/eval: {traceback.format_exc()}"; logs += error_msg + "\n"; metrics_out = error_msg
455
+ return logs, metrics_out, model_path_out, model_params_out
456
+
457
+
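# Editor's note: illustrative sketch (not part of the committed file, never called) of how the
# ONNX file exported above is meant to be consumed. Because initial_types were declared per raw
# column, the ONNX graph expects one named input per original DataFrame column, each shaped
# (n_rows, 1): numeric columns as float32, text columns as string/object arrays. The column names
# below ("age", "city") are placeholders for whatever the training frame actually contained.
def _sketch_onnx_inference(onnx_path):
    sess = rt.InferenceSession(onnx_path, providers=rt.get_available_providers())
    feeds = {
        "age": np.array([[31.0], [47.0]], dtype=np.float32),
        "city": np.array([["A"], ["B"]], dtype=object),
    }
    # First output is the prediction; with zipmap disabled, classifiers also expose probabilities.
    return sess.run(None, feeds)[0]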
458
+ def train_model_pytorch(data_input_obj, target_column, task_type, model_type_pt,
459
+ mlp_hidden_layers_str, mlp_activation,
460
+ # CNN specific (using defaults in SimpleCNN for now)
461
+ # cnn_img_h_str, cnn_img_w_str, # Now derived from data
462
+ epochs_str, batch_size_str, lr_str,
463
+ model_output_format, current_logs=""):
464
+ logs = current_logs + f"\n--- Training PyTorch Model: {model_type_pt} ---\n"
465
+ model_path_out, metrics_out, model_params_out, plot_out = None, "Training failed.", "N/A", None
466
+
467
+ df_for_pytorch = None; X_numpy_for_pytorch=None; y_numpy_for_pytorch=None # For flexibility
468
+ if isinstance(data_input_obj, str): # Filepath
469
+ try:
470
+ # For PyTorch, we might want to handle data differently, esp images
471
+ if data_input_obj.endswith('.csv'): df_for_pytorch = pd.read_csv(data_input_obj)
472
+ elif data_input_obj.endswith('.json'): df_for_pytorch = pd.read_json(data_input_obj, lines=True)
473
+ elif data_input_obj.endswith('.parquet'): df_for_pytorch = pd.read_parquet(data_input_obj)
474
+ else: logs += f"Unsupported file: {data_input_obj}\n"; return logs, "Error", None, "N/A", None
475
+ except Exception as e: logs += f"Error reading {data_input_obj}: {e}\n"; return logs, f"Error: {e}", None, "N/A", None
476
+ elif isinstance(data_input_obj, pd.DataFrame): df_for_pytorch = data_input_obj
477
+ elif isinstance(data_input_obj, tuple) and len(data_input_obj) == 2 and \
478
+ isinstance(data_input_obj[0], np.ndarray) and isinstance(data_input_obj[1], np.ndarray):
479
+ X_numpy_for_pytorch, y_numpy_for_pytorch = data_input_obj # If data was (X,y) from generation
480
+ else: logs += "Invalid data for PyTorch training.\n"; return logs, "Error", None, "N/A", None
481
+
482
+ try:
483
+ epochs = int(epochs_str); batch_size = int(batch_size_str); lr = float(lr_str)
484
+ if not (epochs > 0 and batch_size > 0 and lr > 0): raise ValueError("Params must be >0.")
485
+ except ValueError as e: logs += f"Invalid training params: {e}\n"; return logs, f"Error: {e}", None, "N/A", None
486
+
487
+ processed_input_dim_actual = -1; nn_output_dim_actual = -1; preprocessor_pipeline = None
488
+ X_processed_np = None; y_processed_np = None
489
+
490
+ if model_type_pt == "Simple Neural Network (MLP)":
491
+ if not task_type.startswith("Tabular"):
492
+ logs += "MLP requires Tabular task.\n"; return logs, "MLP Task Error", None, "N/A", None
493
+ try:
494
+ # Pass df_for_pytorch or (X_numpy_for_pytorch, y_numpy_for_pytorch)
495
+ data_arg1 = df_for_pytorch if df_for_pytorch is not None else X_numpy_for_pytorch
496
+ data_arg2 = y_numpy_for_pytorch if df_for_pytorch is None else None
497
+ X_processed_np, y_processed_np, preprocessor_pipeline, logs, processed_input_dim_actual, nn_output_dim_actual, _ = \
498
+ preprocess_tabular_data(data_arg1, data_arg2, target_column, task_type, logs)
499
+ except ValueError as e: logs+=f"MLP Preprocessing error: {e}\n"; return logs,f"Error: {e}",None,"N/A",None
500
+
501
+ elif model_type_pt == "Simple Convolutional Network (CNN)":
502
+ if task_type != "Basic Image Classification":
503
+ logs += "Warning: CNN selected, but task is not Basic Image Classification. Output may be unexpected.\n"
504
+
505
+ if df_for_pytorch is not None:
506
+ if target_column not in df_for_pytorch.columns:
507
+ logs += f"Target '{target_column}' not found for CNN.\n"; return logs, "CNN Target Error", None, "N/A", None
508
+ X_raw = df_for_pytorch.drop(target_column, axis=1).values
509
+ y_raw = df_for_pytorch[target_column].values
510
+ elif X_numpy_for_pytorch is not None and y_numpy_for_pytorch is not None:
511
+ X_raw = X_numpy_for_pytorch
512
+ y_raw = y_numpy_for_pytorch
513
+ else:
514
+ logs += "No valid data found for CNN.\n"; return logs, "CNN Data Error", None, "N/A", None
515
+
516
+ le = LabelEncoder(); y_processed_np = le.fit_transform(y_raw)
517
+ nn_output_dim_actual = len(le.classes_)
518
+ if nn_output_dim_actual == 2: nn_output_dim_actual = 1 # Binary output for NN
519
+
520
+ pixels_per_sample = X_raw.shape[1]
521
+ img_dim_approx = int(math.sqrt(pixels_per_sample))
522
+ img_h, img_w, input_channels = (28,28,1) # Default
523
+ if img_dim_approx * img_dim_approx == pixels_per_sample:
524
+ img_h, img_w = img_dim_approx, img_dim_approx
525
+ else: logs += f"Warning: Cannot infer square image from {pixels_per_sample} pixels. Defaulting to 28x28 for CNN.\n"
526
+
527
+ # Reshape and normalize (basic)
528
+ X_processed_np = X_raw.reshape(-1, input_channels, img_h, img_w).astype(np.float32) / 255.0
529
+ processed_input_dim_actual = (input_channels, img_h, img_w) # For CNN constructor
530
+ logs += f"CNN Data: X reshaped to {X_processed_np.shape}, y: {y_processed_np.shape}, NN Output Dim: {nn_output_dim_actual}\n"
531
+ else: logs += f"Unknown PyTorch model: {model_type_pt}\n"; return logs, "Unknown PyTorch model", None, "N/A", None
532
+
533
+ X_tensor = torch.tensor(X_processed_np, dtype=torch.float32)
534
+ # Adjust y_tensor dtype based on loss function expectations
535
+ y_dtype = torch.float32 if (nn_output_dim_actual == 1 and task_type.endswith("Regression")) or \
536
+ (nn_output_dim_actual == 1 and task_type.endswith("Classification")) \
537
+ else torch.long # MSELoss/BCELoss with float, CrossEntropy with long
538
+ y_tensor = torch.tensor(y_processed_np, dtype=y_dtype)
539
+ if nn_output_dim_actual == 1 and task_type.endswith("Classification"): y_tensor = y_tensor.unsqueeze(1) # For BCE based loss
540
+ if task_type.endswith("Regression"): y_tensor = y_tensor.unsqueeze(1) # MSELoss expects [N,1]
541
+
542
+ dataset = TensorDataset(X_tensor, y_tensor)
543
+ # Use num_workers=0 on free tier to avoid issues with multiprocessing
544
+ dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=0)
545
+
546
+ pytorch_model = None
547
+ try:
548
+ if model_type_pt == "Simple Neural Network (MLP)":
549
+ pytorch_model = SimpleMLP(input_dim=processed_input_dim_actual, hidden_layers_str=mlp_hidden_layers_str,
550
+ output_dim=nn_output_dim_actual, activation_fn_str=mlp_activation,
551
+ task_type="classification" if task_type.endswith("Classification") else "regression")
552
+ elif model_type_pt == "Simple Convolutional Network (CNN)":
553
+ channels, h, w = processed_input_dim_actual
554
+ pytorch_model = SimpleCNN(input_channels=channels, img_size_wh=(h,w), num_classes=nn_output_dim_actual)
555
+ except Exception as model_e:
556
+ logs += f"Error creating PyTorch model: {traceback.format_exc()}\n"; return logs, f"Model Creation Error: {model_e}", None, "N/A", None
557
+
558
+ if pytorch_model is None: logs += "Failed to instantiate PyTorch model.\n"; return logs, "Model instantiation failed", None, "N/A", None
559
+ model_params_val = count_pytorch_parameters(pytorch_model)
560
+ model_params_out = f"{model_params_val:,}"
561
+ logs += f"PyTorch Model: {model_params_out} params.\n"
562
+ if model_params_val > 500000: logs += "Warning: >500k params on CPU will be SLOW.\n"
563
+
564
+ is_classification_task = task_type.endswith("Classification") or model_type_pt == "Simple Convolutional Network (CNN)" # Treat CNN as classification here
565
+ if is_classification_task:
566
+ criterion = nn.BCELoss() if nn_output_dim_actual == 1 else nn.CrossEntropyLoss() # Note: the models already end in Sigmoid/Softmax; CrossEntropyLoss normally expects raw logits, so multi-class training still runs but with weaker gradients than usual
567
+ else: # Regression
568
+ criterion = nn.MSELoss()
569
+ optimizer = optim.Adam(pytorch_model.parameters(), lr=lr)
570
+
571
+ logs += f"Starting PyTorch training for {epochs} epochs...\n"; start_time = time.time()
572
+ epoch_losses = []
573
+ pytorch_model.train()
574
+ for epoch in range(epochs):
575
+ epoch_loss_sum = 0.0; num_batches = 0
576
+ for batch_X, batch_y in dataloader:
577
+ optimizer.zero_grad()
578
+ outputs = pytorch_model(batch_X)
579
+ loss = criterion(outputs, batch_y)
580
+ loss.backward(); optimizer.step()
581
+ epoch_loss_sum += loss.item(); num_batches += 1
582
+ avg_epoch_loss = epoch_loss_sum / num_batches if num_batches > 0 else 0
583
+ epoch_losses.append(avg_epoch_loss)
584
+ logs += f"Epoch {epoch+1}/{epochs}, Avg Loss: {avg_epoch_loss:.4f}\n"
585
+ # yield logs, metrics_out, model_path_out, model_params_out, None # For streaming, but makes UI complex
586
+
587
+ training_time = time.time() - start_time
588
+ logs += f"PyTorch training completed in {training_time:.2f} seconds.\n"
589
+
590
+ # Basic evaluation (on last batch for simplicity, or could do full test set)
591
+ # A proper eval loop on a test set would be better here.
592
+ pytorch_model.eval()
593
+ with torch.no_grad():
594
+ # For simplicity, let's just report final training loss.
595
+ # A full evaluation on a test split would be needed for proper metrics.
596
+ if is_classification_task:
597
+ # This is a very rough accuracy on the last training batch for demo
598
+ if dataloader.dataset: # Check if dataset is not empty
599
+ try:
600
+ last_batch_X, last_batch_y = next(iter(dataloader)) # Get one batch
601
+ outputs = pytorch_model(last_batch_X)
602
+ if nn_output_dim_actual == 1: # Binary
603
+ predicted = (outputs > 0.5).float()
604
+ else: # Multi-class
605
+ _, predicted = torch.max(outputs.data, 1)
606
+ correct = (predicted == last_batch_y.view_as(predicted)).sum().item()
607
+ total = last_batch_y.size(0)
608
+ acc = correct / total if total > 0 else 0
609
+ metrics_out = f"Final Training Loss: {avg_epoch_loss:.4f}\nApprox. Accuracy on a batch: {acc*100:.2f}% (Note: Proper eval needs a test set)"
610
+ except StopIteration: # Dataloader was empty
611
+ metrics_out = f"Final Training Loss: {avg_epoch_loss:.4f}\n (Dataloader empty, cannot get batch accuracy)"
612
+
613
+ else:
614
+ metrics_out = f"Final Training Loss: {avg_epoch_loss:.4f}\n (No data for batch accuracy)"
615
+ else: # Regression
616
+ metrics_out = f"Final Training Loss (MSE): {avg_epoch_loss:.4f}"
617
+ logs += "\n--- PyTorch Metrics (Simplified) ---\n" + metrics_out + "\n"
618
+
619
+ # Loss plot
620
+ if epoch_losses:
621
+ import matplotlib.pyplot as plt
622
+ fig, ax = plt.subplots()
623
+ ax.plot(range(1, epochs + 1), epoch_losses, marker='o')
624
+ ax.set_xlabel("Epoch")
625
+ ax.set_ylabel("Average Loss")
626
+ ax.set_title("Training Loss Curve")
627
+ plot_out = fig # Gradio can display matplotlib figures
628
+ logs += "Loss curve generated.\n"
629
+
630
+
631
+ # Save model (and preprocessor if MLP)
632
+ model_filename_base = f"pytorch_{model_type_pt.replace(' ', '_').lower()}"
633
+ if model_output_format == ".pt (PyTorch)":
634
+ model_path_out = get_temp_filepath(model_filename_base, "pt")
635
+ if model_type_pt == "Simple Neural Network (MLP)" and preprocessor_pipeline:
636
+ torch.save({
637
+ 'model_state_dict': pytorch_model.state_dict(),
638
+ 'preprocessor': preprocessor_pipeline,
639
+ 'input_dim': processed_input_dim_actual, # From preprocessing
640
+ 'output_dim': nn_output_dim_actual, # From preprocessing
641
+ 'hidden_layers_str': mlp_hidden_layers_str,
642
+ 'activation_fn': mlp_activation,
643
+ 'task_type': task_type
644
+ }, model_path_out)
645
+ logs += f"PyTorch MLP (model + preprocessor) saved to {model_path_out}\n"
646
+ else: # CNN or MLP without preprocessor explicitly bundled (less common)
647
+ torch.save(pytorch_model.state_dict(), model_path_out)
648
+ logs += f"PyTorch {model_type_pt} (model state_dict) saved to {model_path_out}\n"
649
+ # Add ONNX export for PyTorch later if needed (torch.onnx.export)
650
+ else:
651
+ logs += f"Unsupported format '{model_output_format}' for PyTorch. Saving as .pt\n"
652
+ model_path_out = get_temp_filepath(model_filename_base, "pt")
653
+ torch.save(pytorch_model.state_dict(), model_path_out) # Fallback to state_dict
654
+
655
+ return logs, metrics_out, model_path_out, model_params_out, plot_out
656
+
657
+
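# Editor's note: illustrative sketch (not part of the committed file, never called) showing how the
# bundled MLP checkpoint saved above (state_dict + fitted preprocessor + architecture info) could be
# reloaded for inference. `new_df` stands in for a DataFrame with the same raw columns as training.
def _sketch_load_pytorch_mlp(checkpoint_path, new_df):
    # weights_only=False is needed because the checkpoint also stores the sklearn preprocessor
    # (torch >= 1.13; older versions can drop the keyword).
    ckpt = torch.load(checkpoint_path, weights_only=False)
    model = SimpleMLP(input_dim=ckpt['input_dim'],
                      hidden_layers_str=ckpt['hidden_layers_str'],
                      output_dim=ckpt['output_dim'],
                      activation_fn_str=ckpt['activation_fn'],
                      task_type="classification" if ckpt['task_type'].endswith("Classification") else "regression")
    model.load_state_dict(ckpt['model_state_dict'])
    model.eval()
    X = ckpt['preprocessor'].transform(new_df)  # same ColumnTransformer fitted during training
    with torch.no_grad():
        return model(torch.tensor(np.asarray(X), dtype=torch.float32))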
658
+ # --- Gradio UI Definition ---
659
+ # Define choices
660
+ TASK_CHOICES = ["Tabular Classification", "Tabular Regression", "Basic Image Classification"] # Simple Text removed for focus
661
+ MODEL_FAMILIES = ["Scikit-learn (Classical ML)", "PyTorch (Neural Networks)"]
662
+ SKLEARN_MODELS_CLASSIFICATION = ["Logistic Regression", "Random Forest Classifier", "Support Vector Machine (SVM) Classifier"]
663
+ SKLEARN_MODELS_REGRESSION = ["Linear Regression", "Random Forest Regressor", "Support Vector Machine (SVR) Regressor"]
664
+ PYTORCH_MODELS = ["Simple Neural Network (MLP)", "Simple Convolutional Network (CNN)"]
665
+ DATASET_FORMATS = [".csv", ".json", ".parquet"]
666
+ MODEL_OUTPUT_FORMATS_SKLEARN = [".pkl (Scikit-learn)", ".onnx (ONNX)"]
667
+ MODEL_OUTPUT_FORMATS_PYTORCH = [".pt (PyTorch)"] # ".onnx (ONNX)" can be added later
668
+ MLP_ACTIVATIONS = ["relu", "tanh", "sigmoid"]
669
+
670
+ CLONE_GUIDE_TEXT = """
671
+ ## How to Clone & Upgrade This Space for More Power:
672
+ (Instructions as provided in previous response - omitted here for brevity but should be included)
673
+ """
674
+
675
+ def update_model_options(task_choice, model_family_choice):
676
+ if model_family_choice == "Scikit-learn (Classical ML)":
677
+ if task_choice == "Tabular Classification": return gr.update(choices=SKLEARN_MODELS_CLASSIFICATION, value=SKLEARN_MODELS_CLASSIFICATION[0], visible=True)
678
+ elif task_choice == "Tabular Regression": return gr.update(choices=SKLEARN_MODELS_REGRESSION, value=SKLEARN_MODELS_REGRESSION[0], visible=True)
679
+ else: return gr.update(choices=[], value=None, visible=False) # Sklearn not for image task here
680
+ elif model_family_choice == "PyTorch (Neural Networks)":
681
+ if task_choice.startswith("Tabular"): return gr.update(choices=[PYTORCH_MODELS[0]], value=PYTORCH_MODELS[0], visible=True) # Only MLP for tabular
682
+ elif task_choice == "Basic Image Classification": return gr.update(choices=[PYTORCH_MODELS[1]], value=PYTORCH_MODELS[1], visible=True) # Only CNN for image
683
+ else: return gr.update(choices=[], value=None, visible=False)
684
+ return gr.update(choices=[], value=None, visible=False)
685
+
686
+ def update_param_range_visibility(model_family_choice):
687
+ return gr.update(visible=(model_family_choice == "PyTorch (Neural Networks)"))
688
+
689
+ def update_pytorch_specific_options_visibility(model_choice_pytorch):
690
+ is_mlp = model_choice_pytorch == "Simple Neural Network (MLP)"
691
+ is_cnn = model_choice_pytorch == "Simple Convolutional Network (CNN)"
692
+ return gr.update(visible=is_mlp), gr.update(visible=is_cnn) # MLP Group, CNN Group
693
+
694
+ def update_model_output_formats(model_family_choice):
695
+ if model_family_choice == "Scikit-learn (Classical ML)":
696
+ return gr.update(choices=MODEL_OUTPUT_FORMATS_SKLEARN, value=MODEL_OUTPUT_FORMATS_SKLEARN[0])
697
+ elif model_family_choice == "PyTorch (Neural Networks)":
698
+ return gr.update(choices=MODEL_OUTPUT_FORMATS_PYTORCH, value=MODEL_OUTPUT_FORMATS_PYTORCH[0])
699
+ return gr.update(choices=[], value=None)
700
+
701
+
702
+ css = """
703
+ .gradio-container { font-family: 'IBM Plex Sans', sans-serif; }
704
+ .gr-button { color: white; border-color: black; background: black; }
705
+ .gr-input { border-radius: 8px; }
706
+ .gr-output { border-radius: 8px; }
707
+ """
708
+
709
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="orange"), css=css) as demo:
710
+ gr.Markdown("# 🧠 Universal AI Model Trainer (CPU Edition)")
711
+ gr.Markdown("Create, train, and download AI models. Optimized for CPU - expect longer training for complex models.")
712
+
713
+ # Global state to store generated data path or df
714
+ # This helps pass data between dataset generation and training without re-upload
715
+ # For DataFrames, it's better to pass them directly if possible, or save/load paths.
716
+ generated_data_state = gr.State(None)
717
+ current_logs_state = gr.State("") # To accumulate logs
718
+
719
+ with gr.Tabs():
720
+ with gr.TabItem("1. Define Task & Model"):
721
+ with gr.Row():
722
+ task_type_dd = gr.Dropdown(TASK_CHOICES, label="Select Task Type", value=TASK_CHOICES[0])
723
+ model_family_dd = gr.Dropdown(MODEL_FAMILIES, label="Select Model Family", value=MODEL_FAMILIES[0])
724
+
725
+ model_specific_dd = gr.Dropdown(label="Select Specific Model", interactive=True) # Populated by callback
726
+
727
+ # PyTorch Parameter Range (only visible for PyTorch)
728
+ pytorch_param_range_dd = gr.Dropdown(list(PARAM_RANGES.keys()), label="Target Parameter Range (for NNs)",
729
+ info="Guides NN architecture suggestions. Training >250k params on CPU is slow.",
730
+ value=list(PARAM_RANGES.keys())[1], visible=False)
731
+
732
+ # PyTorch MLP Specifics (only visible for MLP)
733
+ with gr.Group(visible=False) as pt_mlp_specific_group:
734
+ gr.Markdown("#### MLP Configuration")
735
+ # Input dim will be determined after data preprocessing for MLP. User doesn't set it here.
736
+ # Output dim also determined by data (num_classes or 1 for regression)
737
+ pt_mlp_hidden_layers_txt = gr.Textbox(label="Hidden Layer Sizes (comma-separated, e.g., 128,64)", value="64,32")
738
+ pt_mlp_activation_dd = gr.Dropdown(MLP_ACTIVATIONS, label="Activation Function", value="relu")
739
+ pt_mlp_suggest_btn = gr.Button("Suggest MLP Layers for Target Range")
740
+ pt_mlp_param_count_txt = gr.Textbox(label="Estimated MLP Parameters", interactive=False)
741
+ # For MLP param estimation, we'd need #input_features and #output_classes from data step
742
+ # This means estimation might be better placed *after* dataset is defined.
743
+ # For now, placeholder or user has to guess input/output dims.
744
+ # Simplified: we'll show actual params *after* training or with a dedicated button post-data.
745
+
746
+ # PyTorch CNN Specifics (Placeholder - visible for CNN)
747
+ with gr.Group(visible=False) as pt_cnn_specific_group:
748
+ gr.Markdown("#### CNN Configuration (Simplified for Demo)")
749
+ gr.Markdown("SimpleCNN uses fixed architecture for now (2 conv layers, 1 FC). Parameters mainly come from image size/classes.")
750
+ # For CNN param estimation, we need image H, W, num_classes from data step.
751
+ # cnn_img_h_param_est = gr.Number(label="Est. Image Height (for param count)", value=28, visible=False) # Hidden, used by callback
752
+ # cnn_img_w_param_est = gr.Number(label="Est. Image Width (for param count)", value=28, visible=False)
753
+ # cnn_num_classes_param_est = gr.Number(label="Est. Num Classes (for param count)", value=10, visible=False)
754
+ pt_cnn_param_count_txt = gr.Textbox(label="Estimated CNN Parameters", interactive=False)
755
+ # Actual CNN param count shown after training or with dedicated button post-data.
756
+
757
+
758
+ with gr.TabItem("2. Configure Dataset"):
759
+ dataset_source_rb = gr.Radio(["Generate new dataset", "Upload my own dataset (CSV, JSON, Parquet)"],
760
+ label="Dataset Source", value="Generate new dataset")
761
+
762
+ with gr.Group(visible=True) as generate_dataset_group: # Visible by default
763
+ gr.Markdown("#### Generate Synthetic Dataset")
764
+ with gr.Row():
765
+ ds_gen_samples_num = gr.Number(label="Number of Rows (Samples)", value=1000)
766
+ ds_gen_features_num = gr.Number(label="Number of Features (Columns, if tabular)", value=10)
767
+ ds_gen_classes_informative_num = gr.Number(label="Num Classes (for Classification) / Num Informative Features (for Regression)", value=2)
768
+ ds_gen_ai_suggest_cb = gr.Checkbox(label="Let AI suggest optimal rows/columns based on model type & param range?", value=False)
769
+ ds_gen_format_dd = gr.Dropdown(DATASET_FORMATS, label="Generated Dataset Download Format", value=".csv")
770
+ generate_dataset_btn = gr.Button("Generate & Preview Dataset", variant="secondary")
771
+
772
+ with gr.Group(visible=False) as upload_dataset_group:
773
+ gr.Markdown("#### Upload Dataset")
774
+ ds_upload_file = gr.File(label="Upload your dataset file", file_types=[".csv", ".json", ".parquet"])
775
+
776
+ target_column_name_txt = gr.Textbox(label="Target Column Name (Case-Sensitive)", placeholder="e.g., 'target' or 'label'")
777
+ dataset_preview_df = gr.DataFrame(label="Dataset Preview (First 5 Rows)", interactive=False)
778
+ generated_dataset_download_file = gr.File(label="Download Generated Dataset", interactive=False)
779
+
780
+ with gr.TabItem("3. Train Model & Get Results"):
781
+ gr.Markdown("Ensure Model and Dataset are configured before training.")
782
+ with gr.Row():
783
+ # Training Hyperparameters (Common for PyTorch)
784
+ # For Scikit-learn, HPs are mostly defaults or need more complex UI
785
+ # These are mainly for PyTorch NNs
786
+ train_epochs_num = gr.Number(label="Epochs (for NNs)", value=10)
787
+ train_batch_size_num = gr.Number(label="Batch Size (for NNs)", value=32)
788
+ train_learning_rate_num = gr.Number(label="Learning Rate (for NNs)", value=0.001)
789
+
790
+ model_output_format_dd = gr.Dropdown(label="Select Model Output Format", choices=MODEL_OUTPUT_FORMATS_SKLEARN, value=MODEL_OUTPUT_FORMATS_SKLEARN[0]) # Default to sklearn
791
+ train_model_btn = gr.Button("🚀 Train Model", variant="primary")
792
+
793
+ gr.Markdown("---")
794
+ gr.Markdown("### Training Progress & Results")
795
+ training_log_txt = gr.Textbox(label="Training Log & Status", lines=15, interactive=False, max_lines=50)
796
+ model_param_count_output_txt = gr.Textbox(label="Actual Trained Model Parameters", interactive=False)
797
+ evaluation_metrics_txt = gr.Textbox(label="Evaluation Metrics", lines=7, interactive=False)
798
+ loss_plot_img = gr.Plot(label="Training Loss Curve (PyTorch NNs)")
799
+ download_trained_model_file = gr.File(label="Download Trained Model", interactive=False)
800
+
801
+ with gr.TabItem("ℹ️ Guide & Info"):
802
+ gr.Markdown("### Using This Space")
803
+ gr.Markdown("- **Free CPU Tier:** Training large or complex models will be slow. Memory is also limited (around 15GB RAM).")
804
+ gr.Markdown("- **Workflow:** 1. Define Task/Model -> 2. Configure Dataset -> 3. Train.")
805
+ gr.Markdown("- **Dataset Generation:** For 'Basic Image Classification', random pixel data is generated (not real images).")
806
+ gr.Markdown("- **Parameters:** For Neural Networks, the 'Target Parameter Range' helps suggest architectures. 1M params is already large for CPU training.")
807
+ gr.Markdown("- **ONNX Export (Scikit-learn):** Converts Scikit-learn pipelines (preprocessor + model) to ONNX. Input to the ONNX model should be raw data matching the original training DataFrame structure.")
808
+ gr.Markdown(CLONE_GUIDE_TEXT)
809
+
810
+ # --- Event Handlers ---
811
+ # Update model choices based on task and family
812
+ task_type_dd.change(fn=update_model_options, inputs=[task_type_dd, model_family_dd], outputs=model_specific_dd)
813
+ model_family_dd.change(fn=update_model_options, inputs=[task_type_dd, model_family_dd], outputs=model_specific_dd)
814
+
815
+ # Show/hide PyTorch parameter range dropdown
816
+ model_family_dd.change(fn=update_param_range_visibility, inputs=model_family_dd, outputs=pytorch_param_range_dd)
817
+
818
+ # Show/hide PyTorch MLP/CNN specific groups
819
+ # This needs model_specific_dd as input, which is tricky if it's dynamically populated.
820
+ # Let's assume model_specific_dd is the PyTorch model dropdown for this context.
821
+ # This means model_specific_dd must *only* be active/relevant when model_family_dd is PyTorch.
822
+ def combined_pytorch_ui_update(model_family_choice, pytorch_model_choice):
823
+ param_range_visible = (model_family_choice == "PyTorch (Neural Networks)")
824
+ if not param_range_visible: # If not PyTorch, hide all PyTorch specific groups
825
+ return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
826
+
827
+ is_mlp = (pytorch_model_choice == "Simple Neural Network (MLP)")
828
+ is_cnn = (pytorch_model_choice == "Simple Convolutional Network (CNN)")
829
+ return gr.update(visible=param_range_visible), gr.update(visible=is_mlp), gr.update(visible=is_cnn)
830
+
831
+ model_family_dd.change(fn=combined_pytorch_ui_update,
832
+ inputs=[model_family_dd, model_specific_dd],
833
+ outputs=[pytorch_param_range_dd, pt_mlp_specific_group, pt_cnn_specific_group])
834
+ model_specific_dd.change(fn=combined_pytorch_ui_update, # Also trigger when specific PyTorch model changes
835
+ inputs=[model_family_dd, model_specific_dd],
836
+ outputs=[pytorch_param_range_dd, pt_mlp_specific_group, pt_cnn_specific_group])
837
+
838
+ # Suggest MLP Layers
839
+ def mlp_suggest_proxy(target_range_str, current_logs, dataset_preview_df, target_col_name, task_type):
840
+ logs = current_logs
841
+ input_dim_est = 10 # default if no data
842
+ output_dim_est = 2 if task_type.endswith("Classification") else 1 # default
843
+
844
+ if dataset_preview_df is not None and isinstance(dataset_preview_df, pd.DataFrame) and not dataset_preview_df.empty and target_col_name:
845
+ try:
846
+ # Attempt to get processed input dim. This is a simplified estimation.
847
+ # A full preprocessing run is too heavy here.
848
+ temp_X = dataset_preview_df.drop(target_col_name, axis=1, errors='ignore')
849
+ num_cols = len(temp_X.select_dtypes(include=np.number).columns)
850
+ cat_cols = temp_X.select_dtypes(include='object').columns
851
+ # Rough estimate of one-hot encoded features
852
+ one_hot_est = sum(min(10, dataset_preview_df[col].nunique()) for col in cat_cols) # cap nunique
853
+ input_dim_est = num_cols + one_hot_est
854
+ input_dim_est = max(1, input_dim_est) # Ensure > 0
855
+
856
+ if task_type.endswith("Classification"):
857
+ output_dim_est = max(1, dataset_preview_df[target_col_name].nunique())
858
+ if output_dim_est == 2: output_dim_est = 1 # For binary an output of 1 is common in NNs
859
+ logs += f"Estimated input_dim: {input_dim_est}, output_dim: {output_dim_est} for MLP suggestion.\n"
860
+ except Exception as e:
861
+ logs += f"Could not estimate dims from preview for MLP suggestion: {e}. Using defaults.\n"
862
+ else:
863
+ logs += "Dataset preview not available for MLP dimension estimation. Using defaults.\n"
864
+
865
+ suggested_str, logs = suggest_mlp_layers_for_range(input_dim_est, output_dim_est, target_range_str, logs)
866
+
867
+ # Also estimate params for the suggestion
868
+ param_count_str = "Error"
869
+ if suggested_str:
870
+ param_count_str, logs = estimate_current_mlp_params(str(input_dim_est), suggested_str, str(output_dim_est), logs)
871
+
872
+ return suggested_str, logs, param_count_str
873
+
874
+ pt_mlp_suggest_btn.click(
875
+ fn=mlp_suggest_proxy,
876
+ inputs=[pytorch_param_range_dd, current_logs_state, dataset_preview_df, target_column_name_txt, task_type_dd],
877
+ outputs=[pt_mlp_hidden_layers_txt, training_log_txt, pt_mlp_param_count_txt] # Use training_log_txt for logs from suggestion
878
+ )
879
+
880
+ # Estimate MLP params when hidden layers text changes (might be too slow if hooked to .change)
881
+ # A button is safer for this. For now, rely on suggestion button or post-training report.
882
+ # We can add an "Estimate Current MLP Params" button if needed.
883
+
884
+ # Show/hide dataset generation/upload groups
885
+ def toggle_dataset_source_groups(source_choice):
886
+ return gr.update(visible=(source_choice == "Generate new dataset")), \
887
+ gr.update(visible=(source_choice == "Upload my own dataset (CSV, JSON, Parquet)"))
888
+ dataset_source_rb.change(fn=toggle_dataset_source_groups, inputs=dataset_source_rb,
889
+ outputs=[generate_dataset_group, upload_dataset_group])
890
+
891
+ # Update model output formats based on family
892
+ model_family_dd.change(fn=update_model_output_formats, inputs=model_family_dd, outputs=model_output_format_dd)
893
+
894
+ # Dataset Generation Button
895
+ def generate_dataset_wrapper(task_type, n_samples, n_features, n_classes_info, ds_format, ai_sugg, param_range, model_type, logs_in):
896
+ preview, data_obj, logs_out, file_out = generate_dataset_backend(
897
+ task_type, n_samples, n_features, n_classes_info, ds_format, ai_sugg, param_range, model_type, logs_in
898
+ )
899
+ # Store the actual data (DataFrame or (X,y) tuple) in state if generation was successful
900
+ # If it's a filepath (from upload), store the path.
901
+ # For generated data, store the df or (X,y) tuple to avoid disk I/O if not necessary before training.
902
+ stored_data = data_obj if data_obj is not None else None
903
+ return preview, stored_data, logs_out, file_out
904
+
905
+ generate_dataset_btn.click(
906
+ fn=generate_dataset_wrapper,
907
+ inputs=[task_type_dd, ds_gen_samples_num, ds_gen_features_num, ds_gen_classes_informative_num,
908
+ ds_gen_format_dd, ds_gen_ai_suggest_cb, pytorch_param_range_dd, model_specific_dd, current_logs_state],
909
+ outputs=[dataset_preview_df, generated_data_state, training_log_txt, generated_dataset_download_file]
910
+ )
911
+
912
+ # Handle dataset upload
913
+ def process_uploaded_file(file_obj, logs_in):
914
+ logs = logs_in
915
+ if file_obj is None:
916
+ logs += "Please upload a file first.\n"; return None, logs, None
917
+ logs += f"Uploaded file: {file_obj.name}\n"
918
+
919
+ # For preview, try to read a few lines
920
+ df_preview = None
921
+ try:
922
+ if file_obj.name.endswith(".csv"):
923
+ df_preview = pd.read_csv(file_obj.name, nrows=5)
924
+ elif file_obj.name.endswith(".json"): # Assuming JSONL
925
+ df_preview = pd.read_json(file_obj.name, lines=True, nrows=5)
926
+ elif file_obj.name.endswith(".parquet"):
927
+ # Reading only 5 rows from parquet is not straightforward without loading more.
928
+ # For simplicity, load full and take head, or skip preview.
929
+ temp_df = pd.read_parquet(file_obj.name)
930
+ df_preview = temp_df.head()
931
+ logs += "Preview generated for uploaded file.\n"
932
+ except Exception as e:
933
+ logs += f"Could not generate preview for {file_obj.name}: {e}\n"
934
+ return None, logs, file_obj.name # Return path even if preview fails (error already appended to logs)
935
+
936
+ logs += "File ready for training.\n"; return df_preview, logs, file_obj.name # Store path in generated_data_state
937
+
938
+ ds_upload_file.upload(
939
+ fn=process_uploaded_file,
940
+ inputs=[ds_upload_file, current_logs_state],
941
+ outputs=[dataset_preview_df, training_log_txt, generated_data_state] # Status messages are appended to the log; the file path is stored in state
942
+ )
943
+
944
+
945
+ # Train Model Button
946
+ def train_model_wrapper(data_state_val, # This will be DataFrame, (X,y) tuple, or filepath string
947
+ target_col, task_type, model_family, model_name, # Common params
948
+ # Sklearn specific (none for now beyond model_name)
949
+ # PyTorch specific
950
+ pt_model_type, pt_mlp_hidden, pt_mlp_activ, #pt_cnn_params (later)
951
+ epochs, batch_size, lr,
952
+ model_out_format,
953
+ logs_in): # Accumulate logs
954
+
955
+ current_logs = logs_in + "\n--- Initiating Training ---\n"
956
+ current_logs += f"Data state type: {type(data_state_val)}\n"
957
+
958
+ if data_state_val is None:
959
+ current_logs += "Error: No dataset loaded or generated. Please go to Tab 2.\n"
960
+ return current_logs, "No data available.", None, "N/A", None, None # logs, metrics, model_file, params, plot, download_btn_update
961
+
962
+ if not target_col and (task_type.startswith("Tabular") or (isinstance(data_state_val, pd.DataFrame) and pt_model_type != "Simple Convolutional Network (CNN)")): # Target col needed for tabular
963
+ current_logs += "Error: Target column name is required for this task/data.\n"
964
+ return current_logs, "Target column needed.", None, "N/A", None, None
965
+
966
+ # Ensure logs are passed and returned correctly by train functions
967
+ if model_family == "Scikit-learn (Classical ML)":
968
+ logs, metrics, model_file, params = train_model_sklearn(
969
+ data_state_val, target_col, task_type, model_name, model_out_format, current_logs
970
+ )
971
+ return logs, metrics, model_file, params, None # No plot for sklearn here
972
+
973
+ elif model_family == "PyTorch (Neural Networks)":
974
+ # model_name here is the PyTorch model type (MLP or CNN)
975
+ logs, metrics, model_file, params, plot = train_model_pytorch(
976
+ data_state_val, target_col, task_type, model_name,
977
+ pt_mlp_hidden, pt_mlp_activ,
978
+ epochs, batch_size, lr,
979
+ model_out_format, current_logs
980
+ )
981
+ return logs, metrics, model_file, params, plot
982
+ else:
983
+ current_logs += f"Unknown model family: {model_family}\n"
984
+ return current_logs, "Unknown model family.", None, "N/A", None, None
985
+
986
+ train_model_btn.click(
987
+ fn=train_model_wrapper,
988
+ inputs=[
989
+ generated_data_state, target_column_name_txt, task_type_dd, model_family_dd, model_specific_dd,
990
+ # PyTorch specific inputs (will be None if not PyTorch family, but passed)
991
+ model_specific_dd, # This is pt_model_type if family is PyTorch
992
+ pt_mlp_hidden_layers_txt, pt_mlp_activation_dd,
993
+ train_epochs_num, train_batch_size_num, train_learning_rate_num,
994
+ model_output_format_dd,
995
+ training_log_txt # Pass current log content to append
996
+ ],
997
+ outputs=[
998
+ training_log_txt, evaluation_metrics_txt, download_trained_model_file,
999
+ model_param_count_output_txt, loss_plot_img
1001
+ ]
1002
+ )
1003
+
1004
+ # Clear logs button (optional)
1005
+ # clear_logs_btn = gr.Button("Clear Logs")
1006
+ # def clear_logs_func(): return "", "" # Clears current_logs_state and training_log_txt
1007
+ # clear_logs_btn.click(clear_logs_func, [], [current_logs_state, training_log_txt])
1008
+
1009
+
1010
+ demo.queue().launch(debug=True, show_error=True) # Enable queue for longer tasks, debug for local testing