"""Gradio tabbed app: train several regression models on uploaded CSV data.

Each tab reads an X CSV and a Y CSV, fits one regressor with the
hyperparameters chosen in the UI, and reports the R² score on a held-out
test split.
"""

import random

import gradio as gr
import numpy as np
import pandas as pd
from catboost import CatBoostRegressor
from sklearn.ensemble import (
    AdaBoostRegressor,
    ExtraTreesRegressor,
    RandomForestRegressor,
)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor, ExtraTreeRegressor
from xgboost import XGBRegressor

logger = gr.SimpleCSVLogger()

# Seed all RNGs up front so repeated runs are reproducible.
random.seed(42)
np.random.seed(42)

# Dropdown label -> base-estimator class for AdaBoost ("None" handled separately).
_BASE_ESTIMATORS = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "XGBoost": XGBRegressor,
    "RandomForestRegressor": RandomForestRegressor,
    "CatBoostRegressor": CatBoostRegressor,
    "LinearRegression": LinearRegression,
    "ExtraTreeRegressor": ExtraTreeRegressor,
}


def _load_xy(x_file_obj, y_file_obj):
    """Read the uploaded X/Y CSV files.

    Returns (X DataFrame, 1-D y array). Values are read as strings
    (dtype=str), as in the original app, and rely on scikit-learn's
    implicit numeric coercion.
    NOTE(review): assumes both CSVs contain purely numeric data — confirm;
    otherwise model.fit will fail on the string-to-float conversion.
    """
    X = pd.read_csv(x_file_obj.name, dtype=str)
    y = pd.read_csv(y_file_obj.name, dtype=str).values.ravel()
    return X, y


def _split(X, y, test_size, random_seed):
    """Train/test split using the UI-provided test fraction and seed."""
    return train_test_split(X, y, test_size=test_size, random_state=int(random_seed))


def AdaBoostRegressorR2(x_file_obj, y_file_obj, learning_rate, n_estimators,
                        loss, base_estimator, test_size, random_seed):
    """Train an AdaBoostRegressor and return the R² score on the test split.

    Parameters mirror the Gradio controls: ``base_estimator`` is a dropdown
    label (or "None" for AdaBoost's default); sliders deliver floats.
    """
    random_seed = int(random_seed)
    factory = _BASE_ESTIMATORS.get(base_estimator)
    estimator = factory() if factory is not None else None  # "None" -> default
    X, y = _load_xy(x_file_obj, y_file_obj)
    x_train, x_test, y_train, y_test = _split(X, y, test_size, random_seed)
    model = AdaBoostRegressor(
        learning_rate=learning_rate,
        # Sliders produce floats; n_estimators must be an int.
        n_estimators=int(n_estimators),
        loss=loss,
        base_estimator=estimator,
        random_state=random_seed,
    )
    model.fit(x_train, y_train)
    return r2_score(y_test, model.predict(x_test))


adaInterface = gr.Interface(
    AdaBoostRegressorR2,
    [
        gr.components.File(label="X File (CSV)"),
        gr.components.File(label="Y File (CSV)"),
        gr.components.Slider(0.1, 1, default=0.1, label="learning_rate"),
        gr.components.Slider(0, 1000, default=10, label="n_estimators"),
        gr.components.Dropdown(["linear", "square", "exponential"], label="loss"),
        gr.components.Dropdown(
            ["DecisionTreeRegressor", "XGBoost", "RandomForestRegressor",
             "CatBoostRegressor", "LinearRegression", "ExtraTreeRegressor", "None"],
            label="base_estimator"),
        gr.components.Slider(0.1, 0.9, default=0.2, label="test_size"),
        gr.components.Slider(0, 100, default=42, label="random_seed"),
    ],
    outputs="number",
    title="AdaBoostRegressor",
    description="Treino de um modelo de regressão com AdaBoostRegressor",
    allow_flagging="manual",
)


def CatBoostRegressorR2(x_file_obj, y_file_obj, learning_rate, n_estimators,
                        loss, max_depth, max_bin, l2_leaf_reg, test_size,
                        random_seed):
    """Train a CatBoostRegressor and return the R² score on the test split."""
    random_seed = int(random_seed)
    X, y = _load_xy(x_file_obj, y_file_obj)
    x_train, x_test, y_train, y_test = _split(X, y, test_size, random_seed)
    model = CatBoostRegressor(
        learning_rate=learning_rate,
        # Sliders produce floats; these CatBoost parameters must be ints.
        n_estimators=int(n_estimators),
        loss_function=loss,
        max_depth=int(max_depth),
        max_bin=int(max_bin),
        l2_leaf_reg=l2_leaf_reg,
        random_state=random_seed,
    )
    model.fit(x_train, y_train)
    return r2_score(y_test, model.predict(x_test))


catInterface = gr.Interface(
    CatBoostRegressorR2,
    [
        gr.components.File(label="X File (CSV)"),
        gr.components.File(label="Y File (CSV)"),
        gr.components.Slider(0.1, 1, default=0.1, label="learning_rate"),
        gr.components.Slider(0, 1000, default=10, label="n_estimators"),
        gr.components.Dropdown(
            ["RMSE", "MAE", "Quantile:alpha=0.9", "LogLinQuantile:alpha=0.9",
             "Poisson", "MAPE", "MultiRMSE", "Quantile", "LogLinQuantile",
             "Lq:q=1", "Lq:q=2"],
            label="loss"),
        gr.components.Slider(0, 100, default=1, label="max_depth"),
        gr.components.Slider(1, 255, default=255, label="max_bin"),
        gr.components.Slider(0, 100, default=3, label="l2_leaf_reg"),
        gr.components.Slider(0.1, 0.9, default=0.2, label="test_size"),
        gr.components.Slider(0, 100, default=42, label="random_seed"),
    ],
    outputs="number",
    title="CatBoostRegressor",
    description="Treino de um modelo de regressão com CatBoostRegressor",
    allow_flagging="manual",
)


def _forest_r2(model_cls, x_file_obj, y_file_obj, n_estimators, criterion,
               max_depth, min_samples_split, min_samples_leaf,
               min_weight_fraction_leaf, max_features, max_leaf_nodes,
               min_impurity_decrease, bootstrap, n_jobs, test_size,
               random_seed):
    """Shared driver for the tree-ensemble tabs: fit ``model_cls`` and return test R².

    Normalizes the raw Gradio widget values into what scikit-learn accepts:
    sliders yield floats even for integer parameters, and the bootstrap
    dropdown yields the *strings* "True"/"False".
    """
    random_seed = int(random_seed)
    # max_depth=0 means "no limit"; sklearn spells that None (it rejects 0).
    max_depth = int(max_depth) or None
    # sklearn requires max_leaf_nodes to be None or >= 2; the UI default (1)
    # is treated as "no limit".
    max_leaf_nodes = int(max_leaf_nodes)
    if max_leaf_nodes < 2:
        max_leaf_nodes = None
    # Values >= the integer threshold are counts (must be int); smaller
    # values are kept as float fractions.
    if min_samples_split >= 2:
        min_samples_split = int(min_samples_split)
    if min_samples_leaf >= 1:
        min_samples_leaf = int(min_samples_leaf)
    # The dropdown delivers "True"/"False" as strings, and bool("False") is
    # truthy — compare explicitly so selecting False actually disables bootstrap.
    if isinstance(bootstrap, str):
        bootstrap = bootstrap == "True"
    X, y = _load_xy(x_file_obj, y_file_obj)
    x_train, x_test, y_train, y_test = _split(X, y, test_size, random_seed)
    model = model_cls(
        n_estimators=int(n_estimators),
        criterion=criterion,
        max_depth=max_depth,
        min_samples_split=min_samples_split,
        min_samples_leaf=min_samples_leaf,
        min_weight_fraction_leaf=min_weight_fraction_leaf,
        max_features=max_features,
        max_leaf_nodes=max_leaf_nodes,
        min_impurity_decrease=min_impurity_decrease,
        bootstrap=bootstrap,
        n_jobs=int(n_jobs),
        random_state=random_seed,
    )
    model.fit(x_train, y_train)
    return r2_score(y_test, model.predict(x_test))


def RandomForestRegressorR2(x_file_obj, y_file_obj, n_estimators, criterion,
                            max_depth, min_samples_split, min_samples_leaf,
                            min_weight_fraction_leaf, max_features,
                            max_leaf_nodes, min_impurity_decrease, bootstrap,
                            n_jobs, test_size, random_seed):
    """Train a RandomForestRegressor and return the R² score on the test split."""
    return _forest_r2(RandomForestRegressor, x_file_obj, y_file_obj,
                      n_estimators, criterion, max_depth, min_samples_split,
                      min_samples_leaf, min_weight_fraction_leaf, max_features,
                      max_leaf_nodes, min_impurity_decrease, bootstrap, n_jobs,
                      test_size, random_seed)


randomForestInterface = gr.Interface(
    RandomForestRegressorR2,
    [
        gr.components.File(label="X File (CSV)"),
        gr.components.File(label="Y File (CSV)"),
        gr.components.Slider(0, 1000, default=10, label="n_estimators"),
        gr.components.Dropdown(["mse", "mae", "poisson"], label="criterion", default="mse"),
        gr.components.Slider(0, 100, default=1, label="max_depth"),
        gr.components.Slider(0, 100, default=1, label="min_samples_split"),
        gr.components.Slider(0, 100, default=1, label="min_samples_leaf"),
        gr.components.Slider(0, 0.5, default=0, label="min_weight_fraction_leaf"),
        gr.components.Dropdown(["auto", "sqrt", "log2"], label="max_features", default="auto"),
        gr.components.Slider(0, 100, default=1, label="max_leaf_nodes"),
        gr.components.Slider(0, 1, default=0, label="min_impurity_decrease"),
        gr.components.Dropdown(["True", "False"], label="bootstrap", default="True"),
        gr.components.Dropdown([i for i in range(-1, 5, 1)], default=-1, label="n_jobs"),
        gr.components.Slider(0.1, 0.9, default=0.2, label="test_size"),
        gr.components.Slider(0, 100, default=42, label="random_seed"),
    ],
    outputs="number",
    title="RandomForestRegressor",
    description="Treino de um modelo de regressão com RandomForestRegressor",
    allow_flagging="manual",
)


# TODO - Add more parameters EXTRA TREE
def ExtraTreesRegressorR2(x_file_obj, y_file_obj, n_estimators, criterion,
                          max_depth, min_samples_split, min_samples_leaf,
                          min_weight_fraction_leaf, max_features,
                          max_leaf_nodes, min_impurity_decrease, bootstrap,
                          n_jobs, test_size, random_seed):
    """Train an ExtraTreesRegressor and return the R² score on the test split."""
    return _forest_r2(ExtraTreesRegressor, x_file_obj, y_file_obj,
                      n_estimators, criterion, max_depth, min_samples_split,
                      min_samples_leaf, min_weight_fraction_leaf, max_features,
                      max_leaf_nodes, min_impurity_decrease, bootstrap, n_jobs,
                      test_size, random_seed)


extraTreesInterface = gr.Interface(
    ExtraTreesRegressorR2,
    [
        gr.components.File(label="X File (CSV)"),
        gr.components.File(label="Y File (CSV)"),
        gr.components.Slider(0, 1000, default=10, label="n_estimators"),
        gr.components.Dropdown(["mse", "mae", "poisson"], label="criterion", default="mse"),
        gr.components.Slider(0, 100, default=1, label="max_depth"),
        gr.components.Slider(0, 100, default=1, label="min_samples_split"),
        gr.components.Slider(0, 100, default=1, label="min_samples_leaf"),
        gr.components.Slider(0, 0.5, default=0, label="min_weight_fraction_leaf"),
        gr.components.Dropdown(["auto", "sqrt", "log2"], label="max_features", default="auto"),
        gr.components.Slider(0, 100, default=1, label="max_leaf_nodes"),
        gr.components.Slider(0, 1, default=0, label="min_impurity_decrease"),
        gr.components.Dropdown(["True", "False"], label="bootstrap", default="True"),
        gr.components.Dropdown([i for i in range(-1, 5, 1)], default=-1, label="n_jobs"),
        gr.components.Slider(0.1, 0.9, default=0.2, label="test_size"),
        gr.components.Slider(0, 100, default=42, label="random_seed"),
    ],
    outputs="number",
    title="ExtraTreesRegressor",
    description="Treino de um modelo de regressão com ExtraTreesRegressor",
    allow_flagging="manual",
)


def linearRegressionR2(x_file_obj, y_file_obj, fit_intercept, normalize, copy_X):
    """Train a LinearRegression model and return the R² score on a fixed 80/20 split."""
    X, y = _load_xy(x_file_obj, y_file_obj)
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=0)
    # NOTE(review): `normalize` was removed from LinearRegression in
    # scikit-learn 1.2; this app evidently targets an older sklearn where
    # the keyword still exists — confirm the pinned version.
    model = LinearRegression(fit_intercept=fit_intercept, normalize=normalize,
                             copy_X=copy_X)
    model.fit(x_train, y_train)
    return r2_score(y_test, model.predict(x_test))


linearRegressionInterface = gr.Interface(
    linearRegressionR2,
    [
        gr.components.File(label="X File (CSV)"),
        gr.components.File(label="Y File (CSV)"),
        gr.components.Checkbox(True, label="fit_intercept"),
        gr.components.Checkbox(False, label="normalize"),
        gr.components.Checkbox(True, label="copy_X"),
    ],
    outputs="number",
    title="LinearRegression",
    description="Treino de um modelo de regressão com LinearRegression",
    allow_flagging="manual",
)

iface = gr.TabbedInterface(
    [catInterface, adaInterface, randomForestInterface, extraTreesInterface,
     linearRegressionInterface],
    ["CatBoostRegressor", "AdaBoostRegresssor", "RandomForestRegressor",
     "ExtraTreesRegressor", "LinearRegression"],
    theme="dark",
)

if __name__ == "__main__":
    iface.launch(share=False, show_error=True)