kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completetion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
14,045,167 | all_data["PoolQC"] = all_data["PoolQC"].fillna("None" )<data_type_conversions> | m2_fit = m2.fit(X_train_lr, y_train_lr)
print('Score reached: {} '.format(m2.score(X_train_lr, y_train_lr)))
| Tabular Playground Series - Jan 2021 |
14,045,167 | all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None" )<data_type_conversions> | X_test_lr = test_df.drop(['id'], axis=1)
y_test_ridge = m2.predict(X_test_lr ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data["Alley"] = all_data["Alley"].fillna("None" )<data_type_conversions> | lgb_train = lgb.Dataset(X_train_df, y_train_df, free_raw_data=False)
lgb_eval = lgb.Dataset(X_val_df, y_val_df, free_raw_data=False ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data["Fence"] = all_data["Fence"].fillna("None" )<data_type_conversions> | Tabular Playground Series - Jan 2021 | |
14,045,167 | all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None" )<groupby> | gsLGBM.best_params_
| Tabular Playground Series - Jan 2021 |
14,045,167 | grouped_df = all_data.groupby('Neighborhood')['LotFrontage']
for key, item in grouped_df:
print(key,"
")
print(grouped_df.get_group(key))
break<categorify> | m3 = lgb.LGBMRegressor(valid_sets = [lgb_train, lgb_eval], verbose_eval = 30, num_boost_round = 10000, early_stopping_rounds = 10, n_jobs=4, **opt_parameters_LGBM)
m3.fit(X_train_df, y_train_df, eval_set =(X_val_df, y_val_df), eval_metric = 'rmse' ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median()))<data_type_conversions> | X_test = test_df
y_test_lgbm = m3.predict(X_test ) | Tabular Playground Series - Jan 2021 |
14,045,167 | for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
all_data[col] = all_data[col].fillna('None' )<groupby> | opt_parameters_ADA = {'learning_rate': 0.028555288989857153, 'n_estimators': 36} | Tabular Playground Series - Jan 2021 |
14,045,167 | abc = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond','GarageYrBlt', 'GarageArea', 'GarageCars']
all_data.groupby('GarageType')[abc].count()<data_type_conversions> | m4 = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(max_depth=3, min_samples_leaf=1, min_impurity_decrease=10, random_state=47), random_state=47, **opt_parameters_ADA)
m4.fit(X_train_df, y_train_df ) | Tabular Playground Series - Jan 2021 |
14,045,167 | for col in('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0 )<data_type_conversions> | X_test = test_df
y_test_ada = m4.predict(X_test ) | Tabular Playground Series - Jan 2021 |
14,045,167 | for col in('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0 )<data_type_conversions> | m5 = CatBoostRegressor(random_seed=47 ) | Tabular Playground Series - Jan 2021 |
14,045,167 | for col in('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None' )<data_type_conversions> | gsCB.fit(X_train_df, y_train_df)
print('Best score reached: {} with params: {} '.format(gsCB.best_score_, gsCB.best_params_)) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0 )<count_values> | X_test = test_df
y_test_cb = m5.predict(X_test ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data['MSZoning'].value_counts()<data_type_conversions> | gbm = xgb.XGBRegressor(
learning_rate = 0.01,
n_estimators= 100,
max_depth= 4,
min_child_weight= 2,
gamma=0.9,
subsample=0.8,
colsample_bytree=0.8,
objective= 'reg:squaredlogerror',
nthread= -1,
verbosity=3,
random_state=20)
lgbm_params = m1.get_params()
lgbm_params["early_stopping_rounds"] = None
m1.set_params(**lgbm_params)
estimators = [('lgbm', m1),('ada', m2),('lasso', m3),('ridge', m4)]
gbm = StackingRegressor(estimators=estimators, final_estimator=gbm, cv=5, verbose=1 ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode() [0] )<count_values> | Tabular Playground Series - Jan 2021 | |
14,045,167 | all_data['Utilities'].value_counts()<drop_column> | gbm.fit(X_train_df, y_train_df ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data = all_data.drop(['Utilities'], axis=1 )<data_type_conversions> | X_test = test_df
y_test_gbm = gbm.predict(X_test ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data["Functional"] = all_data["Functional"].fillna("Typ" )<categorify> | lasso_submission = pd.DataFrame({'id': test_df['id'], 'target': y_test_lasso})
lasso_submission.to_csv('lasso_submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,045,167 | mode_col = ['Electrical','KitchenQual', 'Exterior1st', 'Exterior2nd', 'SaleType']
for col in mode_col:
all_data[col] = all_data[col].fillna(all_data[col].mode() [0] )<data_type_conversions> | ridge_submission = pd.DataFrame({'id': test_df['id'], 'target': y_test_ridge})
ridge_submission.to_csv('ridge_submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None" )<sort_values> | lgbm_submission = pd.DataFrame({'id': test_df['id'], 'target': y_test_lgbm})
lgbm_submission.to_csv('lgbm_submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,045,167 | all_data_na =(all_data.isnull().sum() / len(all_data)) * 100
all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index ).sort_values(ascending=False)
missing_data = pd.DataFrame({'Missing Ratio' :all_data_na})
missing_data.head()<count_values> | ada_submission = pd.DataFrame({'id': test_df['id'], 'target': y_test_ada})
ada_submission.to_csv('ada_submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,045,167 | <data_type_conversions><EOS> | gbm_submission = pd.DataFrame({'id': test_df['id'], 'target': y_test_gbm})
gbm_submission.to_csv('gbm_submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,161,713 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<categorify> | import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from tqdm import tqdm
import pandas as pd
import xgboost as xgb | Tabular Playground Series - Jan 2021 |
14,161,713 | cols =('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))
print('Shape all_data: {}'.format(all_data.shape))<feature_engineering> | train_data = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv')
test_data = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv')
train_data.head() | Tabular Playground Series - Jan 2021 |
14,161,713 | all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']<feature_engineering> | features = ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7',
'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14']
X_train = train_data[features]
y_train = train_data["target"]
final_X_test = test_data[features] | Tabular Playground Series - Jan 2021 |
14,161,713 | skewness = skewness[abs(skewness)> 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
all_data[feat] = boxcox1p(all_data[feat], lam )<categorify> | X_train,X_test,y_train,y_test = train_test_split(X_train, y_train, test_size=0.1 ) | Tabular Playground Series - Jan 2021 |
14,161,713 | all_data = pd.get_dummies(all_data)
all_data.shape<prepare_x_and_y> | Best_trial = {'lambda': 0.0030282073258141168, 'alpha': 0.01563845128469084, 'colsample_bytree': 0.5,
'subsample': 0.7,'n_estimators': 4000, 'learning_rate': 0.01,'max_depth': 15,
'random_state': 2020, 'min_child_weight': 257,'tree_method':'gpu_hist'
,'predictor': 'gpu_predictor'} | Tabular Playground Series - Jan 2021 |
14,161,713 | train = all_data[:ntrain]
test = all_data[ntrain:]
train.shape<import_modules> | regressor = xgb.XGBRegressor(**Best_trial)
regressor.fit(X_train, y_train, early_stopping_rounds=10, eval_set=[(X_test, y_test)],verbose=False ) | Tabular Playground Series - Jan 2021 |
14,161,713 | from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb<compute_train_metric> | y_xgb_pred = regressor.predict(final_X_test ) | Tabular Playground Series - Jan 2021 |
14,161,713 | n_folds = 5
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=42 ).get_n_splits(train.values)
rmse= np.sqrt(-cross_val_score(model, train.values, y_train, scoring="neg_mean_squared_error", cv = kf))
return(rmse )<compute_train_metric> | output1 = pd.DataFrame({"id":test_data.id, "target":y_xgb_pred})
output1.to_csv('xgb.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,161,713 | KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
score = rmsle_cv(KRR)
print("Kernel Ridge score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | params ={'random_state': 33,'n_estimators':5000,
'min_data_per_group': 5,
'boosting_type': 'gbdt',
'device_type' : 'gpu',
'num_leaves': 256,
'num_iterations' : 5000,
'max_dept': -1,
'learning_rate': 0.005,
'subsample_for_bin': 200000,
'lambda_l1': 1.074622455507616e-05,
'lambda_l2': 2.0521330798729704e-06,
'n_jobs': -1,
'cat_smooth': 1.0,
'silent': True,
'importance_type': 'split',
'metric': 'rmse',
'feature_pre_filter': False,
'bagging_fraction': 0.8206341150202605,
'min_data_in_leaf': 100,
'min_sum_hessian_in_leaf': 0.001,
'bagging_freq': 6,
'feature_fraction': 0.5,
'min_gain_to_split': 0.0,
'min_child_samples': 20} | Tabular Playground Series - Jan 2021 |
14,161,713 | lasso = make_pipeline(RobustScaler() , Lasso(alpha =0.0005, random_state=1))
score = rmsle_cv(lasso)
print("Lasso score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<compute_train_metric> | lgb_model = LGBMRegressor(**params)
lgb_model.fit(X_train, y_train, eval_set=(X_test, y_test),early_stopping_rounds = 50, verbose = 0 ) | Tabular Playground Series - Jan 2021 |
14,161,713 | ENet = make_pipeline(RobustScaler() , ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<compute_train_metric> | y_pred_lgb = lgb_model.predict(final_X_test ) | Tabular Playground Series - Jan 2021 |
14,161,713 | GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<train_model> | output2 = pd.DataFrame({"id":test_data.id, "target":y_pred_lgb})
output2.to_csv('lgbm.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,161,713 | LassoMd = lasso.fit(train.values,y_train)
ENetMd = ENet.fit(train.values,y_train)
KRRMd = KRR.fit(train.values,y_train)
GBoostMd = GBoost.fit(train.values,y_train )<predict_on_test> | Best_trial = {'l2_leaf_reg': 0.02247766515106271, 'max_bin': 364, 'subsample': 0.6708650091202213,
'learning_rate': 0.010290546311954876, 'max_depth': 10, 'random_state': 24, 'min_data_in_leaf': 300,
'loss_function': 'RMSE','n_estimators': 25000,'rsm':0.5} | Tabular Playground Series - Jan 2021 |
14,161,713 | finalMd =(np.expm1(LassoMd.predict(test.values)) + np.expm1(ENetMd.predict(test.values)) + np.expm1(KRRMd.predict(test.values)) + np.expm1(GBoostMd.predict(test.values)))/ 4
finalMd<save_to_csv> | cb_model = CatBoostRegressor(**Best_trial ) | Tabular Playground Series - Jan 2021 |
14,161,713 | sub = pd.DataFrame()
sub['Id'] = test_ID
sub['SalePrice'] = finalMd
sub.to_csv('submission.csv',index=False )<set_options> | cb_model.fit(X_train, y_train,eval_set=(X_test, y_test),use_best_model=True,verbose=1000 ) | Tabular Playground Series - Jan 2021 |
14,161,713 | %matplotlib inline
<load_from_csv> | cat_pred =cb_model.predict(final_X_test ) | Tabular Playground Series - Jan 2021 |
14,161,713 | train_data = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv")
test_data = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/test.csv")
train_data.shape,test_data.shape<sort_values> | output3 = pd.DataFrame({"id":test_data.id, "target":cat_pred})
output3.to_csv('cat.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,161,713 | <drop_column><EOS> | results =(y_pred_lgb + y_xgb_pred + cat_pred)/ 3
output = pd.DataFrame({"id":test_data.id, "target":results})
output.to_csv('submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |
14,138,753 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<concatenate> | train = pd.read_csv(".. /input/tabular-playground-series-jan-2021/train.csv")
test = pd.read_csv(".. /input/tabular-playground-series-jan-2021/test.csv")
sub = pd.read_csv(".. /input/tabular-playground-series-jan-2021/sample_submission.csv")
train | Tabular Playground Series - Jan 2021 |
14,138,753 | all_data = pd.concat([train_data, test_data],keys=['train','test'])
train_data.shape, test_data.shape, all_data.shape<drop_column> | train = train.drop(["id"], axis=1)
features = [c for c in train.columns if "cont" in c]
test = test.drop("id", axis=1)
test | Tabular Playground Series - Jan 2021 |
14,138,753 | all_data = all_data.drop(columns=['Id'], axis=1 )<drop_column> | fe = dict(
rankgauss = True,
stats = True,
gaussmix = False,
pca = True,
tsne = True,
umap = True,
drop_original = False,
) | Tabular Playground Series - Jan 2021 |
14,138,753 | all_data['Age']=all_data['YrSold']-all_data['YearBuilt']+1
all_data['AgeRemodAdd']=all_data['YrSold']-all_data['YearRemodAdd']+1
all_data['AgeGarage']=all_data['YrSold']-all_data['GarageYrBlt']+1
all_data.drop(columns = 'YearBuilt')
all_data.drop(columns = 'YearRemodAdd')
all_data.drop(columns = 'GarageYrBlt' )<feature_engineering> | all_data = pd.concat([train, test], axis=0, ignore_index=True)
targets = all_data.target[:300000]
all_data = all_data.drop("target", axis=1)
COLS = [c for c in all_data.columns if "cont" in c]
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | for i in ['Alley','PoolQC','Fence','MiscFeature','FireplaceQu']:
all_data[i]=all_data[i].apply(lambda x: 'None' if str(x)=='nan' else x )<feature_engineering> | if fe["stats"]:
for stats in tqdm.tqdm(["sum", "var", "mean", "median", "std", "kurt", "skew"]):
all_data["cont_" + stats] = getattr(all_data[COLS], stats )(axis = 1)
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | for i in('GarageType','GarageFinish','GarageQual','GarageCond'):
all_data[i].fillna('None',inplace=True)
all_data['GarageArea'] =(np.where(( all_data['GarageType']=='None'), 0, all_data['GarageArea']))
all_data['GarageCars'] =(np.where(( all_data['GarageType']=='None'), 0, all_data['GarageCars']))
all_data['AgeGarage'] =(np.where(( all_data['GarageType']=='None'), 0, all_data['AgeGarage']))<feature_engineering> | sys.path.append(".. /input/rank-gauss")
if fe["rankgauss"]:
scaler = GaussRankScaler()
rankgauss_feat = scaler.fit_transform(all_data[COLS])
rankgauss_df = pd.DataFrame(rankgauss_feat, columns=[f"rankgauss_{i}" for i in range(rankgauss_feat.shape[1])])
all_data = pd.concat([all_data, rankgauss_df], axis=1)
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | for i in('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[i].fillna('None',inplace=True)
for i in('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[i] =(np.where(( all_data['BsmtQual']=='None'), 0, all_data[i]))<categorify> | if fe["gaussmix"]:
def get_gmm_class_feature(feat, n):
gmm = GaussianMixture(n_components=n, random_state=42)
gmm.fit(all_data[feat].values.reshape(-1, 1))
all_data[f'{feat}_class'] = gmm.predict(all_data[feat].values.reshape(-1, 1))
get_gmm_class_feature('cont1', 4)
get_gmm_class_feature('cont2', 10)
get_gmm_class_feature('cont3', 6)
get_gmm_class_feature('cont4', 4)
get_gmm_class_feature('cont5', 3)
get_gmm_class_feature('cont6', 2)
get_gmm_class_feature('cont7', 3)
get_gmm_class_feature('cont8', 4)
get_gmm_class_feature('cont9', 4)
get_gmm_class_feature('cont10', 8)
get_gmm_class_feature('cont11', 5)
get_gmm_class_feature('cont12', 4)
get_gmm_class_feature('cont13', 6)
get_gmm_class_feature('cont14', 6)
CLASS_COLS = [c for c in all_data.columns if "_class" in c]
CLASS_COLS_IDX = []
for c in CLASS_COLS:
CLASS_COLS_IDX.append(all_data.columns.get_loc(c))
assert len(CLASS_COLS)> 0
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | x= { 20:'1-STORY 1946 & NEWER ALL STYLES',
30:'1-STORY 1945 & OLDER',
40:'1-STORY W/FINISHED ATTIC ALL AGES',
45:'1-1/2 STORY - UNFINISHED ALL AGES',
50:'1-1/2 STORY FINISHED ALL AGES',
60:'2-STORY 1946 & NEWER',
70:'2-STORY 1945 & OLDER',
75:'2-1/2 STORY',
80:'SPLIT',
85:'SPLIT FOYER',
90:'DUPLEX',
120:'1-STORY PUD',
150:'1-1/2 STORY PUD',
160:'2-STORY PUD',
180:'PUD - MULTILEVEL',
190:'2 FAMILY CONVERSION'}
all_data['MSSubClass']=all_data['MSSubClass'].map(x )<sort_values> | if fe["pca"]:
pca = PCA(n_components = 0.9, random_state = 42 ).fit(all_data[COLS])
pca_feat = pca.transform(all_data[COLS])
pca_df = pd.DataFrame(pca_feat, columns = [f"pca_cont{i}" for i in range(pca.n_components_)])
all_data = pd.concat([all_data, pca_df], axis=1)
PCA_COLS = [c for c in all_data.columns if "pca" in c]
assert len(PCA_COLS)> 0
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | total = all_data.isnull().sum().sort_values(ascending = False)
percent = round(all_data.isnull().sum().sort_values(ascending = False)/ len(all_data)*100, 2)
missingValues = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missingValues.loc[(missingValues['Total'] > 0)]<categorify> | if fe["tsne"]:
tsne_components = 2
perplexity = [5, 10, 15, 20, 25]
for per in perplexity:
tsne = TSNE(n_components = tsne_components, perplexity = per, n_neighbors = 3.5 * per)
tsne_feat = tsne.fit_transform(all_data[COLS])
tsne_df = pd.DataFrame(tsne_feat, columns=[f"tsne_{per}_{i}" for i in range(tsne_components)])
all_data = pd.concat([all_data, tsne_df], axis = 1)
TSNE_COLS = [c for c in all_data.columns if "tsne" in c]
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | def HandleMissingValues(df):
num_cols = df.select_dtypes(include = ['int64', 'float64'])
cat_cols = df.select_dtypes(include = 'object')
values = {}
for a in cat_cols:
values[a] = 'unknown'
for a in num_cols:
_index = df[df[a].isna() ].index
_value = np.random.normal(loc = df[a].mean() , scale = df[a].std() , size = df[a].isna().sum())
df[a].fillna(pd.Series(_value, index=_index), inplace=True)
df.fillna(value=values, inplace=True)
HandleMissingValues(all_data)
all_data.isnull().sum().sum()<prepare_x_and_y> | if fe["umap"]:
umap_components = 10
umap = UMAP(n_components = umap_components)
umap_feat = umap.fit_transform(all_data[COLS])
umap_df = pd.DataFrame(umap_feat, columns=[f"umap{i}" for i in range(umap_components)])
all_data = pd.concat([all_data, umap_df], axis=1)
UMAP_COLS = [c for c in all_data.columns if "umap" in c]
assert len(UMAP_COLS)> 0
all_data | Tabular Playground Series - Jan 2021 |
14,138,753 | X = all_data.copy()
y = X.pop("SalePrice")
mi_scores = make_mi_scores(X, y)
mi_scores<drop_column> | if fe["drop_original"]:
all_data = all_data.drop(COLS, axis=1 ) | Tabular Playground Series - Jan 2021 |
14,138,753 | def drop_uninformative(df, mi_scores):
return df.loc[:, mi_scores > 0.0]<concatenate> | dtypes = {c: "int8" for c in train.columns if "_class" in c}
train = train.astype(dtypes)
X_train, X_val, y_train, y_val = train_test_split(train, targets, test_size=0.1, random_state=42)
train_pool = Pool(X_train, y_train, cat_features = [c for c in train.columns if "_class" in c])
val_pool = Pool(X_val, y_val, cat_features = [c for c in train.columns if "_class" in c] ) | Tabular Playground Series - Jan 2021 |
14,138,753 | mi_scores = mi_scores.append(pd.Series(0.01, index=["SalePrice"]))
all_data = drop_uninformative(all_data, mi_scores )<categorify> | ITERATIONS = 5000
MAX_EVALS = 150
def objective_func(params, train_pool, val_pool):
model = CatBoostRegressor(iterations = ITERATIONS,task_type="GPU", devices='0:1', grow_policy="Lossguide",
loss_function = "RMSE", custom_metric = "RMSE", eval_metric="RMSE", verbose = 1000, **params)
model.fit(train_pool, eval_set = val_pool, early_stopping_rounds = 200, plot=False)
loss = model.get_best_score()
return {"loss": loss["validation"]["RMSE"], "status": STATUS_OK}
space = {
"learning_rate": hp.loguniform("learning_rate", np.log(0.01), np.log(1)) ,
"max_depth": hp.quniform("max_depth", 6, 11, 1),
"l2_leaf_reg": hp.quniform("l2_leaf_reg", 1, 5, 0.9),
"bagging_temperature": hp.quniform("bagging_temperature", 0, 4, 0.9),
"min_data_in_leaf": hp.quniform("min_data_in_leaf", 1, 42, 1),
"max_leaves": hp.quniform("max_leaves", 2**4-1, 2**6-1, 1),
}
fn = partial(objective_func, train_pool=train_pool, val_pool=val_pool)
best_params = fmin(fn = fn, space=space, algo=tpe.suggest, max_evals = MAX_EVALS)
model = CatBoostRegressor(iterations = ITERATIONS,task_type="GPU", devices='0:1', grow_policy="Lossguide",
loss_function = "RMSE", custom_metric = "RMSE", eval_metric="RMSE", verbose = 200, **best_params)
model.fit(train_pool, eval_set = val_pool, early_stopping_rounds = 200, plot=True ) | Tabular Playground Series - Jan 2021 |
14,138,753 | all_data = pd.get_dummies(all_data)
all_data.head()<prepare_x_and_y> | sub["target"] = model.predict(test)
sub | Tabular Playground Series - Jan 2021 |
14,138,753 | <split><EOS> | sub.to_csv("submission.csv", index=False ) | Tabular Playground Series - Jan 2021 |
14,097,157 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<train_model> | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns | Tabular Playground Series - Jan 2021 |
14,097,157 | rf_best = RandomForestRegressor(random_state = 0,
criterion = 'mae',
max_depth = 11,
n_estimators = 85,
min_samples_leaf = 1,
min_samples_split = 2)
rf_best.fit(X_train, y_train )<predict_on_test> | train_df = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv')
test_df = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv')
sample_sub = pd.read_csv('.. /input/tabular-playground-series-jan-2021/sample_submission.csv' ) | Tabular Playground Series - Jan 2021 |
14,097,157 | rf_pred = rf_best.predict(X_test)
np.sqrt(mean_squared_log_error(rf_pred, y_test))<choose_model_class> | from sklearn.model_selection import train_test_split | Tabular Playground Series - Jan 2021 |
14,097,157 | gbr_best = GradientBoostingRegressor(learning_rate = 0.05,
max_depth = 3,
n_estimators = 3000,
min_samples_split = 10,
min_samples_leaf = 2,
loss = 'huber')
gbr_best.fit(X_train, y_train )<predict_on_test> | from sklearn.model_selection import train_test_split | Tabular Playground Series - Jan 2021 |
14,097,157 | gbr_pred = gbr_best.predict(X_test)
np.sqrt(mean_squared_log_error(gbr_pred, y_test))<train_on_grid> | X = train_df.drop('target', axis=1)
y = train_df['target'] | Tabular Playground Series - Jan 2021 |
14,097,157 | ridge = Ridge()
n_alphas = 200
alphas = np.logspace(-5, 5, n_alphas)
ridge_params = {
'alpha': alphas
}
ridge_grid= GridSearchCV(ridge, ridge_params, scoring='neg_mean_squared_error', n_jobs = -1, cv=4)
ridge_grid.fit(X_train, y_train)
ridge_best = ridge_grid.best_estimator_<predict_on_test> | X_train, X_val, y_train, y_val = train_test_split(X, y,test_size=0.2, random_state=3 ) | Tabular Playground Series - Jan 2021 |
14,097,157 | ridge_pred = ridge_best.predict(X_test)
np.sqrt(mean_squared_log_error(ridge_pred, y_test))<predict_on_test> | lgb_model = LGBMRegressor(n_estimators = 493,metric='rmse',
reg_alpha = 2, reg_lambda=10, colsample_bytree=0.8 ) | Tabular Playground Series - Jan 2021 |
14,097,157 | vc = VotingRegressor([("gbr", gbr_best),
("rf", rf_best),
("ridge", ridge_best)],
weights = [0.6, 0.1, 0.3])
vc.fit(X_train, y_train)
vc_pred = vc.predict(X_test)
np.sqrt(mean_squared_log_error(vc_pred, y_test))<predict_on_test> | lgb_model.fit(X_train, y_train, eval_set=(X_val, y_val)) | Tabular Playground Series - Jan 2021 |
14,097,157 | vc.fit(X, y)
pred = vc.predict(test_data )<save_to_csv> | y_pred_lgb = lgb_model.predict(test_df ) | Tabular Playground Series - Jan 2021 |
14,097,157 | submission = pd.DataFrame({
"Id": list(range(1461, 1461+len(test_data))),
"SalePrice": pred
})
submission.to_csv('./submission.csv', index=False )<load_pretrained> | import xgboost as xgb | Tabular Playground Series - Jan 2021 |
14,097,157 | os.system('apt-get install p7zip')
!pip install pyunpack
!pip install patool
directory = '/kaggle/working/'
Archive('/kaggle/input/kkbox-music-recommendation-challenge/train.csv.7z' ).extractall(directory)
Archive('/kaggle/input/kkbox-music-recommendation-challenge/test.csv.7z' ).extractall(directory)
Archive('/kaggle/input/kkbox-music-recommendation-challenge/songs.csv.7z' ).extractall(directory)
Archive('/kaggle/input/kkbox-music-recommendation-challenge/members.csv.7z' ).extractall(directory)
Archive('/kaggle/input/kkbox-music-recommendation-challenge/song_extra_info.csv.7z' ).extractall(directory)
train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
songs = pd.read_csv('./songs.csv')
members = pd.read_csv('./members.csv')
songs_extra = pd.read_csv('./song_extra_info.csv')
print('Data preprocessing...')
dict_count_song_played_train = {k: v for k, v in train['song_id'].value_counts().iteritems() }
dict_count_song_played_test = {k: v for k, v in test['song_id'].value_counts().iteritems() }
def return_number_played(x):
try:
return dict_count_song_played_train[x]
except KeyError:
try:
return dict_count_song_played_test[x]
except KeyError:
return 0
train['number_of_time_played'] = train['song_id'].apply(lambda x: return_number_played(x))
test['number_of_time_played'] = test['song_id'].apply(lambda x: return_number_played(x))
dict_user_activity = {k:v for k,v in pd.concat([train['msno'] , test['msno']] , axis = 0 ).value_counts().iteritems() }
def return_user_activity(x):
try:
return dict_user_activity[x]
except KeyError:
return 0
train['user_activity_msno'] = train['msno'].apply(lambda x: return_user_activity(x))
test['user_activity_msno'] = test['msno'].apply(lambda x: return_user_activity(x))
songs['genre_ids'].fillna('no_genre_id' , inplace= True)
members['registration'] = members['registration_init_time'].apply(lambda x: int(str(x)[0:4])) *12 + members['registration_init_time'].apply(lambda x: int(str(x)[4:6]))
members['expiration'] = members['expiration_date'].apply(lambda x: int(str(x)[0:4])) *12 + members['expiration_date'].apply(lambda x: int(str(x)[4:6]))
members['membership_month'] =(members['expiration_date'].apply(lambda x: int(str(x)[0:4])) *12 + members['expiration_date'].apply(lambda x: int(str(x)[4:6]))
- members['registration_init_time'].apply(lambda x: int(str(x)[0:4])) *12 - members['registration_init_time'].apply(lambda x: int(str(x)[4:6])))
def isrc_to_year(isrc):
if type(isrc)== str:
if int(isrc[5:7])> 17:
return 1900 + int(isrc[5:7])
else:
return 2000 + int(isrc[5:7])
else:
return np.nan
songs_extra['song_year'] = songs_extra['isrc'].apply(isrc_to_year)
songs_extra.drop(['isrc', 'name'], axis = 1, inplace = True)
song_cols = ['song_id', 'genre_ids', 'artist_name', 'song_length', 'language']
train = train.merge(songs[song_cols], on='song_id', how='left')
test = test.merge(songs[song_cols], on='song_id', how='left')
members_cols = ['msno', 'city', 'bd', 'gender', 'registration', 'expiration', 'membership_month']
train = train.merge(members[members_cols], on='msno', how='left')
test = test.merge(members[members_cols], on='msno', how='left')
train = train.merge(songs_extra, on = 'song_id', how = 'left')
test = test.merge(songs_extra, on = 'song_id', how = 'left')
min_max_scaling = ['number_of_time_played', 'user_activity_msno','membership_month', 'song_length']
for f in min_max_scaling:
ms = MinMaxScaler()
train[f] = ms.fit_transform(train[[f]])
test[f] = ms.transform(test[[f]])
del members, songs; gc.collect() ;
for col in train.columns:
if train[col].dtype == object:
train[col] = train[col].astype('category')
test[col] = test[col].astype('category')
X = train.drop(['target'], axis=1)
y = train['target'].values
X_test = test.drop(['id'], axis=1)
ids = test['id'].values
del train, test; gc.collect() ;
print('Training LGBM model...')
<set_options> | params = {'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'eta': 0.01,
'max_depth': 10,
'subsample': 0.6,
'colsample_bytree': 0.6,
'lambda':10,
'alpha':2,
'random_state': 42,
'silent': True}
tr_data = xgb.DMatrix(X_train, y_train)
va_data = xgb.DMatrix(X_val, y_val)
watchlist = [(tr_data, 'train'),(va_data, 'valid')]
model_xgb = xgb.train(params, tr_data, 1000, watchlist, maximize=False,verbose_eval=100 ) | Tabular Playground Series - Jan 2021 |
14,097,157 | warnings.simplefilter(action='ignore', category=FutureWarning )<define_variables> | dtest = xgb.DMatrix(test_df)
y_xgb_pred = model_xgb.predict(dtest, ntree_limit=model_xgb.best_ntree_limit ) | Tabular Playground Series - Jan 2021 |
14,097,157 | DATA_DIRECTORY = ".. /input/home-credit-default-risk"<load_from_csv> | from catboost import CatBoostRegressor | Tabular Playground Series - Jan 2021 |
14,097,157 | df_train = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_train.csv'))
df_test = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_test.csv'))
df = df_train.append(df_test)
del df_train, df_test; gc.collect()<categorify> | cb_model = CatBoostRegressor(iterations=795,
learning_rate=0.05,
depth=10,
reg_lambda=10,
eval_metric='RMSE',
random_seed = 42,
bagging_temperature = 0.2,
od_type='Iter',
metric_period = 50,
od_wait=20)
cb_model.fit(X_train, y_train,
eval_set=(X_val, y_val),
use_best_model=True,
verbose=50 ) | Tabular Playground Series - Jan 2021 |
14,097,157 | df = df[df['AMT_INCOME_TOTAL'] < 20000000]
df = df[df['CODE_GENDER'] != 'XNA']
df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
df['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True )<groupby> | cat_pred =cb_model.predict(test_df ) | Tabular Playground Series - Jan 2021 |
14,097,157 | def get_age_group(days_birth):
age_years = -days_birth / 365
if age_years < 27: return 1
elif age_years < 40: return 2
elif age_years < 50: return 3
elif age_years < 65: return 4
elif age_years < 99: return 5
else: return 0<feature_engineering> | sample_sub['target'] =(y_pred_lgb + y_xgb_pred + cat_pred)/ 3 | Tabular Playground Series - Jan 2021 |
14,097,157 | <feature_engineering><EOS> | sample_sub.to_csv('submission.csv',columns=['id', 'target'], header=True, index=False ) | Tabular Playground Series - Jan 2021 |
13,950,448 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<feature_engineering> | %matplotlib inline | Tabular Playground Series - Jan 2021 |
13,950,448 | df['CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']
df['CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']
df['ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
df['CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']
df['INCOME_TO_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED']
df['INCOME_TO_BIRTH_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_BIRTH']
df['EMPLOYED_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']
df['ID_TO_BIRTH_RATIO'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH']
df['CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']
df['CAR_TO_EMPLOYED_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']
df['PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']<merge> | train = pd.read_csv(DATA / "train.csv")
test = pd.read_csv(DATA / "test.csv")
smpl_sub = pd.read_csv(DATA / "sample_submission.csv")
print("train: {}, test: {}, sample sub: {}".format(
train.shape, test.shape, smpl_sub.shape
)) | Tabular Playground Series - Jan 2021 |
13,950,448 | def do_mean(df, group_cols, counted, agg_name):
gp = df[group_cols + [counted]].groupby(group_cols)[counted].mean().reset_index().rename(
columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
gc.collect()
return df<merge> | class TreeModel:
def __init__(self, model_type: str):
self.model_type = model_type
self.trn_data = None
self.val_data = None
self.model = None
def train(self,
params: dict,
X_train: pd.DataFrame, y_train: np.ndarray,
X_val: pd.DataFrame, y_val: np.ndarray,
train_weight: tp.Optional[np.ndarray] = None,
val_weight: tp.Optional[np.ndarray] = None,
train_params: dict = {}):
if self.model_type == "lgb":
self.trn_data = lgb.Dataset(X_train, label=y_train, weight=train_weight)
self.val_data = lgb.Dataset(X_val, label=y_val, weight=val_weight)
self.model = lgb.train(params=params,
train_set=self.trn_data,
valid_sets=[self.trn_data, self.val_data],
**train_params)
elif self.model_type == "xgb":
self.trn_data = xgb.DMatrix(X_train, y_train, weight=train_weight)
self.val_data = xgb.DMatrix(X_val, y_val, weight=val_weight)
self.model = xgb.train(params=params,
dtrain=self.trn_data,
evals=[(self.trn_data, "train"),(self.val_data, "val")],
**train_params)
elif self.model_type == "cat":
self.trn_data = Pool(X_train, label=y_train, group_id=[0] * len(X_train))
self.val_data = Pool(X_val, label=y_val, group_id=[0] * len(X_val))
self.model = CatBoost(params)
self.model.fit(
self.trn_data, eval_set=[self.val_data], use_best_model=True, **train_params)
else:
raise NotImplementedError
def predict(self, X: pd.DataFrame):
if self.model_type == "lgb":
return self.model.predict(
X, num_iteration=self.model.best_iteration)
elif self.model_type == "xgb":
X_DM = xgb.DMatrix(X)
return self.model.predict(
X_DM, ntree_limit=self.model.best_ntree_limit)
elif self.model_type == "cat":
return self.model.predict(X)
else:
raise NotImplementedError
@property
def feature_names_(self):
if self.model_type == "lgb":
return self.model.feature_name()
elif self.model_type == "xgb":
return list(self.model.get_score(importance_type="gain" ).keys())
elif self.model_type == "cat":
return self.model.feature_names_
else:
raise NotImplementedError
@property
def feature_importances_(self):
if self.model_type == "lgb":
return self.model.feature_importance(importance_type="gain")
elif self.model_type == "xgb":
return list(self.model.get_score(importance_type="gain" ).values())
elif self.model_type == "cat":
return self.model.feature_importances_
else:
raise NotImplementedError | Tabular Playground Series - Jan 2021 |
13,950,448 | def do_median(df, group_cols, counted, agg_name):
gp = df[group_cols + [counted]].groupby(group_cols)[counted].median().reset_index().rename(
columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
gc.collect()
return df<merge> | ID_COL = "id"
FEAT_COLS = [f"cont{i}" for i in range(1, 15)]
TGT_COL = "target"
N_SPLITS = 5
RANDOM_SEED_LIST = [
42,
]
MODEL_PARAMS = {
"lgb": {
"objective": "root_mean_squared_error",
"boosting": "gbdt",
"learning_rate": 0.05,
"seed": RANDOM_SEED_LIST[0],
'max_depth': 7,
'colsample_bytree':.85,
"subsample":.85,
"n_jobs": 2,
},
"xgb": {
"objective": "reg:squarederror",
"learning_rate": 0.01,
"seed": RANDOM_SEED_LIST[0],
"max_depth": 7,
"subsample":.85,
"colsample_bytree":.85,
"n_jobs": 2,
'tree_method': "gpu_hist",
"gpu_id": 0,
},
"cat": {
'loss_function': 'RMSE',
'learning_rate': 0.05,
'max_depth': 7,
'random_state': RANDOM_SEED_LIST[0],
"thread_count": 2,
'num_boost_round': 20000
}
}
TRAIN_PARAMS = {
"lgb": {
"num_boost_round": 20000,
"early_stopping_rounds": 200,
"verbose_eval": 100,
},
"xgb": {
"num_boost_round": 20000,
"early_stopping_rounds": 200,
"verbose_eval": 100,
},
"cat": {'early_stopping_rounds': 200, 'verbose_eval': 100}
} | Tabular Playground Series - Jan 2021 |
13,950,448 | def do_std(df, group_cols, counted, agg_name):
gp = df[group_cols + [counted]].groupby(group_cols)[counted].std().reset_index().rename(
columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
gc.collect()
return df<merge> | use_feat_cols = []
train_feat = train[[ID_COL]].copy()
test_feat = test[[ID_COL]].copy() | Tabular Playground Series - Jan 2021 |
13,950,448 | def do_sum(df, group_cols, counted, agg_name):
gp = df[group_cols + [counted]].groupby(group_cols)[counted].sum().reset_index().rename(
columns={counted: agg_name})
df = df.merge(gp, on=group_cols, how='left')
del gp
gc.collect()
return df<categorify> | train_feat = pd.concat([
train_feat, train[FEAT_COLS]], axis=1)
test_feat = pd.concat([
test_feat, test[FEAT_COLS]], axis=1)
use_feat_cols.extend(FEAT_COLS ) | Tabular Playground Series - Jan 2021 |
13,950,448 | group = ['ORGANIZATION_TYPE', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'AGE_RANGE', 'CODE_GENDER']
df = do_median(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_MEDIAN')
df = do_std(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_STD')
df = do_mean(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_MEAN')
df = do_std(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_STD')
df = do_mean(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_MEAN')
df = do_std(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_STD')
df = do_mean(df, group, 'AMT_CREDIT', 'GROUP_CREDIT_MEAN')
df = do_mean(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_MEAN')
df = do_std(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_STD' )<categorify> | def run_train_and_inference(X, X_test, y, use_model, model_params, train_params, seed_list, n_splits):
oof_pred_arr = np.zeros(len(X))
test_pred_arr = np.zeros(len(X_test))
feature_importances = pd.DataFrame()
score_list = []
for seed in seed_list:
if use_model == "cat":
model_params['random_state'] = seed
else:
model_params["seed"] = seed
kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
tmp_oof_pred = np.zeros(len(X))
tmp_test_pred = np.zeros(len(X_test))
for fold,(trn_idx, val_idx)in enumerate(kf.split(X, y)) :
print("*" * 100)
print(f"Seed: {seed} - Fold: {fold}")
X_trn = X.loc[trn_idx].reset_index(drop=True)
X_val = X.loc[val_idx].reset_index(drop=True)
y_trn = y[trn_idx]
y_val = y[val_idx]
model = TreeModel(model_type=use_model)
with timer(prefix="Model training"):
model.train(
params=model_params, X_train=X_trn, y_train=y_trn,
X_val=X_val, y_val=y_val, train_params=train_params)
fi_tmp = pd.DataFrame()
fi_tmp["feature"] = model.feature_names_
fi_tmp["importance"] = model.feature_importances_
fi_tmp["fold"] = fold
fi_tmp["seed"] = seed
feature_importances = feature_importances.append(fi_tmp)
val_pred = model.predict(X_val)
score = mean_squared_error(y_val, val_pred, squared=False)
print(f"score: {score:.5f}")
score_list.append([seed, fold, score])
tmp_oof_pred[val_idx] = val_pred
tmp_test_pred += model.predict(X_test)
oof_score = mean_squared_error(y, tmp_oof_pred, squared=False)
print(f"oof score: {oof_score: 5f}")
score_list.append([seed, "oof", oof_score])
oof_pred_arr += tmp_oof_pred
test_pred_arr += tmp_test_pred / n_splits
oof_pred_arr /= len(seed_list)
test_pred_arr /= len(seed_list)
oof_score = mean_squared_error(y, oof_pred_arr, squared=False)
score_list.append(["avg", "oof", oof_score])
score_df = pd.DataFrame(
score_list, columns=["seed", "fold", "rmse score"])
return oof_pred_arr, test_pred_arr, score_df, feature_importances | Tabular Playground Series - Jan 2021 |
13,950,448 | def label_encoder(df, categorical_columns=None):
if not categorical_columns:
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
for col in categorical_columns:
df[col], uniques = pd.factorize(df[col])
return df, categorical_columns<drop_column> | X = train_feat[use_feat_cols]
X_test = test_feat[use_feat_cols]
y = train[TGT_COL].values
print(f"train_feat: {X.shape}, test_feat: {X_test.shape}" ) | Tabular Playground Series - Jan 2021 |
13,950,448 | def drop_application_columns(df):
drop_list = [
'CNT_CHILDREN', 'CNT_FAM_MEMBERS', 'HOUR_APPR_PROCESS_START',
'FLAG_EMP_PHONE', 'FLAG_MOBIL', 'FLAG_CONT_MOBILE', 'FLAG_EMAIL', 'FLAG_PHONE',
'FLAG_OWN_REALTY', 'REG_REGION_NOT_LIVE_REGION', 'REG_REGION_NOT_WORK_REGION',
'REG_CITY_NOT_WORK_CITY', 'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE',
'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_MON', 'AMT_REQ_CREDIT_BUREAU_YEAR',
'COMMONAREA_MODE', 'NONLIVINGAREA_MODE', 'ELEVATORS_MODE', 'NONLIVINGAREA_AVG',
'FLOORSMIN_MEDI', 'LANDAREA_MODE', 'NONLIVINGAREA_MEDI', 'LIVINGAPARTMENTS_MODE',
'FLOORSMIN_AVG', 'LANDAREA_AVG', 'FLOORSMIN_MODE', 'LANDAREA_MEDI',
'COMMONAREA_MEDI', 'YEARS_BUILD_AVG', 'COMMONAREA_AVG', 'BASEMENTAREA_AVG',
'BASEMENTAREA_MODE', 'NONLIVINGAPARTMENTS_MEDI', 'BASEMENTAREA_MEDI',
'LIVINGAPARTMENTS_AVG', 'ELEVATORS_AVG', 'YEARS_BUILD_MEDI', 'ENTRANCES_MODE',
'NONLIVINGAPARTMENTS_MODE', 'LIVINGAREA_MODE', 'LIVINGAPARTMENTS_MEDI',
'YEARS_BUILD_MODE', 'YEARS_BEGINEXPLUATATION_AVG', 'ELEVATORS_MEDI', 'LIVINGAREA_MEDI',
'YEARS_BEGINEXPLUATATION_MODE', 'NONLIVINGAPARTMENTS_AVG', 'HOUSETYPE_MODE',
'FONDKAPREMONT_MODE', 'EMERGENCYSTATE_MODE'
]
for doc_num in [2,4,5,6,7,9,10,11,12,13,14,15,16,17,19,20,21]:
drop_list.append('FLAG_DOCUMENT_{}'.format(doc_num))
df.drop(drop_list, axis=1, inplace=True)
return df<categorify> | oof_pred_lgb, test_pred_lgb, score_lgb, feat_imps_lgb = run_train_and_inference(
X, X_test, y, "lgb", MODEL_PARAMS["lgb"], TRAIN_PARAMS["lgb"], RANDOM_SEED_LIST, N_SPLITS ) | Tabular Playground Series - Jan 2021 |
13,950,448 | df, le_encoded_cols = label_encoder(df, None)
df = drop_application_columns(df )<categorify> | score_lgb.loc[score_lgb.fold == "oof"] | Tabular Playground Series - Jan 2021 |
13,950,448 | df = pd.get_dummies(df )<load_from_csv> | oof_pred_xgb, test_pred_xgb, score_xgb, feat_imps_xgb = run_train_and_inference(
X, X_test, y, "xgb", MODEL_PARAMS["xgb"], TRAIN_PARAMS["xgb"], RANDOM_SEED_LIST, N_SPLITS ) | Tabular Playground Series - Jan 2021 |
13,950,448 | bureau = pd.read_csv(os.path.join(DATA_DIRECTORY, 'bureau.csv'))<feature_engineering> | score_xgb.loc[score_xgb.fold == "oof"] | Tabular Playground Series - Jan 2021 |
13,950,448 | bureau['CREDIT_DURATION'] = -bureau['DAYS_CREDIT'] + bureau['DAYS_CREDIT_ENDDATE']
bureau['ENDDATE_DIF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT']
bureau['DEBT_PERCENTAGE'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_CREDIT_SUM_DEBT']
bureau['DEBT_CREDIT_DIFF'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_DEBT']
bureau['CREDIT_TO_ANNUITY_RATIO'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_ANNUITY']<categorify> | oof_pred_cat, test_pred_cat, score_cat, feat_imps_cat = run_train_and_inference(
X, X_test, y, "cat", MODEL_PARAMS["cat"], TRAIN_PARAMS["cat"], RANDOM_SEED_LIST, N_SPLITS ) | Tabular Playground Series - Jan 2021 |
13,950,448 | def one_hot_encoder(df, categorical_columns=None, nan_as_category=True):
original_columns = list(df.columns)
if not categorical_columns:
categorical_columns = [col for col in df.columns if df[col].dtype == 'object']
df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
categorical_columns = [c for c in df.columns if c not in original_columns]
return df, categorical_columns<groupby> | score_cat.loc[score_cat.fold == "oof"] | Tabular Playground Series - Jan 2021 |
13,950,448 | def group(df_to_agg, prefix, aggregations, aggregate_by= 'SK_ID_CURR'):
agg_df = df_to_agg.groupby(aggregate_by ).agg(aggregations)
agg_df.columns = pd.Index(['{}{}_{}'.format(prefix, e[0], e[1].upper())
for e in agg_df.columns.tolist() ])
return agg_df.reset_index()<merge> | model_names = ["lgb", "xgb", "cat"] | Tabular Playground Series - Jan 2021 |
13,950,448 | def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by= 'SK_ID_CURR'):
agg_df = group(df_to_agg, prefix, aggregations, aggregate_by= aggregate_by)
return df_to_merge.merge(agg_df, how='left', on= aggregate_by )<load_from_csv> | pd.DataFrame(
np.corrcoef([
oof_pred_lgb,
oof_pred_xgb,
oof_pred_cat
]),
columns=model_names, index=model_names ) | Tabular Playground Series - Jan 2021 |
13,950,448 | def get_bureau_balance(path, num_rows= None):
bb = pd.read_csv(os.path.join(path, 'bureau_balance.csv'))
bb, categorical_cols = one_hot_encoder(bb, nan_as_category= False)
bb_processed = bb.groupby('SK_ID_BUREAU')[categorical_cols].mean().reset_index()
agg = {'MONTHS_BALANCE': ['min', 'max', 'mean', 'size']}
bb_processed = group_and_merge(bb, bb_processed, '', agg, 'SK_ID_BUREAU')
del bb; gc.collect()
return bb_processed<categorify> | pd.DataFrame(
np.corrcoef([
test_pred_lgb,
test_pred_xgb,
test_pred_cat
]),
columns=model_names, index=model_names ) | Tabular Playground Series - Jan 2021 |
13,950,448 | bureau, categorical_cols = one_hot_encoder(bureau, nan_as_category= False)
bureau = bureau.merge(get_bureau_balance(DATA_DIRECTORY), how='left', on='SK_ID_BUREAU')
bureau['STATUS_12345'] = 0
for i in range(1,6):
bureau['STATUS_12345'] += bureau['STATUS_{}'.format(i)]<merge> | oof_pred_avg =(oof_pred_lgb + oof_pred_xgb + oof_pred_cat)/ 3
oof_score_avg = mean_squared_error(y, oof_pred_avg, squared=False)
print(f"oof score avg: {oof_score_avg:.5f}" ) | Tabular Playground Series - Jan 2021 |
13,950,448 | features = ['AMT_CREDIT_MAX_OVERDUE', 'AMT_CREDIT_SUM_OVERDUE', 'AMT_CREDIT_SUM',
'AMT_CREDIT_SUM_DEBT', 'DEBT_PERCENTAGE', 'DEBT_CREDIT_DIFF', 'STATUS_0', 'STATUS_12345']
agg_length = bureau.groupby('MONTHS_BALANCE_SIZE')[features].mean().reset_index()
agg_length.rename({feat: 'LL_' + feat for feat in features}, axis=1, inplace=True)
bureau = bureau.merge(agg_length, how='left', on='MONTHS_BALANCE_SIZE')
del agg_length; gc.collect()<define_variables> | test_pred_avg =(test_pred_lgb + test_pred_xgb + test_pred_cat)/ 3 | Tabular Playground Series - Jan 2021 |
13,950,448 | <merge><EOS> | sub = smpl_sub.copy()
sub[TGT_COL] = test_pred_avg
sub.to_csv("submission.csv", index=False)
sub.head() | Tabular Playground Series - Jan 2021 |
13,964,440 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<merge> | InteractiveShell.ast_node_interactivity = "all"
%matplotlib inline | Tabular Playground Series - Jan 2021 |
13,964,440 | sort_bureau = bureau.sort_values(by=['DAYS_CREDIT'])
gr = sort_bureau.groupby('SK_ID_CURR')['AMT_CREDIT_MAX_OVERDUE'].last().reset_index()
gr.rename({'AMT_CREDIT_MAX_OVERDUE': 'BUREAU_LAST_LOAN_MAX_OVERDUE'}, inplace=True)
agg_bureau = agg_bureau.merge(gr, on='SK_ID_CURR', how='left')
agg_bureau['BUREAU_DEBT_OVER_CREDIT'] = \
agg_bureau['BUREAU_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_AMT_CREDIT_SUM_SUM']
agg_bureau['BUREAU_ACTIVE_DEBT_OVER_CREDIT'] = \
agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_SUM']<merge> | train_df = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv')
test_df = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv')
sub_df = pd.read_csv('.. /input/tabular-playground-series-jan-2021/sample_submission.csv')
seed = 66 | Tabular Playground Series - Jan 2021 |
13,964,440 | df = pd.merge(df, agg_bureau, on='SK_ID_CURR', how='left')
del agg_bureau, bureau
gc.collect()<load_from_csv> | train_df.isna().sum().sum()
test_df.isna().sum().sum() | Tabular Playground Series - Jan 2021 |
13,964,440 | prev = pd.read_csv(os.path.join(DATA_DIRECTORY, 'previous_application.csv'))
pay = pd.read_csv(os.path.join(DATA_DIRECTORY, 'installments_payments.csv'))<define_variables> | X = train_df.drop(['id', 'target'], axis=1)
y = train_df.target
estim = DecisionTreeRegressor()
estim.fit(X,y ) | Tabular Playground Series - Jan 2021 |
13,964,440 | PREVIOUS_AGG = {
'SK_ID_PREV': ['nunique'],
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_DOWN_PAYMENT': ['max', 'mean'],
'HOUR_APPR_PROCESS_START': ['min', 'max', 'mean'],
'RATE_DOWN_PAYMENT': ['max', 'mean'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['max', 'mean'],
'DAYS_TERMINATION': ['max'],
'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'],
'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean', 'var'],
'DOWN_PAYMENT_TO_CREDIT': ['mean'],
}
PREVIOUS_ACTIVE_AGG = {
'SK_ID_PREV': ['nunique'],
'SIMPLE_INTERESTS': ['mean'],
'AMT_ANNUITY': ['max', 'sum'],
'AMT_APPLICATION': ['max', 'mean'],
'AMT_CREDIT': ['sum'],
'AMT_DOWN_PAYMENT': ['max', 'mean'],
'DAYS_DECISION': ['min', 'mean'],
'CNT_PAYMENT': ['mean', 'sum'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'AMT_PAYMENT': ['sum'],
'INSTALMENT_PAYMENT_DIFF': ['mean', 'max'],
'REMAINING_DEBT': ['max', 'mean', 'sum'],
'REPAYMENT_RATIO': ['mean'],
}
PREVIOUS_LATE_PAYMENTS_AGG = {
'DAYS_DECISION': ['min', 'max', 'mean'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'APPLICATION_CREDIT_DIFF': ['min'],
'NAME_CONTRACT_TYPE_Consumer loans': ['mean'],
'NAME_CONTRACT_TYPE_Cash loans': ['mean'],
'NAME_CONTRACT_TYPE_Revolving loans': ['mean'],
}
PREVIOUS_LOAN_TYPE_AGG = {
'AMT_CREDIT': ['sum'],
'AMT_ANNUITY': ['mean', 'max'],
'SIMPLE_INTERESTS': ['min', 'mean', 'max', 'var'],
'APPLICATION_CREDIT_DIFF': ['min', 'var'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'],
'DAYS_DECISION': ['max'],
'DAYS_LAST_DUE_1ST_VERSION': ['max', 'mean'],
'CNT_PAYMENT': ['mean'],
}
PREVIOUS_TIME_AGG = {
'AMT_CREDIT': ['sum'],
'AMT_ANNUITY': ['mean', 'max'],
'SIMPLE_INTERESTS': ['mean', 'max'],
'DAYS_DECISION': ['min', 'mean'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'APPLICATION_CREDIT_DIFF': ['min'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'],
'NAME_CONTRACT_TYPE_Consumer loans': ['mean'],
'NAME_CONTRACT_TYPE_Cash loans': ['mean'],
'NAME_CONTRACT_TYPE_Revolving loans': ['mean'],
}
PREVIOUS_APPROVED_AGG = {
'SK_ID_PREV': ['nunique'],
'AMT_ANNUITY': ['min', 'max', 'mean'],
'AMT_CREDIT': ['min', 'max', 'mean'],
'AMT_DOWN_PAYMENT': ['max'],
'AMT_GOODS_PRICE': ['max'],
'HOUR_APPR_PROCESS_START': ['min', 'max'],
'DAYS_DECISION': ['min', 'mean'],
'CNT_PAYMENT': ['max', 'mean'],
'DAYS_TERMINATION': ['mean'],
'CREDIT_TO_ANNUITY_RATIO': ['mean', 'max'],
'APPLICATION_CREDIT_DIFF': ['max'],
'APPLICATION_CREDIT_RATIO': ['min', 'max', 'mean'],
'DAYS_FIRST_DRAWING': ['max', 'mean'],
'DAYS_FIRST_DUE': ['min', 'mean'],
'DAYS_LAST_DUE_1ST_VERSION': ['min', 'max', 'mean'],
'DAYS_LAST_DUE': ['max', 'mean'],
'DAYS_LAST_DUE_DIFF': ['min', 'max', 'mean'],
'SIMPLE_INTERESTS': ['min', 'max', 'mean'],
}
PREVIOUS_REFUSED_AGG = {
'AMT_APPLICATION': ['max', 'mean'],
'AMT_CREDIT': ['min', 'max'],
'DAYS_DECISION': ['min', 'max', 'mean'],
'CNT_PAYMENT': ['max', 'mean'],
'APPLICATION_CREDIT_DIFF': ['min', 'max', 'mean', 'var'],
'APPLICATION_CREDIT_RATIO': ['min', 'mean'],
'NAME_CONTRACT_TYPE_Consumer loans': ['mean'],
'NAME_CONTRACT_TYPE_Cash loans': ['mean'],
'NAME_CONTRACT_TYPE_Revolving loans': ['mean'],
}
<categorify> | X, y | Tabular Playground Series - Jan 2021 |
13,964,440 | ohe_columns = [
'NAME_CONTRACT_STATUS', 'NAME_CONTRACT_TYPE', 'CHANNEL_TYPE',
'NAME_TYPE_SUITE', 'NAME_YIELD_GROUP', 'PRODUCT_COMBINATION',
'NAME_PRODUCT_TYPE', 'NAME_CLIENT_TYPE']
prev, categorical_cols = one_hot_encoder(prev, ohe_columns, nan_as_category= False )<feature_engineering> | F_test = f_regression(X, y)
F_test | Tabular Playground Series - Jan 2021 |
13,964,440 | prev['APPLICATION_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT']
prev['APPLICATION_CREDIT_RATIO'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']
prev['CREDIT_TO_ANNUITY_RATIO'] = prev['AMT_CREDIT']/prev['AMT_ANNUITY']
prev['DOWN_PAYMENT_TO_CREDIT'] = prev['AMT_DOWN_PAYMENT'] / prev['AMT_CREDIT']
total_payment = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT']
prev['SIMPLE_INTERESTS'] =(total_payment/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT']<merge> | X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=seed)
def opt_lgm(trial, rounds=10):
param = {
'boosting_type': trial.suggest_categorical('boosting_type', ['gbdt', 'rf', 'dart']),
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
'learning_rate': trial.suggest_uniform('learning_rate', 0.005, 0.1),
'importance_type': ['gain', 'split']
}
param['n_estimators'] = 500
model = lgb.LGBMRegressor(**param)
model = model.fit(X_train, y_train)
valid_prediction = model.predict(X_val)
mse = mean_squared_error(y_val, valid_prediction, squared=False)
return mse
study = optuna.create_study(direction='minimize')
study.optimize(opt_lgm, n_trials=2)
params = study.best_params | Tabular Playground Series - Jan 2021 |
13,964,440 | approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]
active_df = approved[approved['DAYS_LAST_DUE'] == 365243]
active_pay = pay[pay['SK_ID_PREV'].isin(active_df['SK_ID_PREV'])]
active_pay_agg = active_pay.groupby('SK_ID_PREV')[['AMT_INSTALMENT', 'AMT_PAYMENT']].sum()
active_pay_agg.reset_index(inplace= True)
active_pay_agg['INSTALMENT_PAYMENT_DIFF'] = active_pay_agg['AMT_INSTALMENT'] - active_pay_agg['AMT_PAYMENT']
active_df = active_df.merge(active_pay_agg, on= 'SK_ID_PREV', how= 'left')
active_df['REMAINING_DEBT'] = active_df['AMT_CREDIT'] - active_df['AMT_PAYMENT']
active_df['REPAYMENT_RATIO'] = active_df['AMT_PAYMENT'] / active_df['AMT_CREDIT']
active_agg_df = group(active_df, 'PREV_ACTIVE_', PREVIOUS_ACTIVE_AGG)
active_agg_df['TOTAL_REPAYMENT_RATIO'] = active_agg_df['PREV_ACTIVE_AMT_PAYMENT_SUM']/\
active_agg_df['PREV_ACTIVE_AMT_CREDIT_SUM']
del active_pay, active_pay_agg, active_df; gc.collect()<categorify> | params['n_estimators'] = 500
model = lgb.LGBMRegressor(**params)
kf = KFold(n_splits=5)
i=0
for train_index, test_index in kf.split(X):
X_train, X_test = X.loc[train_index, :], X.loc[test_index, :]
y_train, y_test = y[train_index], y[test_index]
clf = model.fit(X_train, y_train)
preds = clf.predict(X_test)
print('mse:', mean_squared_error(y_test, preds, squared=False))
sub_preds = test_df.drop(['id'], axis=1)
sub_df[f'pred_{i}'] = clf.predict(sub_preds)
i+=1 | Tabular Playground Series - Jan 2021 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.