Dataset schema (column, dtype, and value or string-length range):
kernel_id: int64, values 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
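For readers who want to work with these rows programmatically, here is a minimal sketch of loading and inspecting the four columns. The file name kernel_cells.csv and the CSV export format are assumptions for illustration, not part of the dataset itself.

import pandas as pd

# Hypothetical local export of the dataset described above.
df = pd.read_csv('kernel_cells.csv')

# Column dtypes: kernel_id should be int64, the other three columns strings.
print(df.dtypes)

# String-length ranges, comparable to the stats above (e.g. completion 1 to 182k chars).
print(df['completion'].str.len().agg(['min', 'max']))

# Records per competition name.
print(df['comp_name'].value_counts().head())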
1,249,981
x_train.fillna(0, inplace=True) x_test.fillna(0, inplace=True)<import_modules>
out_file = 'random_search_trials.csv' of_connection = open(out_file, 'w') writer = csv.writer(of_connection) headers = ['score', 'hyperparameters', 'iteration'] writer.writerow(headers) of_connection.close()
Home Credit Default Risk
1,249,981
from sklearn.linear_model import LinearRegression from sklearn.preprocessing import StandardScaler<prepare_x_and_y>
def random_search(param_grid, out_file, max_evals = MAX_EVALS): results = pd.DataFrame(columns = ['score', 'params', 'iteration'], index = list(range(max_evals))) for i in range(max_evals): random_params = {k: random.sample(v, 1)[0] for k, v in param_grid.items()} random_params['subsample'] = 1.0 if random_params['boosting_type'] == 'goss' else random_params['subsample'] eval_results = objective(random_params, i) results.loc[i, :] = eval_results of_connection = open(out_file, 'a') writer = csv.writer(of_connection) writer.writerow(eval_results) of_connection.close() results.sort_values('score', ascending = False, inplace = True) results.reset_index(inplace = True) return results
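random_search above depends on three names the snippet does not define: MAX_EVALS, a param_grid whose values are lists of candidates (so random.sample(v, 1) can draw one), and an objective(params, iteration) callback returning a [score, hyperparameters, iteration] row. A minimal sketch of that wiring, with a dummy objective standing in for the notebook's cross-validated scorer, could look like:

import csv
import random
import pandas as pd

MAX_EVALS = 10  # assumption: a small search budget for illustration

# Every value is a list so random.sample(v, 1)[0] can draw a candidate.
param_grid = {
    'boosting_type': ['gbdt', 'goss', 'dart'],
    'num_leaves': list(range(20, 150)),
    'subsample': [x / 10 for x in range(5, 11)],
}

def objective(hyperparameters, iteration):
    # Dummy stand-in for the notebook's cross-validation scoring.
    score = random.random()
    return [score, hyperparameters, iteration]

results = random_search(param_grid, out_file)
print(results.head())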
Home Credit Default Risk
1,249,981
x_scaler = StandardScaler() y_scaler = StandardScaler()<normalization>
def grid_search(param_grid, out_file, max_evals = MAX_EVALS): results = pd.DataFrame(columns = ['score', 'params', 'iteration'], index = list(range(max_evals))) keys, values = zip(*param_grid.items()) i = 0 for v in itertools.product(*values): parameters = dict(zip(keys, v)) parameters['subsample'] = 1.0 if parameters['boosting_type'] == 'goss' else parameters['subsample'] eval_results = objective(parameters, i) results.loc[i, :] = eval_results i += 1 of_connection = open(out_file, 'a') writer = csv.writer(of_connection) writer.writerow(eval_results) of_connection.close() if i >= max_evals: break results.sort_values('score', ascending = False, inplace = True) results.reset_index(inplace = True) return results
Home Credit Default Risk
1,249,981
x_train_scaler = x_scaler.fit_transform(x_train) x_test_scaler = x_scaler.transform(x_test)<choose_model_class>
random_results = pd.read_csv('../input/home-credit-model-tuning/random_search_trials_1000.csv') grid_results = pd.read_csv('../input/home-credit-model-tuning/grid_search_trials_1000.csv')
Home Credit Default Risk
1,249,981
lr = LinearRegression()<train_model>
grid_results['hyperparameters'] = grid_results['hyperparameters'].map(ast.literal_eval) random_results['hyperparameters'] = random_results['hyperparameters'].map(ast.literal_eval)
Home Credit Default Risk
1,249,981
lr.fit(x_train_scaler, y_train)<predict_on_test>
def evaluate(results, name): results = results.sort_values('score', ascending = False).reset_index(drop = True) print('The highest cross validation score from {} was {:.5f} found on iteration {}.'.format(name, results.loc[0, 'score'], results.loc[0, 'iteration'])) hyperparameters = results.loc[0, 'hyperparameters'] model = lgb.LGBMClassifier(**hyperparameters) model.fit(train_features, train_labels) preds = model.predict_proba(test_features)[:, 1] print('ROC AUC from {} on test data = {:.5f}.'.format(name, roc_auc_score(test_labels, preds))) hyp_df = pd.DataFrame(columns = list(results.loc[0, 'hyperparameters'].keys())) for i, hyp in enumerate(results['hyperparameters']): hyp_df = hyp_df.append(pd.DataFrame(hyp, index = [0]), ignore_index = True) hyp_df['iteration'] = results['iteration'] hyp_df['score'] = results['score'] return hyp_df
Home Credit Default Risk
1,249,981
pred = lr.predict(x_test_scaler)<create_dataframe>
grid_hyp = evaluate(grid_results, name = 'grid search')
Home Credit Default Risk
1,249,981
sub = pd.DataFrame(data = {'Id': test.Id, 'SalePrice': pred})<save_to_csv>
random_hyp = evaluate(random_results, name = 'random search')
Home Credit Default Risk
1,249,981
sub.to_csv('submission.csv', index = False)<load_from_csv>
alt.renderers.enable('notebook')
Home Credit Default Risk
1,249,981
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')<import_modules>
random_hyp['search'] = 'random' grid_hyp['search'] = 'grid' hyp = random_hyp.append(grid_hyp) hyp.head()
Home Credit Default Risk
1,249,981
from sklearn.linear_model import LinearRegression from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures<drop_column>
best_grid_hyp = grid_hyp.iloc[grid_hyp['score'].idxmax()].copy() best_random_hyp = random_hyp.iloc[random_hyp['score'].idxmax()].copy()
Home Credit Default Risk
1,249,981
pipe = Pipeline([('scaler', StandardScaler()), ('poly', PolynomialFeatures()), ('model', LinearRegression())])<prepare_x_and_y>
print('Average validation score of grid search = {:.5f}.'.format(np.mean(grid_hyp['score']))) print('Average validation score of random search = {:.5f}.'.format(np.mean(random_hyp['score'])))
Home Credit Default Risk
1,249,981
X_train = train[['LotFrontage','LotArea']].fillna(0) X_test = test[['LotFrontage','LotArea']].fillna(0) y_train = train['SalePrice']<import_modules>
random_hyp['score'] = random_hyp['score'].astype(float) best_random_hyp = random_hyp.loc[0, :].copy()
Home Credit Default Risk
1,249,981
from sklearn.model_selection import cross_val_score from sklearn.metrics import mean_squared_error<compute_train_metric>
train = pd.read_csv('../input/home-credit-simple-featuers/simple_features_train.csv') test = pd.read_csv('../input/home-credit-simple-featuers/simple_features_test.csv') test_ids = test['SK_ID_CURR'] train_labels = np.array(train['TARGET'].astype(np.int32)).reshape((-1,)) train = train.drop(columns = ['SK_ID_CURR', 'TARGET']) test = test.drop(columns = ['SK_ID_CURR']) print('Training shape: ', train.shape) print('Testing shape: ', test.shape)
Home Credit Default Risk
1,249,981
scores = cross_val_score(pipe, X_train, y_train, scoring='neg_mean_squared_error', cv=10)<feature_engineering>
train_set = lgb.Dataset(train, label = train_labels) hyperparameters = dict(**random_results.loc[0, 'hyperparameters']) del hyperparameters['n_estimators'] cv_results = lgb.cv(hyperparameters, train_set, num_boost_round = 10000, early_stopping_rounds = 100, metrics = 'auc', nfold = N_FOLDS)
Home Credit Default Risk
1,249,981
np.mean(pow(-scores, 0.5))<train_model>
print('The cross validation score on the full dataset = {:.5f} with std: {:.5f}.'.format(cv_results['auc-mean'][-1], cv_results['auc-stdv'][-1])) print('Number of estimators = {}.'.format(len(cv_results['auc-mean'])))
Home Credit Default Risk
1,249,981
pipe.fit(X_train, y_train)<predict_on_test>
model = lgb.LGBMClassifier(n_estimators = len(cv_results['auc-mean']), **hyperparameters) model.fit(train, train_labels) preds = model.predict_proba(test)[:, 1]
Home Credit Default Risk
1,249,981
preds = pipe.predict(X_test)<save_to_csv>
submission = pd.DataFrame({'SK_ID_CURR': test_ids, 'TARGET': preds}) submission.to_csv('submission_simple_features_random.csv', index = False)
Home Credit Default Risk
1,254,160
sub = pd.DataFrame({'Id': test.Id, 'SalePrice': preds}) sub.to_csv('submission.csv', index=False)<set_options>
%matplotlib inline
Home Credit Default Risk
1,254,160
warnings.filterwarnings('ignore')<load_from_csv>
from sklearn.model_selection import GridSearchCV from skopt import BayesSearchCV
Home Credit Default Risk
1,254,160
data_path = '../input/kaggledays-paris/' train = pd.read_csv(data_path + 'train.csv', encoding='UTF-8') test = pd.read_csv(data_path + 'test.csv', encoding='UTF-8') colorfile = pd.read_csv('../input/color-feature/color_transcode2.csv', encoding='UTF-8') train = train.merge(colorfile, on='color', how='left') test = test.merge(colorfile, on='color', how='left') train['color'] = train['new_color'] test['color'] = test['new_color'] print(train.shape, test.shape) print(train.head()) train['is_test'] = 0 test['is_test'] = 1 df_all = pd.concat([train, test], axis=0) print('train ', train.shape) print('test ', test.shape)<categorify>
import lightgbm as lgb
Home Credit Default Risk
1,254,160
print('Preprocessing text...') tfidf = TfidfVectorizer(max_features=5, norm='l2') tfidf.fit(df_all['en_US_description'].astype(str).values) tfidf_all = np.array(tfidf.transform(df_all['en_US_description'].astype(str).values).toarray(), dtype=np.float16) for i in tqdm(range(5)): df_all['en_US_description_tfidf_' + str(i)] = tfidf_all[:, i] del tfidf, tfidf_all gc.collect() print('Done.') print('train ', train.shape) print('test ', test.shape)<categorify>
import gc
Home Credit Default Risk
1,254,160
print('Label Encoder...') cols = [f_ for f_ in df_all.columns if df_all[f_].dtype == 'object' and 'sku' not in f_ and 'ID' not in f_] print(cols) cnt = 0 for c in tqdm(cols): le = LabelEncoder() df_all[c] = le.fit_transform(df_all[c].astype(str)) cnt += 1 del le print('len(cols)= {}'.format(cnt)) train = df_all.loc[df_all['is_test'] == 0].drop(['is_test'], axis=1) test = df_all.loc[df_all['is_test'] == 1].drop(['is_test'], axis=1) print('train ', train.shape) print('test ', test.shape)<merge>
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold from sklearn.metrics import accuracy_score, confusion_matrix from sklearn.feature_selection import VarianceThreshold from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, MinMaxScaler
Home Credit Default Risk
1,254,160
vimages = pd.read_csv(data_path + 'vimages.csv') pca = PCA(n_components=10) vpca = pca.fit_transform(vimages.drop('sku_hash', axis=1).values) vimages_pca = vimages[['sku_hash']] for i in tqdm(range(vpca.shape[1])): vimages_pca['dim_pca_{}'.format(i)] = vpca[:, i] train = train.merge(vimages_pca, on='sku_hash', how='left') test = test.merge(vimages_pca, on='sku_hash', how='left') print('train ', train.shape) print('test ', test.shape)<merge>
def status_print(optim_result): all_models = pd.DataFrame(bayes_cv_tuner.cv_results_) best_params = pd.Series(bayes_cv_tuner.best_params_) print('Model #{} Best ROC-AUC: {} Best params: {}'.format(len(all_models), np.round(bayes_cv_tuner.best_score_, 4), bayes_cv_tuner.best_params_)) clf_name = bayes_cv_tuner.estimator.__class__.__name__ all_models.to_csv(clf_name + "_cv_results.csv")
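status_print above is written as a skopt callback; bayes_cv_tuner itself never appears in the snippet. A hedged sketch of how it is typically wired up follows — the estimator, the search space, and the train/train_labels names are assumptions, not taken from the source:

import lightgbm as lgb
from skopt import BayesSearchCV
from skopt.space import Integer, Real

# Assumed tuner definition; status_print reads its cv_results_/best_params_.
bayes_cv_tuner = BayesSearchCV(
    estimator=lgb.LGBMClassifier(objective='binary'),
    search_spaces={
        'learning_rate': Real(0.01, 0.3, prior='log-uniform'),
        'num_leaves': Integer(16, 256),
    },
    scoring='roc_auc',
    cv=3,
    n_iter=10,
    random_state=42,
)

# The callback fires after every optimisation step.
bayes_cv_tuner.fit(train, train_labels, callback=status_print)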
Home Credit Default Risk
1,254,160
sales = pd.read_csv(data_path + 'sales.csv') sales = sales.merge(df_all[['sku_hash', 'model', 'function']], on='sku_hash', how='left') sales = sales.sort_values('Date', ascending=True) salesg = sales.groupby('sku_hash').agg({'sales_quantity': ['sum', 'mean'], 'Month_transaction': ['last']}).reset_index() salesg.columns = ['%s%s' % (a, '_%s' % b if b else '') for a, b in salesg.columns] print(salesg.head()) train = train.merge(salesg, on='sku_hash', how='left') test = test.merge(salesg, on='sku_hash', how='left') aggs = {} aggs['sales_quantity'] = {'sum': 'sum'} agg_tmp = sales.groupby(['sku_hash', 'Date']).agg(aggs) agg_tmp.columns = ['{}_{}'.format(k, agg) for k in aggs.keys() for agg in aggs[k]] agg_tmp = agg_tmp.reset_index() agg_tmp = agg_tmp.sort_values('Date', ascending=True) XX_sales = agg_tmp.groupby(['sku_hash'])['sales_quantity_sum'].apply(lambda x: sales_features(x)) XX_sales = expand_series_to_df(XX_sales.reset_index()) XX_sales.columns = ['sku_hash', 'sales_quantity_sum_linear_trend_rvalue', 'sales_quantity_sum_linear_trend_intercept', 'sales_quantity_sum_linear_trend_attr_slope', 'sales_quantity_sum_linear_trend_attr_stderr', 'sales_quantity_sum_kurtosis', 'sales_quantity_sum_skewness'] train = train.merge(XX_sales, on='sku_hash', how='left') test = test.merge(XX_sales, on='sku_hash', how='left') print('train ', train.shape) print('test ', test.shape)<groupby>
data = pd.read_csv(".. /input/application_train.csv") test = pd.read_csv('.. /input/application_test.csv') prev = pd.read_csv('.. /input/previous_application.csv') buro = pd.read_csv('.. /input/bureau.csv') buro_balance = pd.read_csv('.. /input/bureau_balance.csv') credit_card = pd.read_csv('.. /input/credit_card_balance.csv') POS_CASH = pd.read_csv('.. /input/POS_CASH_balance.csv') payments = pd.read_csv('.. /input/installments_payments.csv') lgbm_submission = pd.read_csv('.. /input/sample_submission.csv' )
Home Credit Default Risk
1,254,160
agg_tmp = sales.groupby(['model', 'Date']).agg(aggs) agg_tmp.columns = ['{}_{}'.format(k, agg) for k in aggs.keys() for agg in aggs[k]] agg_tmp = agg_tmp.reset_index() agg_tmp = agg_tmp.sort_values('Date', ascending=True) XX_sales_BYMODEL = agg_tmp.groupby(['model'])['sales_quantity_sum'].apply(lambda x: sales_features(x)) XX_sales_BYMODEL = expand_series_to_df(XX_sales_BYMODEL.reset_index()) XX_sales_BYMODEL.columns = ['model', 'BYMODELsales_quantity_sum_linear_trend_rvalue', 'BYMODELsales_quantity_sum_linear_trend_intercept', 'BYMODELsales_quantity_sum_linear_trend_attr_slope', 'BYMODELsales_quantity_sum_linear_trend_attr_stderr', 'BYMODELsales_quantity_sum_kurtosis', 'BYMODELsales_quantity_sum_skewness'] train = train.merge(XX_sales_BYMODEL, on='model', how='left') test = test.merge(XX_sales_BYMODEL, on='model', how='left') print('train ', train.shape) print('test ', test.shape)<feature_engineering>
y = data["TARGET"] del data["TARGET"]
Home Credit Default Risk
1,254,160
def extract_features(df): df['month_real'] = (df['month'] + df['Month_transaction_last']).apply(lambda x: x % 12) print('Extracting month_real features for train:') extract_features(train) print('Extracting month_real features for test:') extract_features(test) print('train ', train.shape) print('test ', test.shape)<prepare_x_and_y>
categorical_feature = [col for col in data.columns if data[col].dtype == 'object']
Home Credit Default Risk
1,254,160
y = np.log1p(train['target'].values) f = [ 'en_US_description_tfidf_0', 'en_US_description_tfidf_1', 'en_US_description_tfidf_2', 'en_US_description_tfidf_3', 'en_US_description_tfidf_4', 'dim_pca_0', 'dim_pca_1', 'dim_pca_2', 'dim_pca_3','dim_pca_4', 'dim_pca_5', 'dim_pca_6', 'dim_pca_7', 'dim_pca_8','dim_pca_9', 'sales_quantity_sum', 'sales_quantity_mean', 'fr_FR_price', 'product_type', 'month', 'month_real', 'product_gender', 'macro_function', 'function', 'macro_material', 'color', 'Month_transaction_last', 'sales_quantity_sum_linear_trend_rvalue', 'sales_quantity_sum_linear_trend_intercept', 'sales_quantity_sum_linear_trend_attr_slope', 'sales_quantity_sum_linear_trend_attr_stderr', 'sales_quantity_sum_kurtosis', 'sales_quantity_sum_skewness', 'BYMODELsales_quantity_sum_linear_trend_rvalue', 'BYMODELsales_quantity_sum_linear_trend_intercept', 'BYMODELsales_quantity_sum_linear_trend_attr_slope', 'BYMODELsales_quantity_sum_linear_trend_attr_stderr', 'BYMODELsales_quantity_sum_kurtosis', 'BYMODELsales_quantity_sum_skewness', ]<features_selection>
one_hot_df = pd.concat([data, test]) one_hot_df = pd.get_dummies(one_hot_df, columns=categorical_feature)
Home Credit Default Risk
1,254,160
clfs, importances, err, oof_predict = train_classifiers(train, y, f)<save_to_csv>
data = one_hot_df.iloc[:data.shape[0],:] test = one_hot_df.iloc[data.shape[0]:,]
Home Credit Default Risk
1,254,160
preds_ = None for clf in clfs: if preds_ is None: preds_ = clf.predict(test[f], num_iteration=clf.best_iteration_) else: preds_ += clf.predict(test[f], num_iteration=clf.best_iteration_) preds_ = preds_ / len(clfs) preds_ = np.expm1(preds_) subm = pd.DataFrame() subm['ID'] = test['ID'].values subm['target'] = preds_ subm.to_csv('submission{}.csv'.format(err), index=False)<import_modules>
buro_grouped_size = buro_balance.groupby("SK_ID_BUREAU")["MONTHS_BALANCE"].size() buro_grouped_max = buro_balance.groupby("SK_ID_BUREAU")["MONTHS_BALANCE"].max() buro_grouped_min = buro_balance.groupby("SK_ID_BUREAU")["MONTHS_BALANCE"].min()
Home Credit Default Risk
1,254,160
import torch import torch.nn as nn import pandas as pd import numpy as np import xgboost as xgb from sklearn import preprocessing from sklearn.model_selection import train_test_split from sklearn.model_selection import TimeSeriesSplit,KFold, StratifiedKFold from time import time import pickle import datetime as dt from imblearn.over_sampling import RandomOverSampler from scipy.sparse import csc_matrix<import_modules>
buro_counts = buro_balance.groupby("SK_ID_BUREAU")["STATUS"].value_counts(normalize=False) buro_counts_unstacked = buro_counts.unstack("STATUS")
Home Credit Default Risk
1,254,160
import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import Dataset, DataLoader, random_split<load_from_csv>
buro_counts_unstacked.columns = ['STATUS_0', 'STATUS_1','STATUS_2','STATUS_3','STATUS_4','STATUS_5','STATUS_C','STATUS_X',] buro_counts_unstacked["MONTHS_COUNT"] = buro_grouped_size buro_counts_unstacked["MONTHS_MIN"] = buro_grouped_min buro_counts_unstacked["MONTHS_MAX"] = buro_grouped_max
Home Credit Default Risk
1,254,160
def init_read_big(train_path, test_path, train_big_path): train = pd.read_csv(train_path) test = pd.read_csv(test_path) train_big = pd.read_csv(train_big_path) return train, test, train_big def init_read_small(train_path, test_path): train = pd.read_csv(train_path) test = pd.read_csv(test_path) return train, test def remove_column_big(train, train_big, column_title_big): train_small = train.copy() column_title_small = [A for A in train.columns] train_big = train_big[column_title_big] train = train_big.copy() train.columns = column_title_small return train def data_preprocessing(train, test): train = train.drop(['Quote_ID'], axis=1) y = train.QuoteConversion_Flag.values train = train.drop(['QuoteConversion_Flag'], axis=1) test = test.drop('Quote_ID', axis=1) train['Date'] = pd.to_datetime(pd.Series(train['Original_Quote_Date'])) train = train.drop('Original_Quote_Date', axis=1) test['Date'] = pd.to_datetime(pd.Series(test['Original_Quote_Date'])) test = test.drop('Original_Quote_Date', axis=1) train['Year'] = train['Date'].apply(lambda x: int(str(x)[:4])) train['Month'] = train['Date'].apply(lambda x: int(str(x)[5:7])) train['weekday'] = train['Date'].dt.dayofweek test['Year'] = test['Date'].apply(lambda x: int(str(x)[:4])) test['Month'] = test['Date'].apply(lambda x: int(str(x)[5:7])) test['weekday'] = test['Date'].dt.dayofweek train = train.drop(['Date'], axis=1) test = test.drop(['Date'], axis=1) for A in train.columns: if train[A].dtype == 'object': lbl = preprocessing.LabelEncoder() lbl.fit(list(train[A].values) + list(test[A].values)) train[A] = lbl.transform(list(train[A].values)) test[A] = lbl.transform(list(test[A].values)) train = train.fillna(-999) test = test.fillna(-999) print("Pre-processing_big complete!!") return train, test, y<load_from_csv>
buro = buro.join(buro_counts_unstacked, how='left', on='SK_ID_BUREAU' )
Home Credit Default Risk
1,254,160
train_path = '/kaggle/input/2019s-uts-data-analytics-assignment-3/Assignment3_TrainingSet.csv' test_path = '/kaggle/input/2019s-uts-data-analytics-assignment-3/Assignment3_TestSet.csv' train, test = init_read_small(train_path, test_path) train, test, y = data_preprocessing(train, test)<normalization>
prev_cat_features = [pcol for pcol in prev.columns if prev[pcol].dtype == "object"] prev = pd.get_dummies(prev, columns=prev_cat_features)
Home Credit Default Risk
1,254,160
sc = StandardScaler() train = sc.fit_transform(train) test = sc.transform(test)<train_model>
avg_prev = prev.groupby("SK_ID_CURR").mean()
Home Credit Default Risk
1,254,160
<normalization>
cnt_prev = prev[["SK_ID_CURR", "SK_ID_PREV"]].groupby("SK_ID_CURR").count() avg_prev["nb_app"] = cnt_prev["SK_ID_PREV"] del avg_prev["SK_ID_PREV"]
Home Credit Default Risk
1,254,160
class Net(nn.Module): def __init__(self, D_in, H=15, D_out=1): super().__init__() self.fc1 = nn.Linear(D_in, H) self.fc2 = nn.Linear(H, D_out) self.relu = nn.ReLU() def forward(self, x): x = self.fc1(x) x = self.relu(x) x = self.fc2(x) return x.squeeze()<find_best_params>
buro_cat_features = [bcol for bcol in buro.columns if buro[bcol].dtype == 'object'] buro = pd.get_dummies(buro, columns=buro_cat_features) avg_buro = buro.groupby('SK_ID_CURR').mean() avg_buro['buro_count'] = buro[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU'] del avg_buro['SK_ID_BUREAU']
Home Credit Default Risk
1,254,160
class EarlyStopping(object): def __init__(self, mode='min', min_delta=0, patience=10, percentage=False): self.mode = mode self.min_delta = min_delta self.patience = patience self.best = None self.num_bad_epochs = 0 self.is_better = None self._init_is_better(mode, min_delta, percentage) if patience == 0: self.is_better = lambda a, b: True self.step = lambda a: False def step(self, metrics): if self.best is None: self.best = metrics return False if np.isnan(metrics): return True if self.is_better(metrics, self.best): self.num_bad_epochs = 0 self.best = metrics else: self.num_bad_epochs += 1 if self.num_bad_epochs >= self.patience: return True return False def _init_is_better(self, mode, min_delta, percentage): if mode not in {'min', 'max'}: raise ValueError('mode ' + mode + ' is unknown!') if not percentage: if mode == 'min': self.is_better = lambda a, best: a < best - min_delta if mode == 'max': self.is_better = lambda a, best: a > best + min_delta else: if mode == 'min': self.is_better = lambda a, best: a < best - (best * min_delta / 100) if mode == 'max': self.is_better = lambda a, best: a > best + (best * min_delta / 100)<set_options>
le = LabelEncoder() POS_CASH['NAME_CONTRACT_STATUS'] = le.fit_transform(POS_CASH['NAME_CONTRACT_STATUS'].astype(str)) nunique_status = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique() nunique_status2 = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max() nunique_status2.head(10)
Home Credit Default Risk
1,254,160
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")<split>
POS_CASH['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS'] POS_CASH['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS'] POS_CASH.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)
Home Credit Default Risk
1,254,160
n_fold = 5 random_state = 999 D_in, H = 30, 15 models = [] train_no = 1 training_cycle = 20000 repetition = 1 training_sequence = 1 for A in range(repetition): kf = StratifiedKFold(n_splits=n_fold, shuffle=True, random_state=random_state + A) for train_index, val_index in kf.split(train, y): train_X = train[train_index] val_X = train[val_index] train_y = y[train_index] val_y = y[val_index] train_X_torch = torch.tensor(train_X) val_X_torch = torch.tensor(val_X) train_y_torch = torch.tensor(train_y) val_y_torch = torch.tensor(val_y) print(f'Currently training sequence no {training_sequence}') net = Net(D_in, H).to(device) criteria = nn.MSELoss() optimizer = optim.Adam(net.parameters(), weight_decay=0.0001) es = EarlyStopping(patience=20) for epoch in range(training_cycle): print(f'Training Epoch No {epoch}') inputs = train_X_torch.to(device) labels = train_y_torch.to(device) inputs_valid = val_X_torch.to(device) labels_valid = val_y_torch.to(device) optimizer.zero_grad() outputs = net(inputs.float()) outputs_valid = net(inputs_valid.float()) loss = criteria(outputs, labels.float()) loss_valid = criteria(outputs_valid, labels_valid.float()) metric_valid = loss_valid.detach().cpu().numpy() loss.backward() optimizer.step() print(f'Current Loss is {metric_valid}') if es.step(metric_valid): break models.append(net) training_sequence += 1 del net, es, metric_valid<choose_model_class>
le = LabelEncoder() credit_card['NAME_CONTRACT_STATUS'] = le.fit_transform(credit_card['NAME_CONTRACT_STATUS'].astype(str)) nunique_status = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique() nunique_status2 = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max() credit_card['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS'] credit_card['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS'] credit_card.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)
Home Credit Default Risk
1,254,160
<define_variables>
avg_payments = payments.groupby("SK_ID_CURR").mean() avg_payments2 = payments.groupby("SK_ID_CURR").max() avg_payments3 = payments.groupby("SK_ID_CURR").min()
Home Credit Default Risk
1,254,160
test_torch = torch.tensor(test) pred_df = sum([net(test_torch.float()) for net in models]) / (n_fold * repetition)<predict_on_test>
del avg_payments["SK_ID_PREV"]
Home Credit Default Risk
1,254,160
test_predict_numpy = pred_df.detach().numpy()<feature_engineering>
data = data.merge(right=avg_prev.reset_index(), how="left", on="SK_ID_CURR") test = test.merge(right=avg_prev.reset_index(), how="left", on="SK_ID_CURR")
Home Credit Default Risk
1,254,160
for A in range(len(test_predict_numpy)): if test_predict_numpy[A] > np.mean(test_predict_numpy): test_predict_numpy[A] = 1 else: test_predict_numpy[A] = 0<count_values>
data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,254,160
sum(test_predict_numpy)<save_to_csv>
data = data.merge(right=POS_CASH.groupby("SK_ID_CURR").mean().reset_index(), how="left", on="SK_ID_CURR") test = test.merge(right=POS_CASH.groupby("SK_ID_CURR").mean().reset_index(), how="left", on="SK_ID_CURR")
Home Credit Default Risk
1,254,160
sample = pd.read_csv('/kaggle/input/2019s-uts-data-analytics-assignment-3/Assignment3_Random_Submission-Kaggle.csv') sample.QuoteConversion_Flag = test_predict_numpy sample.to_csv('pytorch_model.csv', index=False)<load_from_csv>
data = data.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR') test = test.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,254,160
train = pd.read_csv('/kaggle/input/ec524-heart-disease/train.csv') test = pd.read_csv('/kaggle/input/ec524-heart-disease/test.csv')<count_missing_values>
data = data.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,254,160
train.isna().sum()<data_type_conversions>
data = data.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,254,160
train.fillna(train.mean(), inplace=True)<count_missing_values>
data = data.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,254,160
train.isna().sum()<count_missing_values>
test = test[test.columns[data.isnull().mean() < 0.85]] data = data[data.columns[data.isnull().mean() < 0.85]]
Home Credit Default Risk
1,254,160
test.isna().sum()<drop_column>
excluded_feats = ["SK_ID_CURR"] features = [f for f in data.columns if f not in excluded_feats]
Home Credit Default Risk
1,254,160
train = train.drop(['id'], axis=1)<prepare_x_and_y>
Home Credit Default Risk
1,254,160
x=train.drop(['heart_disease'],axis=1) y=train['heart_disease'].values<normalization>
lgbm_params = { "boosting":"dart", "application":"binary", "learning_rate": 0.1, 'reg_alpha':0.01, 'reg_lambda': 0.01, "n_estimators":10000, "max_depth":7, "num_leaves":100, "max_bin":225, "drop_rate":0.02 }
Home Credit Default Risk
1,254,160
scaler = StandardScaler() x = scaler.fit_transform(x)<choose_model_class>
model = lgb.LGBMClassifier(application="binary", boosting_type=lgbm_params["boosting"], learning_rate=lgbm_params["learning_rate"],n_estimators=lgbm_params["n_estimators"],drop_rate=lgbm_params["drop_rate"], num_leaves=lgbm_params["num_leaves"], max_depth=lgbm_params["max_depth"], max_bin=lgbm_params["max_bin"])
Home Credit Default Risk
1,254,160
lr = LogisticRegression() rf = RandomForestClassifier() vote = VotingClassifier(estimators=[('lr', lr), ('rf', rf)])<train_model>
feature_importances = np.zeros(data.shape[1]) for i in range(2): train_data, test_data, train_y, test_y = train_test_split(data, y, test_size=0.2, random_state=i) model.fit(train_data, train_y, early_stopping_rounds=100, eval_set=[(test_data, test_y)], eval_metric='auc', verbose=200) feature_importances += model.feature_importances_
Home Credit Default Risk
1,254,160
vote.fit(x, y)<compute_test_metric>
threshold = 0.98 features_to_keep = list(norm_feature_importances[norm_feature_importances['cumulative_importance'] < threshold]['feature']) data = data[features_to_keep] test = test[features_to_keep]
Home Credit Default Risk
1,254,160
vote.score(x, y)<drop_column>
n_folds = 5 k_fold = KFold(n_splits=n_folds, shuffle=True, random_state=50) feature_importances_values = np.zeros(data.shape[1]) test_predictions = np.zeros(test.shape[0]) out_of_fold = np.zeros(data.shape[0]) valid_scores = [] train_scores = [] for train_indices, test_indices in k_fold.split(data): train_data, train_y = data.iloc[train_indices], y.iloc[train_indices] test_data, test_y = data.iloc[test_indices], y.iloc[test_indices] model = lgb.LGBMClassifier(application="binary", boosting_type=lgbm_params["boosting"], learning_rate=lgbm_params["learning_rate"], n_estimators=lgbm_params["n_estimators"], num_leaves=lgbm_params["num_leaves"], max_depth=lgbm_params["max_depth"], reg_lambda=lgbm_params['reg_lambda'], reg_alpha=lgbm_params["reg_alpha"], drop_rate=lgbm_params["drop_rate"], random_state=50) model.fit(train_data, train_y, eval_metric='auc', eval_set=[(test_data, test_y), (train_data, train_y)], eval_names=['valid', 'train'], early_stopping_rounds=100, verbose=200) best_iteration = model.best_iteration_ feature_importances_values += model.feature_importances_ / k_fold.n_splits test_predictions += model.predict_proba(test, num_iteration=best_iteration)[:, 1] / k_fold.n_splits out_of_fold[test_indices] = model.predict_proba(test_data, num_iteration=best_iteration)[:, 1] valid_score = model.best_score_['valid']['auc'] train_score = model.best_score_['train']['auc'] valid_scores.append(valid_score) train_scores.append(train_score) gc.enable() del model, train_data, test_data gc.collect()
Home Credit Default Risk
1,254,160
test_ = test.drop(['id'], axis=1)<normalization>
submission = pd.DataFrame({'SK_ID_CURR': test["SK_ID_CURR"], 'TARGET': test_predictions}) submission.to_csv("submissions.csv", index=False)
Home Credit Default Risk
1,233,526
test_scaled = scaler.transform(test_)<predict_on_test>
import gc
Home Credit Default Risk
1,233,526
y_pred = vote.predict(test_scaled)<create_dataframe>
from sklearn.model_selection import train_test_split, KFold from sklearn.metrics import accuracy_score, confusion_matrix from sklearn.feature_selection import VarianceThreshold from sklearn.preprocessing import LabelEncoder, MinMaxScaler
Home Credit Default Risk
1,233,526
sub = pd.DataFrame() sub['id'] = test['id'] sub['heart_disease'] = y_pred<save_to_csv>
data = pd.read_csv(".. /input/application_train.csv") test = pd.read_csv('.. /input/application_test.csv') prev = pd.read_csv('.. /input/previous_application.csv') buro = pd.read_csv('.. /input/bureau.csv') buro_balance = pd.read_csv('.. /input/bureau_balance.csv') credit_card = pd.read_csv('.. /input/credit_card_balance.csv') POS_CASH = pd.read_csv('.. /input/POS_CASH_balance.csv') payments = pd.read_csv('.. /input/installments_payments.csv') lgbm_submission = pd.read_csv('.. /input/sample_submission.csv' )
Home Credit Default Risk
1,233,526
sub.to_csv('sub.csv', index=False)<set_options>
y = data["TARGET"] del data["TARGET"]
Home Credit Default Risk
1,233,526
img_size = (224, 224) img_array_list = [] cls_list = [] dir = '../input/train/PNEUMONIA' img_list = glob.glob(dir + '/*.jpeg') for i in img_list: img = load_img(i, color_mode='grayscale', target_size=(img_size)) img_array = img_to_array(img) / 255 img_array_list.append(img_array) cls_list.append(1) dir = '../input/train/NORMAL' img_list = glob.glob(dir + '/*.jpeg') for i in img_list: img = load_img(i, color_mode='grayscale', target_size=(img_size)) img_array = img_to_array(img) / 255 img_array_list.append(img_array) cls_list.append(0) X_train = np.array(img_array_list) y_train = np.array(cls_list) print(X_train.shape, y_train.shape)<train_model>
categorical_feature = [col for col in data.columns if data[col].dtype == 'object']
Home Credit Default Risk
1,233,526
X_learn, X_valid, y_learn, y_valid = train_test_split(X_train, y_train, test_size=0.3, random_state=0) shape = X_learn.shape X_learn = np.reshape(X_learn, (shape[0], shape[1] * shape[2] * shape[3])) ros = RandomOverSampler(random_state=0) X_res, y_res = ros.fit_resample(X_learn, y_learn) X_res = np.reshape(X_res, (X_res.shape[0], shape[1], shape[2], shape[3])) Y_res = to_categorical(y_res) Y_valid = to_categorical(y_valid) print('Train:', X_res.shape, Y_res.shape) print('Test:', X_valid.shape, Y_valid.shape)<choose_model_class>
one_hot_df = pd.concat([data, test]) one_hot_df = pd.get_dummies(one_hot_df, columns=categorical_feature)
Home Credit Default Risk
1,233,526
def auc(y_true, y_pred): auc = tf.py_func(roc_auc_score, (y_true, y_pred), tf.double) return auc model = Sequential() model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=(img_size[0], img_size[1], 1))) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(3, 3))) model.add(Dropout(rate=0.25)) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(3, 3))) model.add(Dropout(rate=0.25)) model.add(Flatten()) model.add(Dense(units=128, activation='relu')) model.add(Dropout(rate=0.25)) model.add(Dense(units=2, activation='softmax')) model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', auc]) model.summary()<train_model>
data = one_hot_df.iloc[:data.shape[0],:] test = one_hot_df.iloc[data.shape[0]:,]
Home Credit Default Risk
1,233,526
model.fit(X_res, Y_res, batch_size=32, epochs=10) model.evaluate(X_valid, Y_valid, batch_size=32)<train_model>
buro_grouped_size = buro_balance.groupby("SK_ID_BUREAU")["MONTHS_BALANCE"].size() buro_grouped_max = buro_balance.groupby("SK_ID_BUREAU")["MONTHS_BALANCE"].max() buro_grouped_min = buro_balance.groupby("SK_ID_BUREAU")["MONTHS_BALANCE"].min()
Home Credit Default Risk
1,233,526
shape = X_train.shape X_train = np.reshape(X_train, (shape[0], shape[1] * shape[2] * shape[3])) ros = RandomOverSampler(random_state=0) X_res, y_res = ros.fit_resample(X_train, y_train) X_train = np.reshape(X_train, (shape[0], shape[1], shape[2], shape[3])) X_res = np.reshape(X_res, (X_res.shape[0], shape[1], shape[2], shape[3])) Y_res = to_categorical(y_res) print('Train:', X_res.shape, Y_res.shape)<train_model>
buro_counts = buro_balance.groupby("SK_ID_BUREAU")["STATUS"].value_counts(normalize=False) buro_counts_unstacked = buro_counts.unstack("STATUS")
Home Credit Default Risk
1,233,526
model.fit(X_res, Y_res, batch_size=32, epochs=10)<normalization>
buro_counts_unstacked.columns = ['STATUS_0', 'STATUS_1','STATUS_2','STATUS_3','STATUS_4','STATUS_5','STATUS_C','STATUS_X',] buro_counts_unstacked["MONTHS_COUNT"] = buro_grouped_size buro_counts_unstacked["MONTHS_MIN"] = buro_grouped_min buro_counts_unstacked["MONTHS_MAX"] = buro_grouped_max
Home Credit Default Risk
1,233,526
img_array_list = [] dir = '../input/test' img_list = glob.glob(dir + '/*.jpeg') img_list.sort() for i in img_list: img = load_img(i, color_mode='grayscale', target_size=(img_size)) img_array = img_to_array(img) / 255 img_array_list.append(img_array) X_test = np.array(img_array_list) print(X_test.shape)<predict_on_test>
buro = buro.join(buro_counts_unstacked, how='left', on='SK_ID_BUREAU')
Home Credit Default Risk
1,233,526
predict = model.predict(X_test)[:, 1]<save_to_csv>
prev_cat_features = [pcol for pcol in prev.columns if prev[pcol].dtype == "object"] prev = pd.get_dummies(prev, columns=prev_cat_features) prev.head(10)
Home Credit Default Risk
1,233,526
submit = pd.read_csv('../input/sampleSubmission.csv') submit['pneumonia'] = predict submit.to_csv('submission.csv', index=False)<import_modules>
avg_prev = prev.groupby("SK_ID_CURR").mean() avg_prev.head(10)
Home Credit Default Risk
1,233,526
import numpy as np from sklearn.metrics.pairwise import cosine_similarity from sklearn.feature_extraction.text import TfidfVectorizer<define_variables>
cnt_prev = prev[["SK_ID_CURR", "SK_ID_PREV"]].groupby("SK_ID_CURR").count() avg_prev["nb_app"] = cnt_prev["SK_ID_PREV"] del avg_prev["SK_ID_PREV"]
Home Credit Default Risk
1,233,526
root_path = '../input/homework4_v2/' max_df = 0.95 min_df = 5 smooth_idf = True sublinear_tf = True alpha = 1 beta = 0.75 gamma = 0.15 rel_count = 5 nrel_count = 1 iters = 5<define_variables>
buro_cat_features = [bcol for bcol in buro.columns if buro[bcol].dtype == 'object'] buro = pd.get_dummies(buro, columns=buro_cat_features) avg_buro = buro.groupby('SK_ID_CURR').mean() avg_buro['buro_count'] = buro[['SK_ID_BUREAU', 'SK_ID_CURR']].groupby('SK_ID_CURR').count()['SK_ID_BUREAU'] del avg_buro['SK_ID_BUREAU']
Home Credit Default Risk
1,233,526
with open(root_path + 'doc_list.txt') as file: doc_list = [line.rstrip() for line in file] with open(root_path + 'query_list.txt') as file: query_list = [line.rstrip() for line in file]<string_transform>
le = LabelEncoder() POS_CASH['NAME_CONTRACT_STATUS'] = le.fit_transform(POS_CASH['NAME_CONTRACT_STATUS'].astype(str)) nunique_status = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique() nunique_status2 = POS_CASH[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max() nunique_status2.head(10)
Home Credit Default Risk
1,233,526
documents, queries = [], [] for doc_name in doc_list: with open(root_path + 'Document/' + doc_name) as file: doc = ' '.join([word for line in file.readlines()[3:] for word in line.split()[:-1]]) documents.append(doc) for query_name in query_list: with open(root_path + 'Query/' + query_name) as file: query = ' '.join([word for line in file.readlines() for word in line.split()[:-1]]) queries.append(query)<compute_train_metric>
POS_CASH['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS'] POS_CASH['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS'] POS_CASH.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)
Home Credit Default Risk
1,233,526
vectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) doc_tfidfs = vectorizer.fit_transform(documents).toarray() query_vecs = vectorizer.transform(queries).toarray() cos_sim = cosine_similarity(query_vecs, doc_tfidfs) rankings = np.flip(cos_sim.argsort(), axis=1)<compute_test_metric>
le = LabelEncoder() credit_card['NAME_CONTRACT_STATUS'] = le.fit_transform(credit_card['NAME_CONTRACT_STATUS'].astype(str)) nunique_status = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').nunique() nunique_status2 = credit_card[['SK_ID_CURR', 'NAME_CONTRACT_STATUS']].groupby('SK_ID_CURR').max() credit_card['NUNIQUE_STATUS'] = nunique_status['NAME_CONTRACT_STATUS'] credit_card['NUNIQUE_STATUS2'] = nunique_status2['NAME_CONTRACT_STATUS'] credit_card.drop(['SK_ID_PREV', 'NAME_CONTRACT_STATUS'], axis=1, inplace=True)
Home Credit Default Risk
1,233,526
for _ in range(iters): rel_vecs = doc_tfidfs[rankings[:, :rel_count]].mean(axis=1) nrel_vecs = doc_tfidfs[rankings[:, -nrel_count:]].mean(axis=1) query_vecs = alpha * query_vecs + beta * rel_vecs - gamma * nrel_vecs cos_sim = cosine_similarity(query_vecs, doc_tfidfs) rankings = np.flip(cos_sim.argsort(axis=1), axis=1)<save_to_csv>
avg_payments = payments.groupby("SK_ID_CURR").mean() avg_payments2 = payments.groupby("SK_ID_CURR").max() avg_payments3 = payments.groupby("SK_ID_CURR").min()
Home Credit Default Risk
1,233,526
with open('submission.csv', mode='w') as file: file.write('Query,RetrievedDocuments\n') for query_name, ranking in zip(query_list, rankings): ranked_docs = ' '.join([doc_list[idx] for idx in ranking]) file.write('%s,%s\n' % (query_name, ranked_docs))<import_modules>
del avg_payments["SK_ID_PREV"]
Home Credit Default Risk
1,233,526
print(os.listdir("../input"))<load_from_csv>
data = data.merge(right=avg_prev.reset_index(), how="left", on="SK_ID_CURR") test = test.merge(right=avg_prev.reset_index(), how="left", on="SK_ID_CURR")
Home Credit Default Risk
1,233,526
data = pd.read_csv(".. /input/train.csv") test = pd.read_csv('.. /input/test.csv') ids = pd.read_csv('.. /input/sampleSubmission.csv') test.head(5 )<count_missing_values>
data = data.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_buro.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,233,526
data.isnull().any()<feature_engineering>
data = data.merge(right=POS_CASH.groupby("SK_ID_CURR").mean().reset_index(), how="left", on="SK_ID_CURR") test = test.merge(right=POS_CASH.groupby("SK_ID_CURR").mean().reset_index(), how="left", on="SK_ID_CURR")
Home Credit Default Risk
1,233,526
data.loc[data['TotalCharges'].isna(), 'TotalCharges'] = 0<count_missing_values>
data = data.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR') test = test.merge(credit_card.groupby('SK_ID_CURR').mean().reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,233,526
test.isnull().any()<feature_engineering>
data = data.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_payments.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,233,526
test.loc[test['TotalCharges'].isna(), 'TotalCharges'] = 0<prepare_x_and_y>
data = data.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_payments2.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,233,526
X = data.drop(['customerID', 'Churn'], axis=1) x = pd.get_dummies(X) x = x.drop(['gender_0', 'Partner_0', 'Dependents_0', 'PaymentMethod_0', 'MultipleLines_0', 'PaperlessBilling_0', 'StreamingMovies_0', 'PhoneService_0', 'InternetService_0', 'DeviceProtection_0', 'TechSupport_0', 'OnlineSecurity_0', 'OnlineBackup_0'], axis=1) y = pd.get_dummies(data.Churn) y = y.drop([0], axis=1) teste = test.drop(['customerID'], axis=1) teste = pd.get_dummies(teste) teste = teste.drop(['gender_0', 'Partner_0', 'Dependents_0', 'PaymentMethod_0', 'MultipleLines_0', 'PaperlessBilling_0', 'StreamingMovies_0', 'PhoneService_0', 'InternetService_0', 'DeviceProtection_0', 'TechSupport_0', 'OnlineSecurity_0', 'OnlineBackup_0'], axis=1) y.head(2)<prepare_x_and_y>
data = data.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR') test = test.merge(right=avg_payments3.reset_index(), how='left', on='SK_ID_CURR')
Home Credit Default Risk
1,233,526
test_np = np.array(teste) X = np.array(x) Y = np.array(y)<split>
test = test[test.columns[data.isnull().mean() < 0.85]] data = data[data.columns[data.isnull().mean() < 0.85]]
Home Credit Default Risk
1,233,526
x_Train, x_Test, y_Train, y_Test = train_test_split(X, Y, test_size=0.30)<normalization>
excluded_feats = ["SK_ID_CURR"] features = [f for f in data.columns if f not in excluded_feats]
Home Credit Default Risk
1,233,526
scaler = StandardScaler() sdsfds = scaler.fit_transform(x_Train)<compute_train_metric>
folds = KFold(n_splits=4, shuffle=True, random_state=546789) oof_preds = np.zeros(data.shape[0]) sub_preds = np.zeros(test.shape[0])
Home Credit Default Risk
1,233,526
def score(model, x, y): prob = model.predict_proba(x) prob = prob[:, 1] auc = roc_auc_score(y, prob) print('Accuracy: {} ROC_AUC: {} {}'.format(model.score(x, y), auc, prob[:10]))<train_model>
for n_fold, (trn_idx, val_idx) in enumerate(folds.split(data)): trn_x, trn_y = data[features].iloc[trn_idx], y.iloc[trn_idx] val_x, val_y = data[features].iloc[val_idx], y.iloc[val_idx] clf = XGBClassifier(objective="binary:logistic", booster="gbtree", eval_metric="auc", nthread=4, eta=0.05, gamma=0, max_depth=6, subsample=0.7, colsample_bytree=0.7, colsample_bylevel=0.675, min_child_weight=22, alpha=0, random_state=42, nrounds=2000) clf.fit(trn_x, trn_y, eval_set=[(trn_x, trn_y), (val_x, val_y)], verbose=10, early_stopping_rounds=30) oof_preds[val_idx] = clf.predict_proba(val_x)[:, 1] sub_preds += clf.predict_proba(test[features])[:, 1] / folds.n_splits print("Fold %2d AUC : %.6f" % (n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx]))) del clf, trn_x, trn_y, val_x, val_y gc.collect()
Home Credit Default Risk
1,233,526
knn = KNeighborsClassifier(n_neighbors=3) asa = knn.fit(x_Train, y_Train[:, 1]) score(knn, x_Test, y_Test[:, 1])<predict_on_test>
print('Full AUC score %.6f' % roc_auc_score(y, oof_preds)) test['TARGET'] = sub_preds
Home Credit Default Risk
1,233,526
<train_model><EOS>
test[["SK_ID_CURR","TARGET"]].to_csv('xgb_submission_esi.csv', index=False, float_format='%.8f' )
Home Credit Default Risk
1,189,554
<SOS> metric: AUC Kaggle data source: home-credit-default-risk<predict_on_test>
warnings.simplefilter(action='ignore', category=FutureWarning) py.init_notebook_mode(connected=True) %matplotlib inline cf.go_offline()
Home Credit Default Risk
1,189,554
prob = xgb.predict_proba(test_np)<train_model>
def one_hot_encoder(df, nan_as_category = True): original_columns = list(df.columns) categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category) new_columns = [c for c in df.columns if c not in original_columns] return df, new_columns
Home Credit Default Risk
1,189,554
clf = RandomForestClassifier(n_estimators=10) clf.fit(x_Train, y_Train[:, 1]) score(clf, x_Test, y_Test[:, 1])<train_model>
num_rows = None nan_as_category = True
Home Credit Default Risk
1,189,554
ada = AdaBoostClassifier(n_estimators=100, random_state=0) ada.fit(x_Train, y_Train[:, 1]) score(ada, x_Test, y_Test[:, 1])<predict_on_test>
print("Start Train Test................." )
Home Credit Default Risk
1,189,554
prob3 = ada.predict_proba(test_np)<train_model>
df = pd.read_csv('../input/application_train.csv', nrows=num_rows) test_df = pd.read_csv('../input/application_test.csv', nrows=num_rows) print("Train samples: {}, test samples: {}".format(len(df), len(test_df))) df = df.append(test_df).reset_index() del test_df gc.collect()
Home Credit Default Risk
1,189,554
gbc = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(x_Train, y_Train[:, 1]) score(gbc, x_Test, y_Test[:, 1])<train_model>
for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']: df[bin_feature], uniques = pd.factorize(df[bin_feature]) df, cat_cols = one_hot_encoder(df, nan_as_category) df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)
Home Credit Default Risk
1,189,554
rgl = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(x_Train, y_Train[:, 1]) score(rgl, x_Test, y_Test[:, 1])<predict_on_test>
df['DAYS_EMPLOYED_PERC'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH'] df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT'] df['INCOME_PER_PERSON'] = df['AMT_INCOME_TOTAL'] / df['CNT_FAM_MEMBERS'] df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']
Home Credit Default Risk
1,189,554
prob4 = rgl.predict_proba(test_np)<train_model>
a = df['DAYS_EMPLOYED_PERC'].tolist() a = [x for x in a if str(x) != 'nan'] b = df['INCOME_CREDIT_PERC'].tolist() b = [x for x in b if str(x) != 'nan'] c = df['INCOME_PER_PERSON'].tolist() c = [x for x in c if str(x) != 'nan'] d = df['ANNUITY_INCOME_PERC'].tolist() d = [x for x in d if str(x) != 'nan']
Home Credit Default Risk
1,189,554
BC_model = BaggingClassifier().fit(x_Train, y_Train[:, 1]) score(BC_model, x_Test, y_Test[:, 1])<train_model>
print("End Train Test.................. " )
Home Credit Default Risk