kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
10,939,062
data = matrix.copy()<prepare_x_and_y>
y_pred = rid.predict(X_pred) y_pred = np.argmax(y_pred, axis=-1) print(y_pred.shape )
Digit Recognizer
10,939,062
X_train = data[data.date_block_num <= 32].drop(['item_cnt_month'], axis=1) Y_train = data[data.date_block_num <= 32]['item_cnt_month'] X_valid = data[data.date_block_num == 33].drop(['item_cnt_month'], axis=1) Y_valid = data[data.date_block_num == 33]['item_cnt_month'] X_test = data[data.date_block_num == 34].drop(['item_cnt_month'], axis=1 )<train_model>
ridge = sub.copy() for i in range(len(y_pred)) : ridge.iloc[i].Label = y_pred[i] ridge.to_csv("submission_ridge.csv", index=False )
Digit Recognizer
10,939,062
model = XGBRegressor( max_depth=10, n_estimators=1000, min_child_weight=0.5, colsample_bytree=0.8, subsample=0.8, eta=0.1, seed=42 ) model.fit( X_train, Y_train, eval_metric='rmse', eval_set=[(X_train, Y_train),(X_valid, Y_valid)], verbose=True, early_stopping_rounds=20 )<save_to_csv>
init = Constant (.333) X = Input(shape=(10,3)) out = Conv1D(1, 1, use_bias=False, kernel_initializer=init, kernel_regularizer='l2' )(X) out = Flatten()(out) out = Softmax()(out) model = Model(X, out) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy']) model.summary()
Digit Recognizer
10,939,062
Y_pred = model.predict(X_valid ).clip(0, 20) Y_test = model.predict(X_test ).clip(0, 20) submission = pd.DataFrame({ 'ID': test.index, 'item_cnt_month': Y_test }) submission.to_csv('xgb_submission.csv', index=False )<import_modules>
X = np.stack([f_y_train, v_y_train, r_y_train], axis=-1) X_pred = np.stack([f_y_test, v_y_test, r_y_test], axis=-1) X.shape, X_pred.shape
Digit Recognizer
10,939,062
import numpy as np import pandas as pd import joblib import gc<load_from_csv>
model.fit(X, y, epochs=10 )
Digit Recognizer
10,939,062
test = pd.read_csv('.. /input/home-credit-default-risk/application_test.csv') test.set_index(['SK_ID_CURR'], inplace=True) test.shape<load_pretrained>
y_pred = model.predict(X_pred) print(y_pred[0]) y_pred = np.argmax(y_pred, axis=-1) print(y_pred.shape )
Digit Recognizer
10,939,062
preprocessor = joblib.load('.. /input/wk6-default/wk6default_preprocessor.joblib') LGBM_model = joblib.load('.. /input/wk6-default/wk6_LGBM_default_model.joblib' )<load_from_csv>
weighted = sub.copy() for i in range(len(y_pred)) : weighted.iloc[i].Label = y_pred[i] weighted.to_csv("submission_weighted.csv", index=False )
Digit Recognizer
10,939,062
bureau_bal = pd.read_csv('.. /input/home-credit-default-risk/bureau_balance.csv') bureau = pd.read_csv('.. /input/home-credit-default-risk/bureau.csv') bb = pd.merge(bureau, bureau_bal, on = 'SK_ID_BUREAU', how = 'left') bb['REMAIN_CRED'] = bb['AMT_CREDIT_SUM'] - bb['AMT_CREDIT_SUM_DEBT'] - bb['AMT_CREDIT_SUM_LIMIT'] bb['AC_RATIO'] = bb['AMT_ANNUITY'] / bb['AMT_CREDIT_SUM'] bb.columns = ['BU_'+column if column !=('SK_ID_CURR') else column for column in bb.columns] bur_cat = pd.get_dummies(bb.select_dtypes('object')) bur_cat['SK_ID_CURR'] = bb['SK_ID_CURR'] bur_cat = bur_cat.groupby(by = ['SK_ID_CURR'] ).agg(['mean']) bur_num = bb.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32') bureau_rev = bur_cat.merge(bur_num, on = ['SK_ID_CURR'], how = 'left') test = test.merge(bureau_rev, on = ['SK_ID_CURR'], how = 'left') del bur_cat del bur_num del bureau del bureau_bal gc.collect()<feature_engineering>
weights = model.layers[1].get_weights() weights = weights/np.sum(weights) weights
Digit Recognizer
10,939,062
cc_bal = pd.read_csv('.. /input/home-credit-default-risk/credit_card_balance.csv') cc_bal['DRAW_RATIO'] = cc_bal['AMT_DRAWINGS_CURRENT'] / cc_bal['CNT_DRAWINGS_CURRENT'] cc_bal['RECEIVE_RATIO'] = cc_bal['AMT_RECIVABLE'] / cc_bal['AMT_RECEIVABLE_PRINCIPAL'] cc_bal['RECEIVE_PER'] = cc_bal['AMT_RECIVABLE'] / cc_bal['AMT_TOTAL_RECEIVABLE'] cc_bal.columns = ['CC_'+ column if column !='SK_ID_CURR' else column for column in cc_bal.columns] cc_cat = pd.get_dummies(cc_bal.select_dtypes('object')) cc_cat['SK_ID_CURR'] = cc_bal['SK_ID_CURR'] cc_cat = cc_cat.groupby(by = ['SK_ID_CURR'] ).mean() cc_num = cc_bal.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32') test = test.merge(cc_cat, on = ['SK_ID_CURR'], how = 'left') test = test.merge(cc_num, on = ['SK_ID_CURR'], how = 'left') del cc_bal del cc_cat del cc_num gc.collect()<feature_engineering>
!pip install pyrankvote
Digit Recognizer
10,939,062
install = pd.read_csv('.. /input/home-credit-default-risk/installments_payments.csv') install['PAY_PERCENT'] = install['AMT_INSTALMENT'] / install['AMT_PAYMENT'] install['PAY_DIFF'] = install['AMT_INSTALMENT'] - install['AMT_PAYMENT'] install['DPD'] = install['DAYS_ENTRY_PAYMENT'] - install['DAYS_INSTALMENT'] install['DPD'] = install['DPD'].apply(lambda x: x if x>0 else 0) install['DBD'] = install['DAYS_INSTALMENT'] - install['DAYS_ENTRY_PAYMENT'] install['DBD'] = install['DBD'].apply(lambda x: x if x>0 else 0) install.columns = ['IP_'+ column if column !='SK_ID_CURR' else column for column in install.columns] inst_num = install.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean'] ).astype('float32') test = test.merge(inst_num, on = 'SK_ID_CURR', how='left') del install del inst_num gc.collect()<load_from_csv>
import pyrankvote from pyrankvote import Candidate, Ballot
Digit Recognizer
10,939,062
pos = pd.read_csv('.. /input/home-credit-default-risk/POS_CASH_balance.csv') pos.columns = ['PC_'+ column if column !='SK_ID_CURR' else column for column in pos.columns] pos_num = pos.groupby(by = ['SK_ID_CURR'] ).agg(['max', 'mean', 'sum'] ).astype('float32') test = test.merge(pos_num, on = ['SK_ID_CURR'], how = 'left') del pos del pos_num gc.collect() <feature_engineering>
candidates = [Candidate(i)for i in range(10)]
Digit Recognizer
10,939,062
<feature_engineering><EOS>
single = sub.copy() runoff = sub.copy() for i in range(len(X_pred)) : ballots = [] for j in range(3): ballot = np.argsort(X_pred[i,:,j]) ballot = np.flip(ballot) ballots.append(Ballot(ranked_candidates=[candidates[i] for i in ballot])) election_result = pyrankvote.single_transferable_vote(candidates, ballots, number_of_seats=1) winners = election_result.get_winners() single.iloc[i].Label = winners[0].name election_result = pyrankvote.instant_runoff_voting(candidates, ballots) winners = election_result.get_winners() runoff.iloc[i].Label = winners[0].name single.to_csv("submission_single.csv", index=False) runoff.to_csv("submission_runoff.csv", index=False )
Digit Recognizer
10,932,587
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<feature_engineering>
%matplotlib inline
Digit Recognizer
10,932,587
test['EmpAge_RATIO'] = test['DAYS_EMPLOYED'] / test['AGE'] test['CredInc_RATIO'] = test['AMT_CREDIT'] / test['AMT_INCOME_TOTAL'] test['AnnInc_RATIO'] = test['AMT_ANNUITY'] / test['AMT_INCOME_TOTAL'] test['AnnCred_RATIO'] = test['AMT_ANNUITY'] /(test['AMT_CREDIT'] + 1) test['CredGoods_RATIO'] = test['AMT_CREDIT'] /(test['AMT_GOODS_PRICE'] + 1) test['AVG_EXT_INCOME'] = test['AMT_INCOME_TOTAL'] * test['AVG_EXT'] test['AVG_EXT_GOODS'] = test['AMT_GOODS_PRICE'] * test['AVG_EXT']<define_variables>
mnist_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") mnist_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
10,932,587
dels = ['APARTMENTS_MODE', 'BASEMENTAREA_MODE', 'YEARS_BEGINEXPLUATATION_MODE', 'YEARS_BUILD_MODE', 'COMMONAREA_MODE', 'ELEVATORS_MODE', 'ENTRANCES_MODE', 'FLOORSMAX_MODE', 'FLOORSMIN_MODE', 'LANDAREA_MODE', 'LIVINGAPARTMENTS_MODE', 'LIVINGAREA_MODE', 'NONLIVINGAPARTMENTS_MODE', 'NONLIVINGAREA_MODE', 'APARTMENTS_MEDI', 'BASEMENTAREA_MEDI', 'YEARS_BEGINEXPLUATATION_MEDI', 'YEARS_BUILD_MEDI', 'COMMONAREA_MEDI', 'ELEVATORS_MEDI', 'ENTRANCES_MEDI', 'FLOORSMAX_MEDI', 'FLOORSMIN_MEDI', 'LANDAREA_MEDI', 'LIVINGAPARTMENTS_MEDI', 'LIVINGAREA_MEDI', 'NONLIVINGAPARTMENTS_MEDI', 'NONLIVINGAREA_MEDI', 'FONDKAPREMONT_MODE', 'HOUSETYPE_MODE', 'TOTALAREA_MODE', 'WALLSMATERIAL_MODE', 'EMERGENCYSTATE_MODE', 'FLAG_DOCUMENT_2', 'FLAG_DOCUMENT_3', 'FLAG_DOCUMENT_4', 'FLAG_DOCUMENT_5', 'FLAG_DOCUMENT_6', 'FLAG_DOCUMENT_7', 'FLAG_DOCUMENT_8', 'FLAG_DOCUMENT_9', 'FLAG_DOCUMENT_10', 'FLAG_DOCUMENT_11', 'FLAG_DOCUMENT_12', 'FLAG_DOCUMENT_13', 'FLAG_DOCUMENT_14', 'FLAG_DOCUMENT_15', 'FLAG_DOCUMENT_16', 'FLAG_DOCUMENT_17', 'FLAG_DOCUMENT_18', 'FLAG_DOCUMENT_19', 'FLAG_DOCUMENT_20', 'FLAG_DOCUMENT_21', 'DAYS_BIRTH', 'LIVINGAPARTMENTS_AVG', 'LIVINGAREA_AVG', 'CNT_FAM_MEMBERS', 'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE', 'ELEVATORS_AVG', 'AVG_EXT'] test = test.drop(test[dels], axis =1) gc.collect()<categorify>
mnist_train.isna().any().any()
Digit Recognizer
10,932,587
test = test.replace([np.inf, -np.inf], np.nan )<predict_on_test>
mnist_train_data = mnist_train.loc[:, "pixel0":] mnist_train_label = mnist_train.loc[:, "label"] mnist_train_data = mnist_train_data/255.0 mnist_test = mnist_test/255.0
Digit Recognizer
10,932,587
test_pred = LGBM_model.predict_proba(X_test) print(test_pred.shape) print(test_pred[:5] )<load_from_csv>
standardized_scalar = StandardScaler() standardized_data = standardized_scalar.fit_transform(mnist_train_data) standardized_data.shape
Digit Recognizer
10,932,587
submission = pd.read_csv('.. /input/home-credit-default-risk/sample_submission.csv') submission.head(10 )<prepare_x_and_y>
cov_matrix = np.matmul(standardized_data.T, standardized_data) cov_matrix.shape
Digit Recognizer
10,932,587
submission.TARGET = test_pred[:,1] submission.head(10 )<save_to_csv>
lambdas, vectors = eigh(cov_matrix, eigvals=(782, 783)) vectors.shape
Digit Recognizer
10,932,587
submission.to_csv('default_submission_wk06.csv', index=False, header = True )<load_from_csv>
new_coordinates = np.matmul(vectors, standardized_data.T) print(new_coordinates.shape) new_coordinates = np.vstack(( new_coordinates, mnist_train_label)).T
Digit Recognizer
10,932,587
MainDir = ".. /input/.. /input/home-credit-default-risk" test = pd.read_csv(f'{MainDir}/application_test.csv' )<load_pretrained>
df_new = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"]) df_new.head()
Digit Recognizer
10,932,587
preprocessor = joblib.load('.. /input/defaultdata08/default_preprocessor_08.joblib') model = joblib.load('.. /input/defaultdata08/default_model_08.joblib') print(type(model))<load_from_csv>
pca = decomposition.PCA() pca.n_components = 2 pca_data = pca.fit_transform(standardized_data) pca_data.shape
Digit Recognizer
10,932,587
bureau = pd.read_csv(f'{MainDir}/bureau.csv') print(bureau.shape, "- shape of bureau table") bureau_balance = pd.read_csv(f'{MainDir}/bureau_balance.csv') bb_status = pd.crosstab(bureau_balance.SK_ID_BUREAU, bureau_balance.STATUS) bb_status.columns = ['BB_'+column for column in bb_status.columns] bureau = bureau.merge(bb_status, left_on = 'SK_ID_BUREAU', right_on = 'SK_ID_BUREAU') bureau = bureau.drop(['SK_ID_BUREAU'], axis = 1) print(bureau.shape, "- shape of bureau table after merge") bureau.columns = ['BU_'+column if column !='SK_ID_CURR' else column for column in bureau.columns] bureau_num = bureau.groupby(by=['SK_ID_CURR'] ).mean().reset_index() print(bureau_num.shape, "- shape of numeric features(incl index)") bureau_cat = pd.get_dummies(bureau.select_dtypes('object')) bureau_cat['SK_ID_CURR'] = bureau['SK_ID_CURR'] bureau_cat = bureau_cat.groupby(by = ['SK_ID_CURR'] ).mean().reset_index() print(bureau_cat.shape, "- shape of categorical features(incl index)") bureau_count = bureau.groupby(by = ['SK_ID_CURR'])['BU_CREDIT_ACTIVE'].count().reset_index() bureau_count.rename(columns={'BU_CREDIT_ACTIVE':'COUNT_of_BUREAU'}) bureau_count.head(5) test = test.merge(bureau_num, on='SK_ID_CURR', how='left') test = test.merge(bureau_cat, on='SK_ID_CURR', how='left') test = test.merge(bureau_count, on='SK_ID_CURR', how='left') print(test.shape, "- shape of training data after merges") list = ['bureau', 'bureau_num', 'bureau_cat', 'bureau_balance'] del list gc.collect() previous = pd.read_csv(f'{MainDir}/previous_application.csv') print(previous.shape, "- shape of previous_application") pos = pd.read_csv(f'{MainDir}/POS_CASH_balance.csv') pos.columns = ['PO_'+column if column !='SK_ID_PREV' else column for column in pos.columns] pos_num = pos.groupby(by=['SK_ID_PREV'] ).mean().reset_index() print(pos_num.shape, "- shape of numeric features(incl index)") pos_cat = pd.get_dummies(pos.select_dtypes('object')) pos_cat['SK_ID_PREV'] = pos['SK_ID_PREV'] pos_cat = 
pos_cat.groupby(by = ['SK_ID_PREV'] ).mean().reset_index() print(pos_cat.shape, "- shape of categorical features(incl index)") previous = previous.merge(pos_num, on='SK_ID_PREV', how='left') previous = previous.merge(pos_cat, on='SK_ID_PREV', how='left') print(previous.shape, "- shape of previous data after merges") list = ['pos', 'pos_num', 'pos_cat'] del list gc.collect() inst = pd.read_csv(f'{MainDir}/installments_payments.csv') inst.columns = ['IP_'+column if column !='SK_ID_PREV' else column for column in inst.columns] inst_num = inst.groupby(by=['SK_ID_PREV'] ).mean().reset_index() print(inst_num.shape, "- shape of numeric features(incl index)") previous = previous.merge(inst_num, left_on='SK_ID_PREV', right_on = 'SK_ID_PREV', how='left') print(previous.shape, "- shape of previous data after merges") list = ['inst', 'inst_num'] del list gc.collect() ccb = pd.read_csv(f'{MainDir}/credit_card_balance.csv') ccb.columns = ['CC_'+column if column !='SK_ID_PREV' else column for column in ccb.columns] ccb_num = ccb.groupby(by=['SK_ID_PREV'] ).mean().reset_index() print(ccb_num.shape, "- shape of numeric features(incl index)") ccb_cat = pd.get_dummies(ccb.select_dtypes('object')) ccb_cat['SK_ID_PREV'] = ccb['SK_ID_PREV'] ccb_cat = ccb_cat.groupby(by = ['SK_ID_PREV'] ).mean().reset_index() print(ccb_cat.shape, "- shape of categorical features(incl index)") previous = previous.merge(ccb_num, on='SK_ID_PREV', how='left') previous = previous.merge(ccb_cat, on='SK_ID_PREV', how='left') print(previous.shape, "- shape of previous data after merges") list = ['ccb', 'ccb_num', 'ccb_cat'] del list gc.collect() previous.columns = ['PR_'+column if column !='SK_ID_CURR' else column for column in previous.columns] previous['PR_DAYS_LAST_DUE'].replace({365243: np.nan}, inplace = True) previous['PR_DAYS_TERMINATION'].replace({365243: np.nan}, inplace = True) previous['PR_DAYS_FIRST_DRAWING'].replace({365243: np.nan}, inplace = True) previous_num = previous.groupby(by=['SK_ID_CURR'] 
).mean().reset_index() print(previous_num.shape, "- shape of numeric features(incl index)") previous_cat = pd.get_dummies(previous.select_dtypes('object')) previous_cat['SK_ID_CURR'] = previous['SK_ID_CURR'] previous_cat = bureau_cat.groupby(by = ['SK_ID_CURR'] ).mean().reset_index() print(previous_cat.shape, "- shape of categorical features(incl index)") test = test.merge(previous_num, on='SK_ID_CURR', how='left') test = test.merge(previous_cat, on='SK_ID_CURR', how='left') print(test.shape, "- shape of training data after merges") list = ['previous', 'previous_num', 'previous_cat'] del list gc.collect() test['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True) test['CI_ratio'] = test['AMT_CREDIT'] / test['AMT_INCOME_TOTAL'] test['AI_ratio'] = test['AMT_ANNUITY'] / test['AMT_INCOME_TOTAL'] test['AC_ratio'] = test['AMT_CREDIT'] / test['AMT_ANNUITY'] test['CG_ratio'] = test['AMT_CREDIT'] / test['AMT_GOODS_PRICE'] test['log_INCOME'] = np.log(test['AMT_INCOME_TOTAL']) test['log_ANNUITY'] = np.log(test['AMT_ANNUITY']) test['log_CREDIT'] = np.log(test['AMT_CREDIT']) test['log_GOODS'] = np.log(test['AMT_GOODS_PRICE']) test['MissingBureau'] = test.iloc[:, 41:44].isnull().sum(axis=1 ).astype("category") test['FLAG_CG_ratio'] = test['AMT_CREDIT'] > test['AMT_GOODS_PRICE'] test['DAYS_ID_4200'] = test['DAYS_ID_PUBLISH'] < -4200 test['AVG_EXT'] = test.iloc[:, 41:44].sum(axis=1)/(3- test.iloc[:,41:44].isnull().sum(axis=1)) test['AVG_EXT'].replace(np.nan, 0.2, inplace = True) test.EXT_SOURCE_1.fillna(test.AVG_EXT, inplace=True) test.EXT_SOURCE_2.fillna(test.AVG_EXT, inplace=True) test.EXT_SOURCE_3.fillna(test.AVG_EXT, inplace=True) test.drop(['AVG_EXT'], axis = 1) test.drop(['ORGANIZATION_TYPE'], axis = 1) test['OD_ratio'] = test['BU_AMT_CREDIT_SUM_OVERDUE'] / test['BU_AMT_CREDIT_SUM_DEBT'] test['OD_ratio'].replace([np.nan, np.inf, -np.inf], 0, inplace = True) test['Credit_ratio'] = test['BU_AMT_CREDIT_SUM'] / test['BU_AMT_CREDIT_SUM_LIMIT'] 
test['Credit_ratio'].replace([np.nan, np.inf, -np.inf], 0, inplace = True) test['Debt_ratio'] = test['BU_AMT_CREDIT_SUM_DEBT'] / test['BU_AMT_CREDIT_SUM'] test['Debt_ratio'].replace([np.nan, np.inf, -np.inf], 0, inplace = True) test['PR_term'] = test['PR_IP_AMT_PAYMENT'] / test['PR_IP_AMT_INSTALMENT'] test['PR_term'].replace([np.nan, np.inf, -np.inf], 0, inplace = True) X_test = preprocessor.transform(test) print(X_test.shape )<predict_on_test>
pca_data = np.vstack(( pca_data.T, mnist_train_label)).T
Digit Recognizer
10,932,587
test_pred = model.predict_proba(X_test) print(test_pred.shape) print(test_pred[:5] )<save_to_csv>
df_PCA = pd.DataFrame(new_coordinates, columns=["f1", "f2", "labels"]) df_PCA.head()
Digit Recognizer
10,932,587
submission = pd.read_csv('.. /input/home-credit-default-risk/sample_submission.csv') submission.head(10) submission.TARGET = test_pred[:,1] submission.head(10) submission.to_csv('default_submission_08.csv', index=False, header = True )<import_modules>
mnist_train_data = np.array(mnist_train_data) mnist_train_label = np.array(mnist_train_label )
Digit Recognizer
10,932,587
import os import gc import numpy as np import pandas as pd from scipy.stats import kurtosis from sklearn.metrics import roc_auc_score from sklearn.preprocessing import MinMaxScaler from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression import matplotlib.pyplot as plt import seaborn as sns import warnings from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold import xgboost as xgb from xgboost import XGBClassifier<define_variables>
from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Lambda, Flatten, BatchNormalization from tensorflow.keras.layers import Conv2D, MaxPool2D, AvgPool2D from tensorflow.keras.optimizers import Adadelta from keras.utils.np_utils import to_categorical from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import LearningRateScheduler
Digit Recognizer
10,932,587
DATA_DIRECTORY = ".. /input/home-credit-loan-better-data-processing"<load_from_csv>
nclasses = mnist_train_label.max() - mnist_train_label.min() + 1 mnist_train_label = to_categorical(mnist_train_label, num_classes = nclasses) print("Shape of ytrain after encoding: ", mnist_train_label.shape )
Digit Recognizer
10,932,587
train = pd.read_csv(os.path.join(DATA_DIRECTORY, 'train.csv')) test = pd.read_csv(os.path.join(DATA_DIRECTORY, 'test.csv')) labels = pd.read_csv(os.path.join(DATA_DIRECTORY, 'labels.csv')) <categorify>
def build_model(input_shape=(28, 28, 1)) : model = Sequential() model.add(Conv2D(32, kernel_size = 3, activation='relu', input_shape = input_shape)) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size = 3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(32, kernel_size = 5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(64, kernel_size = 3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size = 3, activation='relu')) model.add(BatchNormalization()) model.add(Conv2D(64, kernel_size = 5, strides=2, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.4)) model.add(Conv2D(128, kernel_size = 4, activation='relu')) model.add(BatchNormalization()) model.add(Flatten()) model.add(Dropout(0.4)) model.add(Dense(10, activation='softmax')) return model def compile_model(model, optimizer='adam', loss='categorical_crossentropy'): model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"]) def train_model(model, train, test, epochs, split): history = model.fit(train, test, shuffle=True, epochs=epochs, validation_split=split) return history
Digit Recognizer
10,932,587
train = train.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x)) test = test.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x)) labels = labels.rename(columns = lambda x:re.sub('[^A-Za-z0-9_]+', '', x))<categorify>
cnn_model = build_model(( 28, 28, 1)) compile_model(cnn_model, 'adam', 'categorical_crossentropy') model_history = train_model(cnn_model, mnist_train_data, mnist_train_label, 80, 0.2 )
Digit Recognizer
10,932,587
train=np.nan_to_num(train) test=np.nan_to_num(test) labels=np.nan_to_num(labels )<create_dataframe>
predictions = cnn_model.predict(mnist_test_arr )
Digit Recognizer
10,932,587
train = pd.DataFrame(train) test = pd.DataFrame(test) labels=pd.DataFrame(labels )<split>
predictions_test = [] for i in predictions: predictions_test.append(np.argmax(i))
Digit Recognizer
10,932,587
<predict_on_test><EOS>
submission = pd.DataFrame({ "ImageId": mnist_test.index+1, "Label": predictions_test }) submission.to_csv('my_submission.csv', index=False )
Digit Recognizer
11,252,037
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<compute_test_metric>
np.random.seed(0) %matplotlib inline
Digit Recognizer
11,252,037
print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred))) print('Precision Score : ' + str(precision_score(y_test,y_pred))) print('Recall Score : ' + str(recall_score(y_test,y_pred))) print('F1 Score : ' + str(f1_score(y_test,y_pred))) print('Confusion Matrix : ' + str(confusion_matrix(y_test,y_pred)) )<compute_test_metric>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv") X_train = train.drop(labels = ["label"], axis = 1) y_train = train['label'] X_test = test print(X_train.shape, X_test.shape )
Digit Recognizer
11,252,037
clf = LGBMClassifier().fit(X_train,y_train) y_pred = clf.predict(X_test) print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred))) print('Precision Score : ' + str(precision_score(y_test,y_pred))) print('Recall Score : ' + str(recall_score(y_test,y_pred))) print('F1 Score : ' + str(f1_score(y_test,y_pred))) print('Confusion Matrix : ' + str(confusion_matrix(y_test,y_pred)) )<compute_train_metric>
img_rows, img_cols = 28, 28 num_pixels = X_train.shape[1] input_shape =(img_rows, img_cols )
Digit Recognizer
11,252,037
clf = LogisticRegression() grid_values = {'penalty': ['l2'],'C':[0.001,.009,0.01,.09,1,5,10,25]} grid_clf_acc = GridSearchCV(clf, param_grid = grid_values,scoring = 'recall') grid_clf_acc.fit(X_train, y_train) y_pred_acc = grid_clf_acc.predict(X_test) print('Accuracy Score : ' + str(accuracy_score(y_test,y_pred_acc))) print('Precision Score : ' + str(precision_score(y_test,y_pred_acc))) print('Recall Score : ' + str(recall_score(y_test,y_pred_acc))) print('F1 Score : ' + str(f1_score(y_test,y_pred_acc))) confusion_matrix(y_test,y_pred_acc )<predict_on_test>
X_train /= 255 X_test /= 255 y_train = to_categorical(y_train) num_classes = y_train.shape[1] print(X_train.shape, X_test.shape )
Digit Recognizer
11,252,037
pred = model.predict_proba(df_test )<save_to_csv>
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state = 2 )
Digit Recognizer
11,252,037
submit = test[['SK_ID_CURR']] submit['TARGET'] = pred submit.to_csv('lgbm_Minimized_code.csv', index = False )<set_options>
def get_mlp() : return Sequential([ Dense(512, input_dim = num_pixels, activation='relu'), Dense(num_classes, activation='softmax') ] )
Digit Recognizer
11,252,037
warnings.simplefilter(action='ignore', category=FutureWarning )<define_variables>
model = get_mlp() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary()
Digit Recognizer
11,252,037
DATA_DIRECTORY = ".. /input/home-credit-default-risk"<load_from_csv>
learning_history = model.fit(X_train, y_train, batch_size = 1024, epochs = 40, verbose = 2, validation_data=(X_val, y_val)) ;
Digit Recognizer
11,252,037
df_train = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_train.csv')) df_test = pd.read_csv(os.path.join(DATA_DIRECTORY, 'application_test.csv')) df = df_train.append(df_test) del df_train, df_test; gc.collect()<categorify>
score = model.evaluate(X_val, y_val, verbose = 0) print('Test loss: {}%'.format(score[0] * 100)) print('Test accuracy: {}%'.format(score[1] * 100)) print("MLP Error: %.2f%%" %(100 - score[1] * 100))
Digit Recognizer
11,252,037
df = df[df['AMT_INCOME_TOTAL'] < 20000000] df = df[df['CODE_GENDER'] != 'XNA'] df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True) df['DAYS_LAST_PHONE_CHANGE'].replace(0, np.nan, inplace=True )<groupby>
def get_mlpv2() : return Sequential([ Dense(512, input_dim=num_pixels, activation='relu'), Dropout(0.3), Dense(256, activation='relu'), Dropout(0.2), Dense(128, activation='relu'), Dense(num_classes, kernel_initializer='normal', activation='softmax') ] )
Digit Recognizer
11,252,037
def get_age_group(days_birth): age_years = -days_birth / 365 if age_years < 27: return 1 elif age_years < 40: return 2 elif age_years < 50: return 3 elif age_years < 65: return 4 elif age_years < 99: return 5 else: return 0<feature_engineering>
model = get_mlpv2() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary()
Digit Recognizer
11,252,037
docs = [f for f in df.columns if 'FLAG_DOC' in f] df['DOCUMENT_COUNT'] = df[docs].sum(axis=1) df['NEW_DOC_KURT'] = df[docs].kurtosis(axis=1) df['AGE_RANGE'] = df['DAYS_BIRTH'].apply(lambda x: get_age_group(x))<feature_engineering>
learning_history = model.fit(X_train, y_train, batch_size = 1024, epochs = 40, verbose = 2, validation_data=(X_val, y_val)) ;
Digit Recognizer
11,252,037
df['EXT_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3'] df['EXT_SOURCES_WEIGHTED'] = df.EXT_SOURCE_1 * 2 + df.EXT_SOURCE_2 * 1 + df.EXT_SOURCE_3 * 3 np.warnings.filterwarnings('ignore', r'All-NaN(slice|axis)encountered') for function_name in ['min', 'max', 'mean', 'nanmedian', 'var']: feature_name = 'EXT_SOURCES_{}'.format(function_name.upper()) df[feature_name] = eval('np.{}'.format(function_name))( df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']], axis=1 )<feature_engineering>
score = model.evaluate(X_val, y_val, verbose = 0) print('Test loss: {}%'.format(score[0] * 100)) print('Test accuracy: {}%'.format(score[1] * 100)) print("MLP Error: %.2f%%" %(100 - score[1] * 100))
Digit Recognizer
11,252,037
df['CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY'] df['CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE'] df['ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL'] df['CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL'] df['INCOME_TO_EMPLOYED_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_EMPLOYED'] df['INCOME_TO_BIRTH_RATIO'] = df['AMT_INCOME_TOTAL'] / df['DAYS_BIRTH'] df['EMPLOYED_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH'] df['ID_TO_BIRTH_RATIO'] = df['DAYS_ID_PUBLISH'] / df['DAYS_BIRTH'] df['CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH'] df['CAR_TO_EMPLOYED_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED'] df['PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']<merge>
def get_triplecnn() : return Sequential([ Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same', input_shape = input_shape), Conv2D(32, kernel_size=(3, 3), activation='relu'), MaxPool2D(pool_size=(2, 2)) , Dropout(0.25), Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'), Conv2D(64, kernel_size=(3, 3), activation='relu'), MaxPool2D(pool_size=(2, 2)) , Dropout(0.25), Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'), Conv2D(128, kernel_size=(3, 3), activation='relu'), MaxPool2D(pool_size=(2, 2)) , Dropout(0.25), Flatten() , Dense(256, activation='relu'), Dropout(0.5), Dense(num_classes, activation = "softmax") ] )
Digit Recognizer
11,252,037
def do_mean(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].mean().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df<merge>
learning_history = model.fit(X_train, y_train, batch_size = 128, epochs = 50, verbose = 1, validation_data =(X_val, y_val))
Digit Recognizer
11,252,037
def do_median(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].median().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df<merge>
score = model.evaluate(X_val, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) print("CNN Error: %.2f%%" %(100-score[1]*100))
Digit Recognizer
11,252,037
def do_std(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].std().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df<merge>
y_pred = model.predict(X_val )
Digit Recognizer
11,252,037
def do_sum(df, group_cols, counted, agg_name): gp = df[group_cols + [counted]].groupby(group_cols)[counted].sum().reset_index().rename( columns={counted: agg_name}) df = df.merge(gp, on=group_cols, how='left') del gp gc.collect() return df<categorify>
train_aug = ImageDataGenerator( featurewise_center = False, samplewise_center = False, featurewise_std_normalization = False, samplewise_std_normalization = False, zca_whitening = False, horizontal_flip = False, vertical_flip = False, fill_mode = 'nearest', rotation_range = 10, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1) train_aug.fit(X_train) train_gen = train_aug.flow(X_train, y_train, batch_size=64 )
Digit Recognizer
11,252,037
group = ['ORGANIZATION_TYPE', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'AGE_RANGE', 'CODE_GENDER'] df = do_median(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_MEDIAN') df = do_std(df, group, 'EXT_SOURCES_MEAN', 'GROUP_EXT_SOURCES_STD') df = do_mean(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_MEAN') df = do_std(df, group, 'AMT_INCOME_TOTAL', 'GROUP_INCOME_STD') df = do_mean(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_MEAN') df = do_std(df, group, 'CREDIT_TO_ANNUITY_RATIO', 'GROUP_CREDIT_TO_ANNUITY_STD') df = do_mean(df, group, 'AMT_CREDIT', 'GROUP_CREDIT_MEAN') df = do_mean(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_MEAN') df = do_std(df, group, 'AMT_ANNUITY', 'GROUP_ANNUITY_STD' )<categorify>
def get_newtriplecnn() : return Sequential([ Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same', input_shape = input_shape), Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same'), BatchNormalization() , MaxPool2D(pool_size=(2, 2)) , Dropout(0.25), Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'), Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'), BatchNormalization() , MaxPool2D(pool_size=(2, 2)) , Dropout(0.25), Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'), Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'), BatchNormalization() , MaxPool2D(pool_size=(2, 2)) , Dropout(0.25), Flatten() , Dense(512, activation='relu'), BatchNormalization() , Dropout(0.5), Dense(256, activation='relu'), BatchNormalization() , Dropout(0.4), Dense(64, activation='relu'), BatchNormalization() , Dropout(0.3), Dense(num_classes, activation = "softmax") ] )
Digit Recognizer
11,252,037
def label_encoder(df, categorical_columns=None):
    """Integer-encode categorical columns of ``df`` in place via ``pd.factorize``.

    When ``categorical_columns`` is falsy, every object-dtype column is
    encoded.  Returns the mutated frame together with the list of columns
    that were encoded.
    """
    if not categorical_columns:
        categorical_columns = df.select_dtypes(include='object').columns.tolist()
    for name in categorical_columns:
        # factorize assigns 0..k-1 in order of first appearance.
        codes, _ = pd.factorize(df[name])
        df[name] = codes
    return df, categorical_columns
callbacks1 = [ EarlyStopping(monitor = 'loss', patience = 6), ReduceLROnPlateau(monitor = 'loss', patience = 3), ModelCheckpoint('.. /working/model.best.hdf5', save_best_only=True) ]
Digit Recognizer
11,252,037
def drop_application_columns(df):
    """Drop application_{train,test} columns judged uninformative.

    Removes low-signal flag columns, redundant building statistics and most
    FLAG_DOCUMENT_* indicators.  ``df`` is mutated in place and returned.
    """
    drop_list = [
        'CNT_CHILDREN', 'CNT_FAM_MEMBERS', 'HOUR_APPR_PROCESS_START',
        'FLAG_EMP_PHONE', 'FLAG_MOBIL', 'FLAG_CONT_MOBILE', 'FLAG_EMAIL',
        'FLAG_PHONE', 'FLAG_OWN_REALTY', 'REG_REGION_NOT_LIVE_REGION',
        'REG_REGION_NOT_WORK_REGION', 'REG_CITY_NOT_WORK_CITY',
        'OBS_30_CNT_SOCIAL_CIRCLE', 'OBS_60_CNT_SOCIAL_CIRCLE',
        'AMT_REQ_CREDIT_BUREAU_DAY', 'AMT_REQ_CREDIT_BUREAU_MON',
        'AMT_REQ_CREDIT_BUREAU_YEAR', 'COMMONAREA_MODE', 'NONLIVINGAREA_MODE',
        'ELEVATORS_MODE', 'NONLIVINGAREA_AVG', 'FLOORSMIN_MEDI',
        'LANDAREA_MODE', 'NONLIVINGAREA_MEDI', 'LIVINGAPARTMENTS_MODE',
        'FLOORSMIN_AVG', 'LANDAREA_AVG', 'FLOORSMIN_MODE', 'LANDAREA_MEDI',
        'COMMONAREA_MEDI', 'YEARS_BUILD_AVG', 'COMMONAREA_AVG',
        'BASEMENTAREA_AVG', 'BASEMENTAREA_MODE', 'NONLIVINGAPARTMENTS_MEDI',
        'BASEMENTAREA_MEDI', 'LIVINGAPARTMENTS_AVG', 'ELEVATORS_AVG',
        'YEARS_BUILD_MEDI', 'ENTRANCES_MODE', 'NONLIVINGAPARTMENTS_MODE',
        'LIVINGAREA_MODE', 'LIVINGAPARTMENTS_MEDI', 'YEARS_BUILD_MODE',
        'YEARS_BEGINEXPLUATATION_AVG', 'ELEVATORS_MEDI', 'LIVINGAREA_MEDI',
        'YEARS_BEGINEXPLUATATION_MODE', 'NONLIVINGAPARTMENTS_AVG',
        'HOUSETYPE_MODE', 'FONDKAPREMONT_MODE', 'EMERGENCYSTATE_MODE',
    ]
    # Only FLAG_DOCUMENT_3, _8 and _18 are retained; all other document
    # flags in 2..21 are dropped.
    kept_docs = {3, 8, 18}
    drop_list.extend('FLAG_DOCUMENT_{}'.format(n) for n in range(2, 22) if n not in kept_docs)
    df.drop(drop_list, axis=1, inplace=True)
    return df
history = model.fit_generator(( train_gen), epochs = 100, steps_per_epoch = X_train.shape[0] // 64, validation_data =(X_val, y_val), callbacks = callbacks1, )
Digit Recognizer
11,252,037
df, le_encoded_cols = label_encoder(df, None) df = drop_application_columns(df )<categorify>
model = load_model('.. /working/model.best.hdf5' )
Digit Recognizer
11,252,037
df = pd.get_dummies(df )<load_from_csv>
score = model.evaluate(X_val, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) print("CNN Error: %.2f%%" %(100-score[1]*100))
Digit Recognizer
11,252,037
bureau = pd.read_csv(os.path.join(DATA_DIRECTORY, 'bureau.csv'))<feature_engineering>
output = model.predict(X_test) output = np.argmax(output, axis = 1) output = pd.Series(output, name="Label") submission = pd.concat([pd.Series(range(1,28001), name = "ImageId"), output], axis = 1) submission.to_csv("submission.csv", index=False )
Digit Recognizer
11,252,037
bureau['CREDIT_DURATION'] = -bureau['DAYS_CREDIT'] + bureau['DAYS_CREDIT_ENDDATE'] bureau['ENDDATE_DIF'] = bureau['DAYS_CREDIT_ENDDATE'] - bureau['DAYS_ENDDATE_FACT'] bureau['DEBT_PERCENTAGE'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_CREDIT_SUM_DEBT'] bureau['DEBT_CREDIT_DIFF'] = bureau['AMT_CREDIT_SUM'] - bureau['AMT_CREDIT_SUM_DEBT'] bureau['CREDIT_TO_ANNUITY_RATIO'] = bureau['AMT_CREDIT_SUM'] / bureau['AMT_ANNUITY']<categorify>
def load_data(path):
    # Read the four MNIST arrays out of a .npz archive.  The context
    # manager closes the underlying file once the arrays are extracted.
    with np.load(path)as f:
        x_train, y_train = f['x_train'], f['y_train']
        x_test, y_test = f['x_test'], f['y_test']
    return(x_train, y_train),(x_test, y_test)

# Module-level load of the offline MNIST copy bundled with the kernel;
# executed once at import time.
(x_train1, y_train1),(x_test1, y_test1)= load_data('.. /input/mnist-numpy/mnist.npz' )
Digit Recognizer
11,252,037
def one_hot_encoder(df, categorical_columns=None, nan_as_category=True):
    """One-hot encode categorical columns of ``df`` with ``pd.get_dummies``.

    When ``categorical_columns`` is falsy, all object-dtype columns are
    encoded.  ``nan_as_category`` adds an explicit ``<col>_nan`` indicator.
    Returns the encoded frame and the list of newly created column names.
    """
    before = set(df.columns)
    if not categorical_columns:
        categorical_columns = df.select_dtypes(include='object').columns.tolist()
    encoded = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)
    # Every column that did not exist beforehand is a dummy column.
    new_columns = [c for c in encoded.columns if c not in before]
    return encoded, new_columns
x_train1 = x_train1 / 255 x_test1 = x_test1 / 255 x_train1 = x_train1.reshape(-1, 28, 28, 1) x_test1 = x_test1.reshape(-1, 28, 28, 1) y_train1 = y_train1.reshape(y_train1.shape[0], 1) y_test1 = y_test1.reshape(y_test1.shape[0], 1 )
Digit Recognizer
11,252,037
def group(df_to_agg, prefix, aggregations, aggregate_by= 'SK_ID_CURR'):
    """Aggregate ``df_to_agg`` by ``aggregate_by`` and flatten the columns.

    Each aggregated column is renamed ``<prefix><column>_<STAT>`` (stat name
    upper-cased); ``aggregate_by`` is restored as a regular column.
    """
    grouped = df_to_agg.groupby(aggregate_by).agg(aggregations)
    flat_names = ['{}{}_{}'.format(prefix, col, stat.upper())
                  for col, stat in grouped.columns.tolist()]
    grouped.columns = pd.Index(flat_names)
    return grouped.reset_index()
Add_X = np.vstack(( x_train1, x_test1)) Add_y = np.vstack(( y_train1, y_test1)) Add_y = to_categorical(Add_y )
Digit Recognizer
11,252,037
def group_and_merge(df_to_agg, df_to_merge, prefix, aggregations, aggregate_by= 'SK_ID_CURR'):
    """Left-join the aggregations produced by ``group`` onto ``df_to_merge``.

    Rows of ``df_to_merge`` without a matching ``aggregate_by`` key keep NaN
    in the new aggregate columns.
    """
    return df_to_merge.merge(
        group(df_to_agg, prefix, aggregations, aggregate_by=aggregate_by),
        how='left',
        on=aggregate_by,
    )
train = pd.read_csv(".. /input/digit-recognizer/train.csv") X_train = train.drop(labels = ["label"], axis = 1) y_train = train['label'] y_train = to_categorical(y_train) X_train /= 255 X_train = X_train.values.reshape(-1, 28, 28, 1 )
Digit Recognizer
11,252,037
def get_bureau_balance(path, num_rows= None):
    # Build per-SK_ID_BUREAU features from the month-level bureau_balance
    # table: the share of months in each STATUS value plus MONTHS_BALANCE
    # summary stats.  NOTE(review): num_rows is accepted but unused here.
    bb = pd.read_csv(os.path.join(path, 'bureau_balance.csv'))
    # One-hot the STATUS column (object dtype) without a NaN category.
    bb, categorical_cols = one_hot_encoder(bb, nan_as_category= False)
    # Mean of each status dummy per credit = fraction of months in that status.
    bb_processed = bb.groupby('SK_ID_BUREAU')[categorical_cols].mean().reset_index()
    agg = {'MONTHS_BALANCE': ['min', 'max', 'mean', 'size']}
    bb_processed = group_and_merge(bb, bb_processed, '', agg, 'SK_ID_BUREAU')
    # Free the raw table eagerly; it can be large.
    del bb; gc.collect()
    return bb_processed
add_train_aug = ImageDataGenerator( featurewise_center = False, samplewise_center = False, featurewise_std_normalization = False, samplewise_std_normalization = False, zca_whitening = False, horizontal_flip = False, vertical_flip = False, fill_mode = 'nearest', rotation_range = 10, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1) add_train_aug.fit(Add_X) add_train_gen = add_train_aug.flow(Add_X, Add_y, batch_size=64 )
Digit Recognizer
11,252,037
bureau, categorical_cols = one_hot_encoder(bureau, nan_as_category= False) bureau = bureau.merge(get_bureau_balance(DATA_DIRECTORY), how='left', on='SK_ID_BUREAU') bureau['STATUS_12345'] = 0 for i in range(1,6): bureau['STATUS_12345'] += bureau['STATUS_{}'.format(i)]<merge>
add_callbacks = [ EarlyStopping(monitor = 'loss', patience = 6), ReduceLROnPlateau(monitor = 'loss', patience = 3), ModelCheckpoint('.. /working/additional_model.best.hdf5', save_best_only=True) ]
Digit Recognizer
11,252,037
features = ['AMT_CREDIT_MAX_OVERDUE', 'AMT_CREDIT_SUM_OVERDUE', 'AMT_CREDIT_SUM', 'AMT_CREDIT_SUM_DEBT', 'DEBT_PERCENTAGE', 'DEBT_CREDIT_DIFF', 'STATUS_0', 'STATUS_12345'] agg_length = bureau.groupby('MONTHS_BALANCE_SIZE')[features].mean().reset_index() agg_length.rename({feat: 'LL_' + feat for feat in features}, axis=1, inplace=True) bureau = bureau.merge(agg_length, how='left', on='MONTHS_BALANCE_SIZE') del agg_length; gc.collect()<define_variables>
def get_addcnn() :
    """Build the CNN used for the augmented(train + external MNIST)run:
    three conv stages(32/64/128 filters)and a 512-256-64 dense head.
    Uses module-level globals ``input_shape`` and ``num_classes``."""
    return Sequential([
        # Stage 1: two 3x3 convs with 32 filters, then 2x2 max-pool.
        Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same', input_shape = input_shape),
        Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same'),
        BatchNormalization() ,
        MaxPool2D(pool_size=(2, 2)) ,
        Dropout(0.25),
        # Stage 2: 64 filters.
        Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'),
        Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'),
        BatchNormalization() ,
        MaxPool2D(pool_size=(2, 2)) ,
        Dropout(0.25),
        # Stage 3: 128 filters.
        Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'),
        Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same'),
        BatchNormalization() ,
        MaxPool2D(pool_size=(2, 2)) ,
        Dropout(0.25),
        # Dense classifier head with decreasing width and dropout.
        Flatten() ,
        Dense(512, activation='relu'),
        BatchNormalization() ,
        Dropout(0.5),
        Dense(256, activation='relu'),
        BatchNormalization() ,
        Dropout(0.4),
        Dense(64, activation='relu'),
        BatchNormalization() ,
        Dropout(0.3),
        Dense(num_classes, activation = "softmax")
    ])
Digit Recognizer
11,252,037
BUREAU_AGG = { 'SK_ID_BUREAU': ['nunique'], 'DAYS_CREDIT': ['min', 'max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean', 'sum'], 'AMT_ANNUITY': ['mean'], 'DEBT_CREDIT_DIFF': ['mean', 'sum'], 'MONTHS_BALANCE_MEAN': ['mean', 'var'], 'MONTHS_BALANCE_SIZE': ['mean', 'sum'], 'STATUS_0': ['mean'], 'STATUS_1': ['mean'], 'STATUS_12345': ['mean'], 'STATUS_C': ['mean'], 'STATUS_X': ['mean'], 'CREDIT_ACTIVE_Active': ['mean'], 'CREDIT_ACTIVE_Closed': ['mean'], 'CREDIT_ACTIVE_Sold': ['mean'], 'CREDIT_TYPE_Consumer credit': ['mean'], 'CREDIT_TYPE_Credit card': ['mean'], 'CREDIT_TYPE_Car loan': ['mean'], 'CREDIT_TYPE_Mortgage': ['mean'], 'CREDIT_TYPE_Microloan': ['mean'], 'LL_AMT_CREDIT_SUM_OVERDUE': ['mean'], 'LL_DEBT_CREDIT_DIFF': ['mean'], 'LL_STATUS_12345': ['mean'], } BUREAU_ACTIVE_AGG = { 'DAYS_CREDIT': ['max', 'mean'], 'DAYS_CREDIT_ENDDATE': ['min', 'max'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM': ['max', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'], 'AMT_CREDIT_SUM_OVERDUE': ['max', 'mean'], 'DAYS_CREDIT_UPDATE': ['min', 'mean'], 'DEBT_PERCENTAGE': ['mean'], 'DEBT_CREDIT_DIFF': ['mean'], 'CREDIT_TO_ANNUITY_RATIO': ['mean'], 'MONTHS_BALANCE_MEAN': ['mean', 'var'], 'MONTHS_BALANCE_SIZE': ['mean', 'sum'], } BUREAU_CLOSED_AGG = { 'DAYS_CREDIT': ['max', 'var'], 'DAYS_CREDIT_ENDDATE': ['max'], 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM_OVERDUE': ['mean'], 'AMT_CREDIT_SUM': ['max', 'mean', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['max', 'sum'], 'DAYS_CREDIT_UPDATE': ['max'], 'ENDDATE_DIF': ['mean'], 'STATUS_12345': ['mean'], } BUREAU_LOAN_TYPE_AGG = { 'DAYS_CREDIT': ['mean', 'max'], 'AMT_CREDIT_MAX_OVERDUE': ['mean', 'max'], 'AMT_CREDIT_SUM': ['mean', 'max'], 'AMT_CREDIT_SUM_DEBT': ['mean', 'max'], 'DEBT_PERCENTAGE': ['mean'], 'DEBT_CREDIT_DIFF': ['mean'], 'DAYS_CREDIT_ENDDATE': 
['max'], } BUREAU_TIME_AGG = { 'AMT_CREDIT_MAX_OVERDUE': ['max', 'mean'], 'AMT_CREDIT_SUM_OVERDUE': ['mean'], 'AMT_CREDIT_SUM': ['max', 'sum'], 'AMT_CREDIT_SUM_DEBT': ['mean', 'sum'], 'DEBT_PERCENTAGE': ['mean'], 'DEBT_CREDIT_DIFF': ['mean'], 'STATUS_0': ['mean'], 'STATUS_12345': ['mean'], }<merge>
model = get_addcnn() model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy']) model.summary()
Digit Recognizer
11,252,037
agg_bureau = group(bureau, 'BUREAU_', BUREAU_AGG) active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1] agg_bureau = group_and_merge(active,agg_bureau,'BUREAU_ACTIVE_',BUREAU_ACTIVE_AGG) closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1] agg_bureau = group_and_merge(closed,agg_bureau,'BUREAU_CLOSED_',BUREAU_CLOSED_AGG) del active, closed; gc.collect() for credit_type in ['Consumer credit', 'Credit card', 'Mortgage', 'Car loan', 'Microloan']: type_df = bureau[bureau['CREDIT_TYPE_' + credit_type] == 1] prefix = 'BUREAU_' + credit_type.split(' ')[0].upper() + '_' agg_bureau = group_and_merge(type_df, agg_bureau, prefix, BUREAU_LOAN_TYPE_AGG) del type_df; gc.collect() for time_frame in [6, 12]: prefix = "BUREAU_LAST{}M_".format(time_frame) time_frame_df = bureau[bureau['DAYS_CREDIT'] >= -30*time_frame] agg_bureau = group_and_merge(time_frame_df, agg_bureau, prefix, BUREAU_TIME_AGG) del time_frame_df; gc.collect()<merge>
history = model.fit_generator(( add_train_gen), epochs = 100, steps_per_epoch = x_train1.shape[0] // 64, validation_data =(X_val, y_val), callbacks = add_callbacks, )
Digit Recognizer
11,252,037
sort_bureau = bureau.sort_values(by=['DAYS_CREDIT']) gr = sort_bureau.groupby('SK_ID_CURR')['AMT_CREDIT_MAX_OVERDUE'].last().reset_index() gr.rename({'AMT_CREDIT_MAX_OVERDUE': 'BUREAU_LAST_LOAN_MAX_OVERDUE'}, inplace=True) agg_bureau = agg_bureau.merge(gr, on='SK_ID_CURR', how='left') agg_bureau['BUREAU_DEBT_OVER_CREDIT'] = \ agg_bureau['BUREAU_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_AMT_CREDIT_SUM_SUM'] agg_bureau['BUREAU_ACTIVE_DEBT_OVER_CREDIT'] = \ agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_DEBT_SUM']/agg_bureau['BUREAU_ACTIVE_AMT_CREDIT_SUM_SUM']<merge>
model = load_model('additional_model.best.hdf5' )
Digit Recognizer
11,252,037
df = pd.merge(df, agg_bureau, on='SK_ID_CURR', how='left') del agg_bureau, bureau gc.collect()<load_from_csv>
score = model.evaluate(X_val, y_val, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) print("CNN Error: %.2f%%" %(100-score[1]*100))
Digit Recognizer
11,252,037
<define_variables><EOS>
output = model.predict(X_test) output = np.argmax(output, axis = 1) output = pd.Series(output, name="Label") submission = pd.concat([pd.Series(range(1,28001), name = "ImageId"), output], axis = 1) submission.to_csv("bonus_submission.csv", index=False )
Digit Recognizer
11,273,039
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<categorify>
print(f'Using Tensorflow Version : {tf.__version__}') print(f'Using Keras Version : {keras.__version__}' )
Digit Recognizer
11,273,039
ohe_columns = [ 'NAME_CONTRACT_STATUS', 'NAME_CONTRACT_TYPE', 'CHANNEL_TYPE', 'NAME_TYPE_SUITE', 'NAME_YIELD_GROUP', 'PRODUCT_COMBINATION', 'NAME_PRODUCT_TYPE', 'NAME_CLIENT_TYPE'] prev, categorical_cols = one_hot_encoder(prev, ohe_columns, nan_as_category= False )<feature_engineering>
train = pd.read_csv('.. /input/digit-recognizer/train.csv') test = pd.read_csv('.. /input/digit-recognizer/test.csv') train.head()
Digit Recognizer
11,273,039
prev['APPLICATION_CREDIT_DIFF'] = prev['AMT_APPLICATION'] - prev['AMT_CREDIT'] prev['APPLICATION_CREDIT_RATIO'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT'] prev['CREDIT_TO_ANNUITY_RATIO'] = prev['AMT_CREDIT']/prev['AMT_ANNUITY'] prev['DOWN_PAYMENT_TO_CREDIT'] = prev['AMT_DOWN_PAYMENT'] / prev['AMT_CREDIT'] total_payment = prev['AMT_ANNUITY'] * prev['CNT_PAYMENT'] prev['SIMPLE_INTERESTS'] =(total_payment/prev['AMT_CREDIT'] - 1)/prev['CNT_PAYMENT']<merge>
X = train.drop(['label'], axis = 1) y = to_categorical(train['label'].values )
Digit Recognizer
11,273,039
approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1] active_df = approved[approved['DAYS_LAST_DUE'] == 365243] active_pay = pay[pay['SK_ID_PREV'].isin(active_df['SK_ID_PREV'])] active_pay_agg = active_pay.groupby('SK_ID_PREV')[['AMT_INSTALMENT', 'AMT_PAYMENT']].sum() active_pay_agg.reset_index(inplace= True) active_pay_agg['INSTALMENT_PAYMENT_DIFF'] = active_pay_agg['AMT_INSTALMENT'] - active_pay_agg['AMT_PAYMENT'] active_df = active_df.merge(active_pay_agg, on= 'SK_ID_PREV', how= 'left') active_df['REMAINING_DEBT'] = active_df['AMT_CREDIT'] - active_df['AMT_PAYMENT'] active_df['REPAYMENT_RATIO'] = active_df['AMT_PAYMENT'] / active_df['AMT_CREDIT'] active_agg_df = group(active_df, 'PREV_ACTIVE_', PREVIOUS_ACTIVE_AGG) active_agg_df['TOTAL_REPAYMENT_RATIO'] = active_agg_df['PREV_ACTIVE_AMT_PAYMENT_SUM']/\ active_agg_df['PREV_ACTIVE_AMT_CREDIT_SUM'] del active_pay, active_pay_agg, active_df; gc.collect()<categorify>
X = X / 255.0 X_test = test / 255.0
Digit Recognizer
11,273,039
prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True) prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True) prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True) prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True) prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True )<feature_engineering>
datagen = ImageDataGenerator(rotation_range=10, zoom_range = 0.10, width_shift_range=0.1, height_shift_range=0.1 )
Digit Recognizer
11,273,039
prev['DAYS_LAST_DUE_DIFF'] = prev['DAYS_LAST_DUE_1ST_VERSION'] - prev['DAYS_LAST_DUE'] approved['DAYS_LAST_DUE_DIFF'] = approved['DAYS_LAST_DUE_1ST_VERSION'] - approved['DAYS_LAST_DUE']<define_variables>
AUG_test(X[0], y[0] )
Digit Recognizer
11,273,039
categorical_agg = {key: ['mean'] for key in categorical_cols}<merge>
def build_model():
    """Assemble and compile the all-convolutional digit classifier.

    Pooling is replaced by strided 5x5 convolutions; the model is compiled
    with Adam and categorical cross-entropy before being returned.
    """
    layers = [
        Conv2D(32, kernel_size=3, activation='relu', input_shape=(28, 28, 1)),
        BatchNormalization(),
        Conv2D(32, kernel_size=3, activation='relu'),
        BatchNormalization(),
        # Learned 2x downsampling instead of max-pooling.
        Conv2D(32, kernel_size=5, strides=2, padding='same', activation='relu'),
        BatchNormalization(),
        Dropout(0.4),
        Conv2D(64, kernel_size=3, activation='relu'),
        BatchNormalization(),
        Conv2D(64, kernel_size=3, activation='relu'),
        BatchNormalization(),
        Conv2D(64, kernel_size=5, strides=2, padding='same', activation='relu'),
        BatchNormalization(),
        Dropout(0.4),
        Conv2D(128, kernel_size=4, activation='relu'),
        BatchNormalization(),
        Flatten(),
        Dropout(0.4),
        Dense(10, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    return model
Digit Recognizer
11,273,039
agg_prev = group(prev, 'PREV_', {**PREVIOUS_AGG, **categorical_agg}) agg_prev = agg_prev.merge(active_agg_df, how='left', on='SK_ID_CURR') del active_agg_df; gc.collect()<merge>
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu )
Digit Recognizer
11,273,039
agg_prev = group_and_merge(approved, agg_prev, 'APPROVED_', PREVIOUS_APPROVED_AGG) refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1] agg_prev = group_and_merge(refused, agg_prev, 'REFUSED_', PREVIOUS_REFUSED_AGG) del approved, refused; gc.collect() <merge>
EPOCHS = 45 BATCH_SIZE = 16 * tpu_strategy.num_replicas_in_sync
Digit Recognizer
11,273,039
for loan_type in ['Consumer loans', 'Cash loans']: type_df = prev[prev['NAME_CONTRACT_TYPE_{}'.format(loan_type)] == 1] prefix = 'PREV_' + loan_type.split(" ")[0] + '_' agg_prev = group_and_merge(type_df, agg_prev, prefix, PREVIOUS_LOAN_TYPE_AGG) del type_df; gc.collect()<feature_engineering>
scores, History = [], [] pred = np.zeros(shape =(len(test), 10)) cv = KFold(n_splits=15, shuffle = True, random_state = 42) for fold,(train_index, val_index)in enumerate(cv.split(X)) : start = default_timer() K.clear_session() tf.tpu.experimental.initialize_tpu_system(tpu) X_train , y_train = X[train_index], y[train_index] X_val, y_val = X[val_index], y[val_index] Train_x, Train_y = None, None batch = 0 for x_batch, y_batch in datagen.flow(X_train, y_train, batch_size=BATCH_SIZE): if batch == 0: Train_x, Train_y = x_batch, y_batch elif batch >= X.shape[0] // BATCH_SIZE: break else: Train_x = np.concatenate(( Train_x, x_batch)) Train_y = np.concatenate(( Train_y, y_batch)) batch += 1 with tpu_strategy.scope() : model = build_model() annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x) sv = ModelCheckpoint(f'Model Fold {fold}.h5', monitor='val_accuracy', save_best_only=True, mode='max') history = model.fit(Train_x, Train_y, batch_size = BATCH_SIZE, epochs = EPOCHS, verbose = 0, callbacks=[annealer, sv], steps_per_epoch = X_train.shape[0]//BATCH_SIZE, validation_data =(X_val, y_val)) History.append(history.history) model = load_model(f'Model Fold {fold}.h5') score = model.evaluate(X_val, y_val, verbose = 0)[1] scores.append(score) pred += model.predict(X_test) time = round(default_timer() - start, 4) print(f'[INFO] Fold {fold + 1} val_accuracy : {round(score, 4)} - Time : {time} s') print() print(f'[INFO] Mean CV scores : {round(sum(scores)/len(scores), 4)}' )
Digit Recognizer
11,273,039
pay['LATE_PAYMENT'] = pay['DAYS_ENTRY_PAYMENT'] - pay['DAYS_INSTALMENT'] pay['LATE_PAYMENT'] = pay['LATE_PAYMENT'].apply(lambda x: 1 if x > 0 else 0) dpd_id = pay[pay['LATE_PAYMENT'] > 0]['SK_ID_PREV'].unique()<merge>
%%HTML <head> <meta name="viewport" content="width=device-width, initial-scale=1"> <style> body {font-family: Arial;} img {max-width:100%; height:auto} .tab { overflow: hidden; border: 1px solid background-color: } .tab button { background-color: float: left; border: none; outline: none; cursor: pointer; padding: 14px 16px; transition: 0.3s; font-size: 17px; } .tab button:hover { background-color: } .tab button.active { background-color: } .tabcontent { display: none; padding: 6px 12px; border: 1px solid border-top: none; } </style> </head> <body> <h1>History</h1> <p>Click on the buttons inside the tabbed menu</p> <div class="tab"> <button class="tablinks" onclick="openCity(event, 'loss')">Loss</button> <button class="tablinks" onclick="openCity(event, 'val_loss')">Val Loss</button> <button class="tablinks" onclick="openCity(event, 'accuracy')">Accuracy</button> <button class="tablinks" onclick="openCity(event, 'val_accuracy')">Val Accuracy</button> </div> <div id="loss" class="tabcontent"> <h3>Loss</h3> <p>Loss History</p> <img src="./loss.png" alt="loss.png"> </div> <div id="val_loss" class="tabcontent"> <h3>Val Loss</h3> <p>Val Loss History</p> <img src="./val_loss.png" alt="val_loss.png"> </div> <div id="accuracy" class="tabcontent"> <h3>Accuracy</h3> <p>Accuracy History</p> <img src="./accuracy.png" alt="accuracy.png"> </div> <div id="val_accuracy" class="tabcontent"> <h3>Val Accuracy</h3> <p>Val Accuracy History</p> <img src="./val_accuracy.png" alt="val_accuracy.png"> </div> <script> function openCity(evt, cityName){ var i, tabcontent, tablinks; tabcontent = document.getElementsByClassName("tabcontent"); for(i = 0; i < tabcontent.length; i++){ tabcontent[i].style.display = "none"; } tablinks = document.getElementsByClassName("tablinks"); for(i = 0; i < tablinks.length; i++){ tablinks[i].className = tablinks[i].className.replace(" active", ""); } document.getElementById(cityName ).style.display = "block"; evt.currentTarget.className += " active"; } </script> 
</body>
Digit Recognizer
11,273,039
agg_dpd = group_and_merge(prev[prev['SK_ID_PREV'].isin(dpd_id)], agg_prev, 'PREV_LATE_', PREVIOUS_LATE_PAYMENTS_AGG) del agg_dpd, dpd_id; gc.collect()<merge>
sub = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv') sub['Label'] = pred sub.head()
Digit Recognizer
11,273,039
<split><EOS>
sub.to_csv('Submission.csv', index=False )
Digit Recognizer
11,255,647
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<drop_column>
%matplotlib inline
Digit Recognizer
11,255,647
labels = train['TARGET'] train = train.drop(columns=['TARGET']) test = test.drop(columns=['TARGET'] )<prepare_output>
train = pd.read_csv('.. /input/digit-recognizer/train.csv') test = pd.read_csv('.. /input/digit-recognizer/test.csv' )
Digit Recognizer
11,255,647
feature = list(train.columns) train.replace([np.inf, -np.inf], np.nan, inplace=True) test.replace([np.inf, -np.inf], np.nan, inplace=True) test_df = test.copy() train_df = train.copy() train_df['TARGET'] = labels<normalization>
y = train['label']
Digit Recognizer
11,255,647
imputer = SimpleImputer(strategy = 'median') imputer.fit(train) train = imputer.transform(train) test = imputer.transform(test )<normalization>
train = train.drop(['label'],axis =1) train = train/255
Digit Recognizer
11,255,647
scaler = MinMaxScaler(feature_range =(0, 1)) scaler.fit(train) train = scaler.transform(train) est = scaler.transform(test )<train_model>
X=train
Digit Recognizer
11,255,647
log_reg = LogisticRegression(C = 0.0001) log_reg.fit(train, labels )<predict_on_test>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.01, random_state=42 )
Digit Recognizer
11,255,647
log_reg_pred = log_reg.predict_proba(test)[:, 1] <prepare_output>
m = keras.Sequential() m.add(keras.layers.Conv2D(32,(3, 3), activation="relu", kernel_initializer='he_uniform', input_shape=(28, 28, 1))) m.add(keras.layers.BatchNormalization()) m.add(keras.layers.Conv2D(32,(3, 3), activation="relu", padding='same', kernel_initializer='he_uniform')) m.add(keras.layers.BatchNormalization()) m.add(keras.layers.MaxPool2D(2, 2)) m.add(keras.layers.Dropout(0.20)) m.add(keras.layers.Conv2D(64,(3, 3), activation="relu", padding='same', kernel_initializer='he_uniform')) m.add(keras.layers.BatchNormalization()) m.add(keras.layers.Conv2D(64,(3, 3), activation="relu", padding='same', kernel_initializer='he_uniform')) m.add(keras.layers.BatchNormalization()) m.add(keras.layers.MaxPool2D(2, 2)) m.add(keras.layers.Dropout(0.2)) m.add(keras.layers.Conv2D(128,(3, 3), activation="relu", padding='same', kernel_initializer='he_uniform')) m.add(keras.layers.BatchNormalization()) m.add(keras.layers.Conv2D(128,(3, 3), activation="relu", padding='same', kernel_initializer='he_uniform')) m.add(keras.layers.BatchNormalization()) m.add(keras.layers.MaxPool2D(2, 2)) m.add(keras.layers.Dropout(0.2)) m.add(keras.layers.Flatten()) m.add(keras.layers.Dropout(0.2)) m.add(keras.layers.Dense(128, activation="relu")) m.add(keras.layers.Dense(10, activation="softmax")) m.compile(optimizer=keras.optimizers.RMSprop() , loss="sparse_categorical_crossentropy", metrics=["acc"] )
Digit Recognizer
11,255,647
submit = test_df[['SK_ID_CURR']] submit['TARGET'] = log_reg_pred <save_to_csv>
m.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=50000,batch_size=20, validation_batch_size=20 )
Digit Recognizer
11,255,647
submit.to_csv('log_reg.csv', index = False )<drop_column>
test = test.values.reshape(-1,28,28,1) test = test/255.0 pred = np.argmax(m.predict(test), axis = 1 )
Digit Recognizer
11,255,647
target = train_df.pop('TARGET') len_train = len(train_df) merged_df = pd.concat([train_df, test_df]) meta_df = merged_df.pop('SK_ID_CURR') del test_df, train_df gc.collect()<categorify>
Submission = pd.DataFrame({"ImageId" : range(1, 28001), "Label":pred} )
Digit Recognizer
11,255,647
categorical_feats = merged_df.columns[merged_df.dtypes == 'object'] print('Using %d prediction variables'%(merged_df.shape[1])) print('Encoding %d non-numeric columns...'%(merged_df.columns[merged_df.dtypes == 'object'].shape)) for feat in categorical_feats: merged_df[feat].fillna('MISSING', inplace=True) encoder = LabelBinarizer() new_columns = encoder.fit_transform(merged_df[feat]) i=0 for u in merged_df[feat].unique() : if i<new_columns.shape[1]: merged_df[feat+'_'+u]=new_columns[:,i] i+=1 merged_df.drop(feat, axis=1, inplace=True) print('Now using %d prediction variables'%(merged_df.shape[1])) <filter>
Submission.to_csv('Submission.csv',index=False )
Digit Recognizer
11,255,647
null_counts = merged_df.isnull().sum() null_counts = null_counts[null_counts > 0] null_ratios = null_counts / len(merged_df) <merge>
num = 3 print("Image label is:| ", pred[num]) plt.imshow(test[num][:,:,0] )
Digit Recognizer
14,376,120
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<install_modules>
input_path = Path('/kaggle/input/tabular-playground-series-jan-2021/' )
Tabular Playground Series - Jan 2021
14,376,120
!pip install.. /input/segmentation-models-pytorch-0-1-3/pretrainedmodels-0.7.4/pretrainedmodels-0.7.4 !pip install.. /input/segmentation-models-pytorch-0-1-3/efficientnet_pytorch-0.6.3/efficientnet_pytorch-0.6.3 !pip install.. /input/segmentation-models-pytorch-0-1-3/timm-0.3.2-py3-none-any.whl !pip install.. /input/segmentation-models-pytorch-0-1-3/segmentation_models.pytorch.0.1.3/segmentation_models.pytorch.0.1.3<import_modules>
train = pd.read_csv(input_path / 'train.csv', index_col='id') display(train.head() )
Tabular Playground Series - Jan 2021
14,376,120
import os import gc import cv2 import pdb import glob import pytz import warnings import pickle import random import numpy as np import pandas as pd from tqdm import tqdm_notebook as tqdm from torch.optim.lr_scheduler import ReduceLROnPlateau, ExponentialLR from sklearn.model_selection import KFold import torch import torch.nn as nn from torch.nn import functional as F import torch.backends.cudnn as cudnn from torch.utils.data import DataLoader, Dataset, sampler from albumentations.pytorch import ToTensorV2 import segmentation_models_pytorch as smp import tifffile as tiff import rasterio from rasterio.windows import Window import albumentations as A<import_modules>
test = pd.read_csv(input_path / 'test.csv', index_col='id') display(test.head() )
Tabular Playground Series - Jan 2021
14,376,120
Compose, CenterCrop, CLAHE, Resize, Normalize )<define_variables>
target = train.pop('target' )
Tabular Playground Series - Jan 2021
14,376,120
height, width = 1024, 1024 reduce = 2 THRESHOLD = 0.40 window = 2048 min_overlap = 256 DATA = '.. /input/hubmap-kidney-segmentation/test/' MODELS_EFFNET = [".. /input/effnetb4-20epochs-5folds/FOLD-0-model.pth", ".. /input/effnetb4-20epochs-5folds/FOLD-1-model.pth", ".. /input/effnetb4-20epochs-5folds/FOLD-2-model.pth", ".. /input/effnetb4-20epochs-5folds/FOLD-3-model.pth", ".. /input/effnetb4-20epochs-5folds/FOLD-4-model.pth" ] MODELS_RESNET = [".. /input/resnet34-5folds-original-data-0927/FOLD-0-model_resnet34.pth", ".. /input/resnet34-5folds-original-data-0927/FOLD-1-model_resnet34.pth", ".. /input/resnet34-5folds-original-data-0927/FOLD-2-model_resnet34.pth", ".. /input/resnet34-5folds-original-data-0927/FOLD-3-model_resnet34.pth", ".. /input/resnet34-5folds-original-data-0927/FOLD-4-model_resnet34.pth",] df_sample = pd.read_csv('.. /input/hubmap-kidney-segmentation/sample_submission.csv') batch_size = 15 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )<categorify>
X_train, X_test, y_train, y_test = train_test_split(train, target, train_size=0.80 )
Tabular Playground Series - Jan 2021
14,376,120
def rle_encode_less_memory(img): pixels = img.T.flatten() pixels[0] = 0 pixels[-1] = 0 runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs )<choose_model_class>
from sklearn.dummy import DummyRegressor from sklearn.svm import SVR from sklearn.preprocessing import StandardScaler from sklearn.datasets import make_regression from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import Ridge from sklearn.linear_model import SGDRegressor from sklearn.linear_model import BayesianRidge from sklearn.linear_model import LassoLars from sklearn.linear_model import ARDRegression from sklearn.linear_model import PassiveAggressiveRegressor from sklearn.linear_model import TheilSenRegressor from sklearn.linear_model import LinearRegression from lightgbm import LGBMRegressor from xgboost import XGBRegressor
Tabular Playground Series - Jan 2021
14,376,120
class HuBMAPEffNet(nn.Module): def __init__(self): super(HuBMAPEffNet, self ).__init__() self.cnn_model = smp.Unet('efficientnet-b4', classes=1, encoder_weights=None) def forward(self, imgs): img_segs = self.cnn_model(imgs) return img_segs class HuBMAPResNet(nn.Module): def __init__(self): super(HuBMAPResNet, self ).__init__() self.cnn_model = smp.Unet('resnet34', classes=1, encoder_weights=None) def forward(self, imgs): img_segs = self.cnn_model(imgs) return img_segs<load_pretrained>
def FitAndScoreModel(df, name, model, X_tr, y_tr, X_tst, y_tst):
    """Fit ``model`` on the training split, score it on the held-out split
    and append the result to the comparison frame.

    Parameters
    ----------
    df : DataFrame with 'Model' and 'MSE' columns collecting results.
    name : label stored alongside the score.
    model : any estimator exposing fit/predict (sklearn-style).
    X_tr, y_tr : training features and target.
    X_tst, y_tst : evaluation features and target.

    Returns a new frame with one extra row; the input frame is not mutated
    (matching the original ``DataFrame.append`` semantics).
    """
    model.fit(X_tr, y_tr)
    Y_pred = model.predict(X_tst)
    # squared=False -> RMSE, the competition metric.
    score = mean_squared_error(y_tst, Y_pred, squared=False)
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # build the new row explicitly and concatenate instead.
    row = pd.DataFrame([{'Model': name, 'MSE': score}])
    df = pd.concat([df, row], ignore_index=True)
    return df
Tabular Playground Series - Jan 2021