kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
1,808,700
y_pred_train = LR_model.predict(X_train) y_pred_test = LR_model.predict(X_test )<feature_engineering>
df.pivot_table(values = 'Age', index = ['Pclass', 'SibSp', 'Parch'], aggfunc = 'median' )
Titanic - Machine Learning from Disaster
1,808,700
y_pred_test[y_pred_test>1] = 1 y_pred_test[y_pred_test<0] = 0<save_to_csv>
df.Age.isnull().sum()
Titanic - Machine Learning from Disaster
1,808,700
df_test['winPlacePerc'] = y_pred_test submission = df_test[['Id', 'winPlacePerc']] submission.to_csv('submission.csv', index=False )<load_from_csv>
age_null = df.Age.isnull() group_med_age = df.pivot_table(values = 'Age', index = ['Pclass', 'SibSp'], aggfunc = 'median') df.loc[age_null, 'Age'] = df.loc[age_null, ['Pclass', 'SibSp']].apply(lambda x: group_med_age.loc[(group_med_age.index.get_level_values('Pclass')== x.Pclass)&(group_med_age.index.get_level_values('SibSp')== x.SibSp)].Age.values[0], axis = 1 )
Titanic - Machine Learning from Disaster
1,808,700
train=pd.read_csv(".. /input/train_V2.csv") test=pd.read_csv(".. /input/test_V2.csv" )<categorify>
df.Age.isnull().sum()
Titanic - Machine Learning from Disaster
1,808,700
le=LabelEncoder() enc=OneHotEncoder() train.loc[(train.matchType!='solo')&(train.matchType!='duo')&(train.matchType!='squad')&(train.matchType!='solo-fpp')&(train.matchType!='duo-fpp')&(train.matchType!='squad-fpp'),'matchType']='other' train['matchType']=train['matchType'].map({'solo':0 , 'duo':1, 'squad':2, 'solo-fpp':3, 'duo-fpp':4, 'squad-fpp':5,'other':6} )<count_missing_values>
print("Cabin has", df.Cabin.isnull().sum() , "missing values out of", len(df))
Titanic - Machine Learning from Disaster
1,808,700
train.dropna(inplace=True) train.isnull().sum()<categorify>
df.Cabin.fillna('O', inplace = True )
Titanic - Machine Learning from Disaster
1,808,700
data=enc.fit(train[['matchType']]) temp=enc.transform(train[['matchType']] )<create_dataframe>
df.isnull().sum()
Titanic - Machine Learning from Disaster
1,808,700
temp1=pd.DataFrame(temp.toarray() ,columns=["solo", "duo", "squad", "solo-fpp", "duo-fpp", "squad-fpp","other"]) temp1=temp1.set_index(train.index.values) temp1 train=pd.concat([train,temp1],axis=1 )<drop_column>
df['Fare_log'] = df.Fare.map(lambda i: np.log(i)if i > 0 else 0 )
Titanic - Machine Learning from Disaster
1,808,700
train['killsasist']=train['kills']+train['assists']+train['roadKills'] train['total_distance']=train['swimDistance']+train['rideDistance']+train['walkDistance'] train['external_booster']=train['boosts']+train['weaponsAcquired']+train['heals'] train=train.drop(['assists','kills','swimDistance','rideDistance','walkDistance','boosts','weaponsAcquired','heals','roadKills','rankPoints'],axis=1 )<drop_column>
df['Family_size'] = 1 + df.Parch + df.SibSp df['Alone'] = np.where(df.Family_size == 1, 1, 0 )
Titanic - Machine Learning from Disaster
1,808,700
train=train.drop(['killPoints','maxPlace','winPoints'],axis=1 )<categorify>
print(df.Family_size.value_counts()) print(df.Alone.value_counts() )
Titanic - Machine Learning from Disaster
1,808,700
train['Players_all']=train.groupby('matchId')['Id'].transform('count') train['players_group']=train.groupby('groupId')['Id'].transform('count' )<prepare_x_and_y>
df.loc[df['Family_size'] == 1, 'Family_size_bin'] = 0 df.loc[(df['Family_size'] >= 2)&(df['Family_size'] <= 4), 'Family_size_bin'] = 1 df.loc[df['Family_size'] >=5, 'Family_size_bin'] = 2
Titanic - Machine Learning from Disaster
1,808,700
Y=train.winPlacePerc train = train.drop(["Id", "groupId", "matchId","winPlacePerc"], axis=1) del train['matchType'] train.head()<train_model>
df['Title'] = df.Name.str.split(", ", expand = True)[1].str.split(".", expand = True)[0] df.Title.value_counts()
Titanic - Machine Learning from Disaster
1,808,700
d_train = lgb.Dataset(train, label=Y) params = {} params['learning_rate'] = 0.05 params['boosting_type'] = 'gbdt' params['objective'] = 'regression' params['metric'] = 'mae' params['sub_feature'] = 0.9 params['num_leaves'] = 511 params['min_data'] = 1 params['max_depth'] = 30 params['min_gain_to_split']= 0.00001 clf = lgb.train(params, d_train,2000 )<categorify>
minor_titles = df.Title.value_counts() <= 4 df['Title'] = df.Title.apply(lambda x: 'Others' if minor_titles.loc[x] == True else x) df.Title.value_counts()
Titanic - Machine Learning from Disaster
1,808,700
test.loc[(test.matchType!='solo')&(test.matchType!='duo')&(test.matchType!='squad')&(test.matchType!='solo-fpp')&(test.matchType!='duo-fpp')&(test.matchType!='squad-fpp'),'matchType']='other' test['matchType']=test['matchType'].map({'solo':0 , 'duo':1, 'squad':2, 'solo-fpp':3, 'duo-fpp':4, 'squad-fpp':5,'other':6} )<categorify>
df['Fare_bin'] = pd.qcut(df.Fare, 4, labels = [0,1,2,3] ).astype(int) df['Age_bin'] = pd.cut(df.Age.astype(int), 5, labels = [0,1,2,3,4] ).astype(int )
Titanic - Machine Learning from Disaster
1,808,700
data_test=enc.fit(test[['matchType']]) temp_test=enc.transform(test[['matchType']]) temp2=pd.DataFrame(temp_test.toarray() ,columns=["solo", "duo", "squad", "solo-fpp", "duo-fpp", "squad-fpp","other"]) temp2=temp2.set_index(test.index.values) temp2 test=pd.concat([test,temp2],axis=1) del test['matchType'] <drop_column>
label = LabelEncoder() df['Title'] = label.fit_transform(df.Title) df['Sex'] = label.fit_transform(df.Sex) df['Embarked'] = label.fit_transform(df.Embarked) df['Cabin'] = label.fit_transform(df.Cabin )
Titanic - Machine Learning from Disaster
1,808,700
test['killsasist']=test['kills']+test['assists']+test['roadKills'] test['total_distance']=test['swimDistance']+test['rideDistance']+test['walkDistance'] test['external_booster']=test['boosts']+test['weaponsAcquired']+test['heals'] test=test.drop(['assists','kills','swimDistance','rideDistance','walkDistance','boosts','weaponsAcquired','heals','roadKills','rankPoints'],axis=1 )<drop_column>
corr_columns = list(df.drop(['Name', 'PassengerId', 'Ticket', 'label'], axis = 1 ).columns )
Titanic - Machine Learning from Disaster
1,808,700
test=test.drop(['killPoints','maxPlace','winPoints'],axis=1 )<categorify>
df['Ticket'] = df.Ticket.map(lambda x: re.sub(r'\W+', '', x))
Titanic - Machine Learning from Disaster
1,808,700
test['Players_all']=test.groupby('matchId')['Id'].transform('count') test['players_group']=test.groupby('groupId')['Id'].transform('count' )<drop_column>
Ticket = [] for i in list(df.Ticket): if not i.isdigit() : Ticket.append(i[:2]) else: Ticket.append("X") df['Ticket'] = Ticket
Titanic - Machine Learning from Disaster
1,808,700
test_id=test.Id test = test.drop(["Id", "groupId", "matchId"], axis=1) test.head()<save_to_csv>
df = pd.get_dummies(df, columns = ['Ticket'], prefix = 'T' )
Titanic - Machine Learning from Disaster
1,808,700
out=clf.predict(test) submission=pd.DataFrame({'Id':test_id,'winPlacePerc':out}) submission.to_csv('submission.csv', index=False )<import_modules>
cat_variables = [x for x in df.columns if df.dtypes[x] == 'object'] cat_variables
Titanic - Machine Learning from Disaster
1,808,700
from IPython.display import Image<install_modules>
df.drop(['Name', 'PassengerId'], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster
1,808,700
!pip install image-classifiers<import_modules>
train = df.loc[df.label == 'train'].drop('label', axis = 1) test = df.loc[df.label == 'test'].drop(['label', 'Survived'], axis = 1 )
Titanic - Machine Learning from Disaster
1,808,700
tf.__version__<define_variables>
X_train = train.drop(['Survived'], axis = 1) y_train = train['Survived'].astype(int) X_test = test
Titanic - Machine Learning from Disaster
1,808,700
SEED = 42 def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) tf.random.set_seed(seed) seed_everything(SEED )<choose_model_class>
kfold = StratifiedKFold(n_splits = 5 )
Titanic - Machine Learning from Disaster
1,808,700
ResNet34, preprocess_input = Classifiers.get('resnet34' )<init_hyperparams>
classifiers = [] classifiers.append(KNeighborsClassifier()) classifiers.append(LinearDiscriminantAnalysis()) classifiers.append(LogisticRegression(random_state = 0)) classifiers.append(LinearSVC(random_state = 0)) classifiers.append(SVC(random_state = 0)) classifiers.append(RandomForestClassifier(random_state = 0)) classifiers.append(ExtraTreesClassifier(random_state = 0)) classifiers.append(XGBClassifier(random_state = 0)) classifiers.append(LGBMClassifier(random_state = 0)) classifiers.append(MLPClassifier()) cv_results = [] for classifier in classifiers: cv_results.append(cross_val_score(classifier, X_train, y_train, scoring = 'accuracy', cv = kfold, n_jobs = -1)) cv_means = [] cv_std = [] for cv_result in cv_results: cv_means.append(cv_result.mean()) cv_std.append(cv_result.std()) cv_res = pd.DataFrame({'CV_means':cv_means, 'CV_std':cv_std, 'Algorithm':['KNN', 'LinearDiscriminantAnalysis', 'LogisticRegression', 'LinearSVC', 'SVC', 'RandomForest', 'ExtraTrees', 'XGB', 'LGB', 'MLP']})
Titanic - Machine Learning from Disaster
1,808,700
cfg = { 'parse_params': { 'cut_time': 10, }, 'data_params': { 'sample_time': 6, 'spec_fmax': 24000.0, 'spec_fmin': 40.0, 'spec_mel': 384, 'mel_power': 2, 'img_shape':(384, 768) }, 'model_params': { 'batchsize_per_tpu': 18, 'iteration_per_epoch': 64, 'epoch': 18, 'arch': ResNet34, 'arch_preprocess': preprocess_input, 'freeze_to': 0, 'loss': { 'fn': tfa.losses.SigmoidFocalCrossEntropy, 'params': {}, }, 'optim': { 'fn': tfa.optimizers.RectifiedAdam, 'params': {'lr': 2e-3, 'total_steps': 18*64, 'warmup_proportion': 0.3, 'min_lr': 1e-6}, }, 'mixup': True } }<define_variables>
LDA_best = LinearDiscriminantAnalysis().fit(X_train, y_train )
Titanic - Machine Learning from Disaster
1,808,700
strategy = tf.distribute.experimental.TPUStrategy(tpu) AUTOTUNE = tf.data.experimental.AUTOTUNE GCS_DS_PATH = KaggleDatasets().get_gcs_path('rfcx-species-audio-detection') TRAIN_TFREC = GCS_DS_PATH + "/tfrecords/train" TEST_TFREC = GCS_DS_PATH + "/tfrecords/test"<define_variables>
RF = RandomForestClassifier(random_state = 0) RF_params = {'n_estimators' : [10,50,100], 'criterion' : ['gini', 'entropy'], 'max_depth' : [5,8,None], 'min_samples_split' : [2,5,8], 'min_samples_leaf' : [1,3,5], 'max_features' : ['auto', 'log2', None]} GS_RF = GridSearchCV(RF, param_grid = RF_params, cv = kfold, scoring = 'accuracy', n_jobs = -1, verbose = 1) GS_RF.fit(X_train, y_train) RF_best = GS_RF.best_estimator_ print("Best parameters :", RF_best) print("Best score :", GS_RF.best_score_ )
Titanic - Machine Learning from Disaster
1,808,700
CUT = cfg['parse_params']['cut_time'] SR = 48000 TIME = cfg['data_params']['sample_time'] FMAX = cfg['data_params']['spec_fmax'] FMIN = cfg['data_params']['spec_fmin'] N_MEL = cfg['data_params']['spec_mel'] HEIGHT, WIDTH = cfg['data_params']['img_shape'] CLASS_N = 24<create_dataframe>
ET = ExtraTreesClassifier(random_state = 0) ET_params = {'n_estimators' : [10,50,100], 'criterion' : ['gini', 'entropy'], 'max_depth' : [5,8,None], 'min_samples_split' : [2,5,8], 'min_samples_leaf' : [1,3,5], 'max_features' : ['auto', 'log2', None]} GS_ET = GridSearchCV(ET, param_grid = ET_params, cv= kfold, scoring = 'accuracy', n_jobs = -1, verbose = 1) GS_ET.fit(X_train, y_train) ET_best = GS_ET.best_estimator_ print('Best parameters :', ET_best) print('Best score :', GS_ET.best_score_ )
Titanic - Machine Learning from Disaster
1,808,700
raw_dataset = tf.data.TFRecordDataset([TRAIN_TFREC + '/00-148.tfrec']) raw_dataset<categorify>
warnings.filterwarnings('ignore' )
Titanic - Machine Learning from Disaster
1,808,700
feature_description = { 'recording_id': tf.io.FixedLenFeature([], tf.string, default_value=''), 'audio_wav': tf.io.FixedLenFeature([], tf.string, default_value=''), 'label_info': tf.io.FixedLenFeature([], tf.string, default_value=''), } parse_dtype = { 'audio_wav': tf.float32, 'recording_id': tf.string, 'species_id': tf.int32, 'songtype_id': tf.int32, 't_min': tf.float32, 'f_min': tf.float32, 't_max': tf.float32, 'f_max':tf.float32, 'is_tp': tf.int32 } @tf.function def _parse_function(example_proto): sample = tf.io.parse_single_example(example_proto, feature_description) wav, _ = tf.audio.decode_wav(sample['audio_wav'], desired_channels=1) label_info = tf.strings.split(sample['label_info'], sep='"')[1] labels = tf.strings.split(label_info, sep=';') @tf.function def _cut_audio(label): items = tf.strings.split(label, sep=',') spid = tf.squeeze(tf.strings.to_number(items[0], tf.int32)) soid = tf.squeeze(tf.strings.to_number(items[1], tf.int32)) tmin = tf.squeeze(tf.strings.to_number(items[2])) fmin = tf.squeeze(tf.strings.to_number(items[3])) tmax = tf.squeeze(tf.strings.to_number(items[4])) fmax = tf.squeeze(tf.strings.to_number(items[5])) tp = tf.squeeze(tf.strings.to_number(items[6], tf.int32)) tmax_s = tmax * tf.cast(SR, tf.float32) tmin_s = tmin * tf.cast(SR, tf.float32) cut_s = tf.cast(CUT * SR, tf.float32) all_s = tf.cast(60 * SR, tf.float32) tsize_s = tmax_s - tmin_s cut_min = tf.cast( tf.maximum(0.0, tf.minimum(tmin_s -(cut_s - tsize_s)/ 2, tf.minimum(tmax_s +(cut_s - tsize_s)/ 2, all_s)- cut_s) ), tf.int32 ) cut_max = cut_min + CUT * SR _sample = { 'audio_wav': tf.reshape(wav[cut_min:cut_max], [CUT*SR]), 'recording_id': sample['recording_id'], 'species_id': spid, 'songtype_id': soid, 't_min': tmin - tf.cast(cut_min, tf.float32)/tf.cast(SR, tf.float32), 'f_min': fmin, 't_max': tmax - tf.cast(cut_min, tf.float32)/tf.cast(SR, tf.float32), 'f_max': fmax, 'is_tp': tp } return _sample samples = tf.map_fn(_cut_audio, labels, dtype=parse_dtype) return samples 
parsed_dataset = raw_dataset.map(_parse_function ).unbatch()<feature_engineering>
XGB = XGBClassifier(random_state = 0) XGB_params = {'n_estimators' : [100,200,500], 'max_depth' : [3,4,5], 'learning_rate' : [0.01,0.05,0.1,0.2], 'booster' : ['gbtree', 'gblinear', 'dart']} GS_XGB = GridSearchCV(XGB, param_grid = XGB_params, cv= kfold, scoring = 'accuracy', n_jobs = -1, verbose = 1) GS_XGB.fit(X_train, y_train) XGB_best = GS_XGB.best_estimator_ print('Best parameters :', XGB_best) print('Best score :', GS_XGB.best_score_ )
Titanic - Machine Learning from Disaster
1,808,700
@tf.function def _cut_wav(x): cut_min = tf.random.uniform([], maxval=(CUT-TIME)*SR, dtype=tf.int32) cut_max = cut_min + TIME * SR cutwave = tf.reshape(x['audio_wav'][cut_min:cut_max], [TIME*SR]) y = {} y.update(x) y['audio_wav'] = cutwave y['t_min'] = tf.maximum(0.0, x['t_min'] - tf.cast(cut_min, tf.float32)/ SR) y['t_max'] = tf.maximum(0.0, x['t_max'] - tf.cast(cut_min, tf.float32)/ SR) return y @tf.function def _cut_wav_val(x): cut_min =(CUT-TIME)*SR // 2 cut_max = cut_min + TIME * SR cutwave = tf.reshape(x['audio_wav'][cut_min:cut_max], [TIME*SR]) y = {} y.update(x) y['audio_wav'] = cutwave y['t_min'] = tf.maximum(0.0, x['t_min'] - tf.cast(cut_min, tf.float32)/ SR) y['t_max'] = tf.maximum(0.0, x['t_max'] - tf.cast(cut_min, tf.float32)/ SR) return y<feature_engineering>
LGB = LGBMClassifier(random_state = 0) LGB_params = {'n_estimators' : [100,200,500], 'max_depth' : [5,8,-1], 'learning_rate' : [0.01,0.05,0.1,0.2], 'boosting_type' : ['gbdt', 'goss', 'dart']} GS_LGB = GridSearchCV(LGB, param_grid = LGB_params, cv= kfold, scoring = 'accuracy', n_jobs = -1, verbose = 1) GS_LGB.fit(X_train, y_train) LGB_best = GS_LGB.best_estimator_ print('Best parameters :', LGB_best) print('Best score :', GS_LGB.best_score_ )
Titanic - Machine Learning from Disaster
1,808,700
@tf.function def _filtTP(x): return x['is_tp'] == 1<data_type_conversions>
MLP = MLPClassifier(random_state = 0) MLP_params = {'hidden_layer_sizes' : [[10], [10,10], [10,100], [100,100]], 'activation' : ['relu', 'tanh', 'logistic'], 'alpha' : [0.0001,0.001,0.01]} GS_MLP = GridSearchCV(MLP, param_grid = MLP_params, cv= kfold, scoring = 'accuracy', n_jobs = -1, verbose = 1) GS_MLP.fit(X_train, y_train) MLP_best = GS_MLP.best_estimator_ print('Best parameters :', MLP_best) print('Best score :', GS_MLP.best_score_ )
Titanic - Machine Learning from Disaster
1,808,700
def show_wav(sample, ax): wav = sample["audio_wav"].numpy() rate = SR ax.plot(np.arange(len(wav)) / rate, wav) ax.set_title( sample["recording_id"].numpy().decode() +("/%d" % sample["species_id"]) +("TP" if sample["is_tp"] else "FP")) return Audio(( wav * 2**15 ).astype(np.int16), rate=rate) fig, ax = plt.subplots(figsize=(15, 3)) show_wav(next(iter(parsed_dataset)) , ax )<normalization>
voting = VotingClassifier(estimators = [['LDA', LDA_best], ["MLP", MLP_best], ['RFC', RF_best], ['ETC', ET_best], ['XGB', XGB_best], ['LGB', LGB_best]], voting = 'soft', n_jobs = -1) voting = voting.fit(X_train, y_train )
Titanic - Machine Learning from Disaster
1,808,700
<categorify><EOS>
results = pd.DataFrame(test_passengerId, columns = ['PassengerId'] ).assign(Survived = pd.Series(voting.predict(X_test))) results.to_csv('models_voting.csv', index = None )
Titanic - Machine Learning from Disaster
9,251,987
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<normalization>
warnings.filterwarnings('ignore') pd.options.display.max_columns = 50 plt.style.use('ggplot')
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _preprocess_img(x, training=False, test=False): image = tf.expand_dims(x, axis=-1) image = tf.image.resize(image, [HEIGHT, WIDTH]) image = tf.image.per_image_standardization(image) @tf.function def _specaugment(image): ERASE_TIME = 50 ERASE_MEL = 16 image = tf.expand_dims(image, axis=0) xoff = tf.random.uniform([2], minval=ERASE_TIME//2, maxval=WIDTH-ERASE_TIME//2, dtype=tf.int32) xsize = tf.random.uniform([2], minval=ERASE_TIME//2, maxval=ERASE_TIME, dtype=tf.int32) yoff = tf.random.uniform([2], minval=ERASE_MEL//2, maxval=HEIGHT-ERASE_MEL//2, dtype=tf.int32) ysize = tf.random.uniform([2], minval=ERASE_MEL//2, maxval=ERASE_MEL, dtype=tf.int32) image = tfa.image.cutout(image, [HEIGHT, xsize[0]], offset=[HEIGHT//2, xoff[0]]) image = tfa.image.cutout(image, [HEIGHT, xsize[1]], offset=[HEIGHT//2, xoff[1]]) image = tfa.image.cutout(image, [ysize[0], WIDTH], offset=[yoff[0], WIDTH//2]) image = tfa.image.cutout(image, [ysize[1], WIDTH], offset=[yoff[1], WIDTH//2]) image = tf.squeeze(image, axis=0) return image if training: gau = tf.keras.layers.GaussianNoise(0.3) image = tf.cond(tf.random.uniform([])< 0.5, lambda: gau(image, training=True), lambda: image) image = tf.image.random_brightness(image, 0.2) image = tf.image.random_flip_left_right(image) image = tf.cond(tf.random.uniform([])< 0.5, lambda: _specaugment(image), lambda: image) if test: pass image =(image - tf.reduce_min(image)) /(tf.reduce_max(image)- tf.reduce_min(image)) * 255.0 image = tf.image.grayscale_to_rgb(image) image = cfg['model_params']['arch_preprocess'](image) return image @tf.function def _preprocess(x): image = _preprocess_img(x['input'], training=True, test=False) return(image, x["target"]) @tf.function def _preprocess_val(x): image = _preprocess_img(x['input'], training=False, test=False) return(image, x["target"]) @tf.function def _preprocess_test(x): image = _preprocess_img(x['audio_spec'], training=False, test=True) return(image, x["recording_id"] )<choose_model_class>
v_train = pd.read_csv('/kaggle/input/titanic/train.csv') v_test = pd.read_csv('/kaggle/input/titanic/test.csv') idx = len(v_train )
Titanic - Machine Learning from Disaster
9,251,987
def create_model() : with strategy.scope() : backbone = cfg['model_params']['arch'](( 224, 224, 3), include_top=False, weights='imagenet') if cfg['model_params']['freeze_to'] is None: for layer in backbone.layers: layer.trainable = False else: for layer in backbone.layers[:cfg['model_params']['freeze_to']]: layer.trainable = False model = tf.keras.Sequential([ backbone, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(1024, activation='relu', kernel_initializer=tf.keras.initializers.he_normal()), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(CLASS_N, bias_initializer=tf.keras.initializers.Constant(-2.))]) return model model = create_model() model.summary()<prepare_x_and_y>
v_train.drop('PassengerId', axis=1, inplace=True) v_test.drop('PassengerId', axis=1, inplace=True) v_merged = pd.concat([v_train, v_test], sort=False ).reset_index(drop=True )
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _mixup(inp, targ): indice = tf.range(len(inp)) indice = tf.random.shuffle(indice) sinp = tf.gather(inp, indice, axis=0) starg = tf.gather(targ, indice, axis=0) alpha = 0.2 t = tf.compat.v1.distributions.Beta(alpha, alpha ).sample([len(inp)]) tx = tf.reshape(t, [-1, 1, 1, 1]) ty = tf.reshape(t, [-1, 1]) x = inp * tx + sinp *(1-tx) y = targ * ty + starg *(1-ty) return x, y<create_dataframe>
def get_combined_data() : train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') targets = train.Survived combined = train.append(test) combined.reset_index(inplace=True, drop=True) return combined
Titanic - Machine Learning from Disaster
9,251,987
tfrecs = sorted(tf.io.gfile.glob(TRAIN_TFREC + '/*.tfrec')) parsed_trainval =(tf.data.TFRecordDataset(tfrecs, num_parallel_reads=AUTOTUNE) .map(_parse_function, num_parallel_calls=AUTOTUNE ).unbatch() .filter(_filtTP ).enumerate() )<data_type_conversions>
def process_family() : global combined combined['FamilySize'] = combined['Parch'] + combined['SibSp'] + 1 combined['Alone'] = combined['FamilySize'].map(lambda s: 1 if s == 1 else 0) status('Family') return combined
Titanic - Machine Learning from Disaster
9,251,987
indices = [] spid = [] recid = [] for i, sample in tqdm(parsed_trainval.prefetch(AUTOTUNE)) : indices.append(i.numpy()) spid.append(sample['species_id'].numpy()) recid.append(sample['recording_id'].numpy().decode() )<create_dataframe>
def family_survival() : global combined combined['Last_Name'] = combined['Name'].apply( lambda x: str.split(x, ",")[0]) default_survival_rate = 0.5 combined['Family_Survival'] = default_survival_rate for grp, grp_df in combined[[ 'Survived', 'Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId', 'SibSp', 'Parch', 'Age', 'Cabin' ]].groupby(['Last_Name', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin == 0.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 0 for _, grp_df in combined.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['Family_Survival'] == 0)|( row['Family_Survival'] == 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin == 0.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 0 status('FamilySurvival') return combined
Titanic - Machine Learning from Disaster
9,251,987
table = pd.DataFrame({'indices': indices, 'species_id': spid, 'recording_id': recid}) table<categorify>
def get_titles() : title_dictionary = { 'Capt': 'Dr/Clergy/Mil', 'Col': 'Dr/Clergy/Mil', 'Major': 'Dr/Clergy/Mil', 'Jonkheer': 'Honorific', 'Don': 'Honorific', 'Dona': 'Honorific', 'Sir': 'Honorific', 'Dr': 'Dr/Clergy/Mil', 'Rev': 'Dr/Clergy/Mil', 'the Countess': 'Honorific', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Honorific' } combined['Title'] = combined['Name'].map( lambda name: name.split(',')[1].split('.')[0].strip()) combined['Title'] = combined.Title.map(title_dictionary) status('Title') return combined
Titanic - Machine Learning from Disaster
9,251,987
def create_idx_filter(indice): @tf.function def _filt(i, x): return tf.reduce_any(indice == i) return _filt @tf.function def _remove_idx(i, x): return x<categorify>
def process_names() : global combined combined.drop('Name', axis=1, inplace=True) titles_dummies = pd.get_dummies(combined['Title'], prefix='Title') combined = pd.concat([combined, titles_dummies], axis=1) combined.drop('Title', axis=1, inplace=True) status('names') return combined
Titanic - Machine Learning from Disaster
9,251,987
def create_train_dataset(batchsize, train_idx): global parsed_trainval parsed_train =(parsed_trainval .filter(create_idx_filter(train_idx)) .map(_remove_idx)) dataset =(parsed_train.cache() .shuffle(len(train_idx)) .repeat() .map(_cut_wav, num_parallel_calls=AUTOTUNE) .map(_wav_to_spec, num_parallel_calls=AUTOTUNE) .map(_create_annot, num_parallel_calls=AUTOTUNE) .map(_preprocess, num_parallel_calls=AUTOTUNE) .batch(batchsize)) if cfg['model_params']['mixup']: dataset =(dataset.map(_mixup, num_parallel_calls=AUTOTUNE) .prefetch(AUTOTUNE)) else: dataset = dataset.prefetch(AUTOTUNE) return dataset def create_val_dataset(batchsize, val_idx): global parsed_trainval parsed_val =(parsed_trainval .filter(create_idx_filter(val_idx)) .map(_remove_idx)) vdataset =(parsed_val .map(_cut_wav_val, num_parallel_calls=AUTOTUNE) .map(_wav_to_spec, num_parallel_calls=AUTOTUNE) .map(_create_annot, num_parallel_calls=AUTOTUNE) .map(_preprocess_val, num_parallel_calls=AUTOTUNE) .batch(8*strategy.num_replicas_in_sync) .cache()) return vdataset<categorify>
def process_age() : global combined combined['Age'] = combined.groupby( ['Pclass', 'Sex'])['Age'].apply(lambda x: x.fillna(x.median())) status('Age') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _one_sample_positive_class_precisions(example): y_true, y_pred = example retrieved_classes = tf.argsort(y_pred, direction='DESCENDING') class_rankings = tf.argsort(retrieved_classes) retrieved_class_true = tf.gather(y_true, retrieved_classes) retrieved_cumulative_hits = tf.math.cumsum(tf.cast(retrieved_class_true, tf.float32)) idx = tf.where(y_true)[:, 0] i = tf.boolean_mask(class_rankings, y_true) r = tf.gather(retrieved_cumulative_hits, i) c = 1 + tf.cast(i, tf.float32) precisions = r / c dense = tf.scatter_nd(idx[:, None], precisions, [y_pred.shape[0]]) return dense class LWLRAP(tf.keras.metrics.Metric): def __init__(self, num_classes, name='lwlrap'): super().__init__(name=name) self._precisions = self.add_weight( name='per_class_cumulative_precision', shape=[num_classes], initializer='zeros', ) self._counts = self.add_weight( name='per_class_cumulative_count', shape=[num_classes], initializer='zeros', ) def update_state(self, y_true, y_pred, sample_weight=None): precisions = tf.map_fn( fn=_one_sample_positive_class_precisions, elems=(y_true, y_pred), dtype=(tf.float32), ) increments = tf.cast(precisions > 0, tf.float32) total_increments = tf.reduce_sum(increments, axis=0) total_precisions = tf.reduce_sum(precisions, axis=0) self._precisions.assign_add(total_precisions) self._counts.assign_add(total_increments) def result(self): per_class_lwlrap = self._precisions / tf.maximum(self._counts, 1.0) per_class_weight = self._counts / tf.reduce_sum(self._counts) overall_lwlrap = tf.reduce_sum(per_class_lwlrap * per_class_weight) return overall_lwlrap def reset_states(self): self._precisions.assign(self._precisions * 0) self._counts.assign(self._counts * 0 )<categorify>
def age_binner() : global combined names = ['less2', '2-18', '18-35', '35-65', '65plus'] combined['AgeBin'] = pd.qcut(combined['Age'],q = 5, labels = names) age_dummies = pd.get_dummies(combined['AgeBin'], prefix='AgeBin') combined = pd.concat([combined, age_dummies], axis=1) combined.drop('AgeBin', inplace=True, axis=1) combined.drop('Age', inplace=True, axis=1) status('Age Bins') return combined
Titanic - Machine Learning from Disaster
9,251,987
def _parse_function_test(example_proto): sample = tf.io.parse_single_example(example_proto, feature_description) wav, _ = tf.audio.decode_wav(sample['audio_wav'], desired_channels=1) @tf.function def _cut_audio(i): _sample = { 'audio_wav': tf.reshape(wav[i*SR*TIME:(i+1)*SR*TIME], [SR*TIME]), 'recording_id': sample['recording_id'] } return _sample return tf.map_fn(_cut_audio, tf.range(60//TIME), dtype={ 'audio_wav': tf.float32, 'recording_id': tf.string }) def inference(model): tdataset =(tf.data.TFRecordDataset(tf.io.gfile.glob(TEST_TFREC + '/*.tfrec'), num_parallel_reads=AUTOTUNE) .map(_parse_function_test, num_parallel_calls=AUTOTUNE ).unbatch() .map(_wav_to_spec, num_parallel_calls=AUTOTUNE) .map(_preprocess_test, num_parallel_calls=AUTOTUNE) .batch(128*(60//TIME)).prefetch(AUTOTUNE)) rec_ids = [] probs = [] for inp, rec_id in tqdm(tdataset): with strategy.scope() : pred = model.predict_on_batch(tf.reshape(inp, [-1, HEIGHT, WIDTH, 3])) prob = tf.sigmoid(pred) prob = tf.reduce_max(tf.reshape(prob, [-1, 60//TIME, CLASS_N]), axis=1) rec_id_stack = tf.reshape(rec_id, [-1, 60//TIME]) for rec in rec_id.numpy() : assert len(np.unique(rec)) == 1 rec_ids.append(rec_id_stack.numpy() [:,0]) probs.append(prob.numpy()) crec_ids = np.concatenate(rec_ids) cprobs = np.concatenate(probs) sub = pd.DataFrame({ 'recording_id': list(map(lambda x: x.decode() , crec_ids.tolist())) , **{f's{i}': cprobs[:,i] for i in range(CLASS_N)} }) sub = sub.sort_values('recording_id') return sub<choose_model_class>
def process_fares() : global combined combined['Fare'] = combined.groupby( ['Pclass', 'Sex'])['Fare'].apply(lambda x: x.fillna(x.median())) status('fare') return combined
Titanic - Machine Learning from Disaster
9,251,987
def train_and_inference(splits, split_id): batchsize = cfg['model_params']['batchsize_per_tpu'] * strategy.num_replicas_in_sync print("batchsize", batchsize) loss_fn = cfg['model_params']['loss']['fn'](from_logits=True, **cfg['model_params']['loss']['params']) optimizer = cfg['model_params']['optim']['fn'](**cfg['model_params']['optim']['params']) model = create_model() with strategy.scope() : model.compile(optimizer=optimizer, loss=loss_fn, metrics=[LWLRAP(CLASS_N)]) model.load_weights('.. /input/rcfx-resnet34-weights/model_best_%d.h5' % split_id) return inference(model )<train_model>
def process_fare_bin(onehot='None'): global combined bins = [-1, 7.91, 14.454, 31, 99, 250, np.inf] names = [0, 1, 2, 3, 4, 5] combined['FareBin'] = pd.cut(combined['Fare'], bins, labels=names ).astype('int') if onehot == 'yes': farebin_dummies = pd.get_dummies(combined['FareBin'], prefix='FareBin') combined = pd.concat([combined, farebin_dummies], axis=1) combined.drop('FareBin', inplace=True, axis=1) combined.drop('Fare', inplace=True, axis=1) elif onehot == 'both': farebin_dummies = pd.get_dummies(combined['FareBin'], prefix='FareBin') combined = pd.concat([combined, farebin_dummies], axis=1) combined.drop('FareBin', inplace=True, axis=1) else: combined.drop('Fare', inplace=True, axis=1) status('FareBin') return combined
Titanic - Machine Learning from Disaster
9,251,987
sub = sum( map( lambda i: train_and_inference(splits, i ).set_index('recording_id'), range(len(splits)) ) ).reset_index()<save_to_csv>
def scale_fare() : global combined combined['Fare'] = boxcox1p(combined['Fare'], boxcox_normmax(combined['Fare'] + 1)) status('NFareBin') return combined
Titanic - Machine Learning from Disaster
9,251,987
sub.to_csv("submission.csv", index=False )<save_to_csv>
def process_embarked() : global combined combined.Embarked.fillna(combined.Embarked.mode() [0], inplace=True) embarked_dummies = pd.get_dummies(combined['Embarked'], prefix='Embarked') combined = pd.concat([combined, embarked_dummies], axis=1) combined.drop('Embarked', axis=1, inplace=True) status('Embarked') return combined
Titanic - Machine Learning from Disaster
9,251,987
FileLink(r'submission.csv' )<import_modules>
def process_cabin() : global combined combined['Cabin_Informed'] = [ 1 if pd.notnull(cab)else 0 for cab in combined['Cabin'] ] combined.Cabin.fillna('M', inplace=True) combined['Deck'] = combined['Cabin'].map(lambda c: c[0]) combined['Deck'].replace('T', 'A', inplace=True) cabin_dummies = pd.get_dummies(combined['Deck'], prefix='Deck') combined = pd.concat([combined, cabin_dummies], axis=1) combined.drop('Cabin', axis=1, inplace=True) combined.drop('Deck', axis=1, inplace=True) status('Cabin') return combined
Titanic - Machine Learning from Disaster
9,251,987
import pandas as pd, numpy as np import os<define_variables>
def process_sex() : global combined combined['Sex'] = combined['Sex'].map({'male': 1, 'female': 0}) status('Sex') return combined
Titanic - Machine Learning from Disaster
9,251,987
paths = [ ".. /input/best-submissions/submission_866.csv", ".. /input/best-submissions/submission_869.csv", ".. /input/bestsubmission/submission_879.csv", ".. /input/best-submission/submission_876.csv", ] weights = np.array([0.038, 0.070, 0.745, 0.147]) sum(weights )<define_variables>
def process_pclass() : global combined pclass_dummies = pd.get_dummies(combined['Pclass'], prefix='Pclass') combined = pd.concat([combined, pclass_dummies], axis=1) combined.drop('Pclass', axis=1, inplace=True) status('Pclass') return combined
Titanic - Machine Learning from Disaster
9,251,987
cols = [f"s{i}" for i in range(24)]<load_from_csv>
def process_ticket() : global combined def cleanTicket(ticket): ticket = ticket.replace('.', '') ticket = ticket.replace('/', '') ticket = ticket.split() ticket = map(lambda t: t.strip() , ticket) ticket = list(filter(lambda t: not t.isdigit() , ticket)) if len(ticket)> 0: return ticket[0] else: return 'Unknown' combined['Ticket'] = combined['Ticket'].map(cleanTicket) tickets_dummies = pd.get_dummies(combined['Ticket'], prefix='Ticket') combined = pd.concat([combined, tickets_dummies], axis=1) combined.drop('Ticket', inplace=True, axis=1) status('Ticket') return combined
Titanic - Machine Learning from Disaster
9,251,987
scores = [] for path in paths: df = pd.read_csv(path ).sort_values("recording_id" ).reset_index(drop=True) score = np.empty(( len(df), 24)) o = df[cols].values.argsort(1) score[np.arange(len(df)) [:, None], o] = np.arange(24)[None] scores.append(score) scores = np.stack(scores) scores.shape<compute_test_metric>
def dropper() : global combined combined.drop('Cabin', axis=1, inplace=True) combined.drop('PassengerId', inplace=True, axis=1) combined.drop('Last_Name', inplace=True, axis=1) combined.drop('Survived', inplace=True, axis=1) combined.drop('Ticket', inplace=True, axis=1) return combined
Titanic - Machine Learning from Disaster
9,251,987
sub_score = np.sum(scores*weights[:, None, None], 0) print(sub_score.shape) sub_score<prepare_output>
combined = get_combined_data() combined = family_survival() combined = process_family() combined = get_titles() combined = process_names() combined = process_age() combined = age_binner() combined = process_fares() combined = process_fare_bin(onehot='no') combined = process_embarked() combined = process_sex() combined = dropper() print( f'Processed everything.Missing values left: {combined.isna().sum().sum() }' )
Titanic - Machine Learning from Disaster
9,251,987
sub = pd.DataFrame(sub_score, columns=cols) sub["recording_id"] = df["recording_id"] sub = sub[["recording_id"] + cols] print(sub.shape) sub.head()<save_to_csv>
v_merged = combined.copy() v_merged['Survived'] = v_train['Survived'] v_merged.head()
Titanic - Machine Learning from Disaster
9,251,987
sub.to_csv("submission.csv", index=False )<create_dataframe>
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.model_selection import StratifiedKFold, cross_val_score, learning_curve, cross_validate, train_test_split, KFold from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, plot_roc_curve, auc from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import RFE import xgboost as xgb import lightgbm as lgb from mlxtend.plotting import plot_decision_regions
Titanic - Machine Learning from Disaster
9,251,987
%matplotlib inline ked = pd.DataFrame({ 'Kernel ID': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I'], 'Symbol': ['SoliSet', '[Inference] ResNest RFCX Audio Detection', 'notebookba481ef16a', 'All-in-one RFCX baseline for beginners', 'RFCX: train resnet50 with TPU', 'RFCX Resnet50 TPU', 'ResNet34 More Augmentations+Mixup+TTA(Inference)', '[Inference][TPU] RFCX Audio Detection Fast++', 'RFCX Bagging'], 'Score': [ 0.589 , 0.594 , 0.613 , 0.748 , 0.793 , 0.824 , 0.845 , 0.861 , 0.871 ], 'File Path': ['.. /input/audio-detection-soliset-201/submission.csv', '.. /input/inference-resnest-rfcx-audio-detection/submission.csv', '.. /input/minimal-fastai-solution-score-0-61/submission.csv', '.. /input/all-in-one-rfcx-baseline-for-beginners/submission.csv', '.. /input/rfcx-train-resnet50-with-tpu/submission.csv', '.. /input/rfcx-resnet50-tpu/submission.csv', '.. /input/resnet34-more-augmentations-mixup-tta-inference/submission.csv', '.. /input/inference-tpu-rfcx-audio-detection-fast/submission.csv', '.. /input/rfcx-bagging-with-different-weights-0871-score/submission.csv'], 'Note' : ['xgboost & cuml(https://rapids.ai)', 'torch & resnest50', 'fastai.vision & torchaudio', 'torch & resnest50', 'tensorflow & tf.keras.Sequential', 'tensorflow & tf.keras.Sequential', 'tensorflow & classification_models.keras', 'torch & resnest50', 'To sort the scores and use their ranks.'] }) ked<load_from_csv>
def recover_train_test_target() : global combined y = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Survived'])['Survived'] X = combined.iloc[:idx] X_test = combined.iloc[idx:] return X, X_test, y X, X_test, y = recover_train_test_target()
Titanic - Machine Learning from Disaster
9,251,987
class Kernel() : def __init__(self, symbol, score, file_path): self.symbol = symbol self.score = score self.file_path = file_path self.sub = pd.read_csv(self.file_path) def __str__(self): return f'Kernel: {self.symbol}\t| Score: {self.score}' def __repr__(self): return f'Class: {self.__class__.__name__} Name: {repr(self.symbol)}\t| Score: {self.score}' def print_head(self): print(self) print(f' Head: ') print(self.sub.head()) def print_description(self): print(self) print(f' Description: ') print(self.sub.describe()) def generation(self, other, coeff): g1 = self.sub.copy() g2 = self.sub.copy() g3 = self.sub.copy() g4 = self.sub.copy() if isinstance(other, Kernel): for i in self.sub.columns[1:]: lm, Is = [], [] lm = self.sub[i].tolist() ls = other.sub[i].tolist() res1, res2, res3, res4 = [], [], [], [] for j in range(len(self.sub)) : res1.append(max(lm[j] , ls[j])) res2.append(min(lm[j] , ls[j])) res3.append(( lm[j] + ls[j])/ 2) res4.append(( lm[j] * coeff)+(ls[j] *(1.- coeff))) g1[i] = res1 g2[i] = res2 g3[i] = res3 g4[i] = res4 return g1,g2,g3,g4 for i in range(9): ked.iloc[i, 0] = Kernel(ked.iloc[i, 1], ked.iloc[i, 2], ked.iloc[i, 3]) <categorify>
cv = StratifiedKFold(10, shuffle=True, random_state=42) rf = RandomForestClassifier(criterion='gini', n_estimators=1750, max_depth=7, min_samples_split=6, min_samples_leaf=6, max_features='auto', oob_score=True, random_state=42, n_jobs=-1, verbose=0) lg = lgb.LGBMClassifier(max_bin=4, num_iterations=550, learning_rate=0.0114, max_depth=3, num_leaves=7, colsample_bytree=0.35, random_state=42, n_jobs=-1) xg = xgb.XGBClassifier( n_estimators=2800, min_child_weight=0.1, learning_rate=0.002, max_depth=2, subsample=0.47, colsample_bytree=0.35, gamma=0.4, reg_lambda=0.4, random_state=42, n_jobs=-1, ) sv = SVC(probability=True) logreg = LogisticRegression(n_jobs=-1, solver='newton-cg') gb = GradientBoostingClassifier(random_state=42) gnb = GaussianNB() mlp = MLPClassifier(random_state=42 )
Titanic - Machine Learning from Disaster
9,251,987
def generate(main, support, coeff): g1 = main.copy() g2 = main.copy() g3 = main.copy() g4 = main.copy() for i in main.columns[1:]: lm, Is = [], [] lm = main[i].tolist() ls = support[i].tolist() res1, res2, res3, res4 = [], [], [], [] for j in range(len(main)) : res1.append(max(lm[j] , ls[j])) res2.append(min(lm[j] , ls[j])) res3.append(( lm[j] + ls[j])/ 2) res4.append(( lm[j] * coeff)+(ls[j] *(1.- coeff))) g1[i] = res1 g2[i] = res2 g3[i] = res3 g4[i] = res4 return g1,g2,g3,g4 <categorify>
estimators = [rf, lg, xg, gb, sv, logreg, gnb, mlp]
Titanic - Machine Learning from Disaster
9,251,987
g1,g2,g3,g4 = generate(ked.iloc[6, 0].sub, ked.iloc[5, 0].sub, 0.8) <categorify>
def model_check(X, y, estimators, cv): model_table = pd.DataFrame() row_index = 0 for est in estimators: MLA_name = est.__class__.__name__ model_table.loc[row_index, 'Model Name'] = MLA_name cv_results = cross_validate( est, X, y, cv=cv, scoring='accuracy', return_train_score=True, n_jobs=-1 ) model_table.loc[row_index, 'Train Accuracy Mean'] = cv_results[ 'train_score'].mean() model_table.loc[row_index, 'Test Accuracy Mean'] = cv_results[ 'test_score'].mean() model_table.loc[row_index, 'Test Std'] = cv_results['test_score'].std() model_table.loc[row_index, 'Time'] = cv_results['fit_time'].mean() row_index += 1 model_table.sort_values(by=['Test Accuracy Mean'], ascending=False, inplace=True) return model_table
Titanic - Machine Learning from Disaster
9,251,987
f1,f2,f3,f4 = generate(ked.iloc[7, 0].sub, g2, 0.8) <categorify>
raw_models = model_check(X, y, estimators, cv) display(raw_models.style.background_gradient(cmap='summer_r'))
Titanic - Machine Learning from Disaster
9,251,987
e1,e2,e3,e4 = generate(ked.iloc[8, 0].sub, f2, 0.7) <categorify>
def m_roc(estimators, cv, X, y): fig, axes = plt.subplots(math.ceil(len(estimators)/ 2), 2, figsize=(25, 50)) axes = axes.flatten() for ax, estimator in zip(axes, estimators): tprs = [] aucs = [] mean_fpr = np.linspace(0, 1, 100) for i,(train, test)in enumerate(cv.split(X, y)) : estimator.fit(X.loc[train], y.loc[train]) viz = plot_roc_curve(estimator, X.loc[test], y.loc[test], name='ROC fold {}'.format(i), alpha=0.3, lw=1, ax=ax) interp_tpr = interp(mean_fpr, viz.fpr, viz.tpr) interp_tpr[0] = 0.0 tprs.append(interp_tpr) aucs.append(viz.roc_auc) ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8) mean_tpr = np.mean(tprs, axis=0) mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) std_auc = np.std(aucs) ax.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC(AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8) std_tpr = np.std(tprs, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\pm$ 1 std.dev.') ax.set(xlim=[-0.02, 1.02], ylim=[-0.02, 1.02], title=f'{estimator.__class__.__name__} ROC') ax.legend(loc='lower right', prop={'size': 18}) plt.show()
Titanic - Machine Learning from Disaster
9,251,987
d1,d2,d3,d4 = generate(ked.iloc[8, 0].sub, f2, 0.45) <find_best_params>
m_roc(estimators, cv, X, y )
Titanic - Machine Learning from Disaster
9,251,987
c1,c2,c3,c4 = generate(ked.iloc[8, 0].sub, f2, 0.475 )<find_best_params>
f_imp(estimators, X, y, 14 )
Titanic - Machine Learning from Disaster
9,251,987
sub = c4<save_to_csv>
rf.fit(X, y) estimator = rf.estimators_[0] export_graphviz(estimator, out_file='tree.dot', feature_names = X.columns, class_names = ['Not Survived','Survived'], rounded = True, proportion = False, precision = 2, filled = True) call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600']) plt.figure(figsize =(40, 20)) plt.imshow(plt.imread('tree.png')) plt.axis('off'); plt.show() ;
Titanic - Machine Learning from Disaster
9,251,987
sub.to_csv("submission.csv", index=False) c1.to_csv("submission1.csv", index=False) c2.to_csv("submission2.csv", index=False) c3.to_csv("submission3.csv", index=False) c4.to_csv("submission4.csv", index=False) !ls<import_modules>
def f_selector(X, y, est, features): X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.4, random_state=42) rfe = RFE(estimator=est, n_features_to_select=features, verbose=1) rfe.fit(X_train, y_train) print(dict(zip(X.columns, rfe.ranking_))) print(X.columns[rfe.support_]) acc = accuracy_score(y_valid, rfe.predict(X_valid)) print("{0:.1%} accuracy on test set.".format(acc)) X_red = X[X_train.columns[rfe.support_].to_list() ] X_te_red = X_test[X_train.columns[rfe.support_].to_list() ] return X_red, X_te_red
Titanic - Machine Learning from Disaster
9,251,987
from IPython.display import Image<train_model>
X_sel, X_test_sel = f_selector(X, y, rf, 11 )
Titanic - Machine Learning from Disaster
9,251,987
Image(".. /input/rcfx-training-logs/history_0.png" )<train_model>
pipe = Pipeline([ ('scaler', StandardScaler()), ('reducer', PCA(n_components=2)) , ]) X_sel_red = pipe.fit_transform(X_sel) X_test_sel_red = pipe.transform(X_test_sel )
Titanic - Machine Learning from Disaster
9,251,987
Image(".. /input/rcfx-training-logs/history_1.png" )<train_model>
def prob_reg(X, y): figure = plt.figure(figsize=(20, 40)) h =.02 i = 1 X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=.4, random_state=42) x_min, x_max = X_sel_red[:, 0].min() -.5, X_sel_red[:, 0].max() +.5 y_min, y_max = X_sel_red[:, 1].min() -.5, X_sel_red[:, 1].max() +.5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) cm = plt.cm.RdYlGn cm_bright = ListedColormap([' ax = plt.subplot(5, 2, i) for clf in estimators: ax = plt.subplot(math.ceil(len(estimators)/ 2), 2, i) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) if hasattr(clf, "decision_function"): Z = clf.decision_function(np.c_[xx.ravel() , yy.ravel() ]) else: Z = clf.predict_proba(np.c_[xx.ravel() , yy.ravel() ])[:, 1] Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) g = ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k') ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, edgecolors='k', alpha=0.6) ax.set_xlim(xx.min() , xx.max()) ax.set_ylim(yy.min() , yy.max()) ax.set_title(clf.__class__.__name__) ax.set_xlabel('PCA 1') ax.set_ylabel('PCA 2') plt.legend(handles=g.legend_elements() [0], labels=['Not Survived', 'Survived'], framealpha=0.3, scatterpoints=1) i += 1 plt.tight_layout() plt.show()
Titanic - Machine Learning from Disaster
9,251,987
Image(".. /input/rcfx-training-logs/history_2.png" )<train_model>
dec_regs(X_sel_red, y, estimators )
Titanic - Machine Learning from Disaster
9,251,987
Image(".. /input/rcfx-training-logs/history_3.png" )<train_model>
prob_reg(X_sel_red, y )
Titanic - Machine Learning from Disaster
9,251,987
Image(".. /input/rcfx-training-logs/history_4.png" )<install_modules>
pca_models = model_check(X_sel_red, y, estimators, cv) display(pca_models.style.background_gradient(cmap='summer_r'))
Titanic - Machine Learning from Disaster
9,251,987
!pip install image-classifiers<import_modules>
rand_model_full_data = rf.fit(X, y) print(accuracy_score(y, rand_model_full_data.predict(X))) y_pred = rand_model_full_data.predict(X_test )
Titanic - Machine Learning from Disaster
9,251,987
tf.__version__<define_variables>
test_df = pd.read_csv('/kaggle/input/titanic/test.csv') submission_df = pd.DataFrame(columns=['PassengerId', 'Survived']) submission_df['PassengerId'] = test_df['PassengerId'] submission_df['Survived'] = y_pred submission_df.to_csv('submission.csv', header=True, index=False) submission_df.head(10 )
Titanic - Machine Learning from Disaster
9,251,987
SEED = 42 def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) tf.random.set_seed(seed) seed_everything(SEED )<choose_model_class>
warnings.filterwarnings('ignore') pd.options.display.max_columns = 50 plt.style.use('ggplot')
Titanic - Machine Learning from Disaster
9,251,987
ResNet34, preprocess_input = Classifiers.get('resnet34' )<init_hyperparams>
v_train = pd.read_csv('/kaggle/input/titanic/train.csv') v_test = pd.read_csv('/kaggle/input/titanic/test.csv') idx = len(v_train )
Titanic - Machine Learning from Disaster
9,251,987
cfg = { 'parse_params': { 'cut_time': 10, }, 'data_params': { 'sample_time': 6, 'spec_fmax': 24000.0, 'spec_fmin': 40.0, 'spec_mel': 384, 'mel_power': 2, 'img_shape':(384, 768) }, 'model_params': { 'batchsize_per_tpu': 16, 'iteration_per_epoch': 64, 'epoch': 18, 'arch': ResNet34, 'arch_preprocess': preprocess_input, 'freeze_to': 0, 'loss': { 'fn': tfa.losses.SigmoidFocalCrossEntropy, 'params': {}, }, 'optim': { 'fn': tfa.optimizers.RectifiedAdam, 'params': {'lr': 2e-3, 'total_steps': 18*64, 'warmup_proportion': 0.3, 'min_lr': 1e-6}, }, 'mixup': True } }<define_variables>
v_train.drop('PassengerId', axis=1, inplace=True) v_test.drop('PassengerId', axis=1, inplace=True) v_merged = pd.concat([v_train, v_test], sort=False ).reset_index(drop=True )
Titanic - Machine Learning from Disaster
9,251,987
strategy = tf.distribute.experimental.TPUStrategy(tpu) AUTOTUNE = tf.data.experimental.AUTOTUNE GCS_DS_PATH = KaggleDatasets().get_gcs_path('rfcx-species-audio-detection') TRAIN_TFREC = GCS_DS_PATH + "/tfrecords/train" TEST_TFREC = GCS_DS_PATH + "/tfrecords/test"<define_variables>
def get_combined_data() : train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') targets = train.Survived combined = train.append(test) combined.reset_index(inplace=True, drop=True) return combined
Titanic - Machine Learning from Disaster
9,251,987
CUT = cfg['parse_params']['cut_time'] SR = 48000 TIME = cfg['data_params']['sample_time'] FMAX = cfg['data_params']['spec_fmax'] FMIN = cfg['data_params']['spec_fmin'] N_MEL = cfg['data_params']['spec_mel'] HEIGHT, WIDTH = cfg['data_params']['img_shape'] CLASS_N = 24<create_dataframe>
def process_family() : global combined combined['FamilySize'] = combined['Parch'] + combined['SibSp'] + 1 combined['Alone'] = combined['FamilySize'].map(lambda s: 1 if s == 1 else 0) status('Family') return combined
Titanic - Machine Learning from Disaster
9,251,987
raw_dataset = tf.data.TFRecordDataset([TRAIN_TFREC + '/00-148.tfrec']) raw_dataset<categorify>
def family_survival() : global combined combined['Last_Name'] = combined['Name'].apply( lambda x: str.split(x, ",")[0]) default_survival_rate = 0.5 combined['Family_Survival'] = default_survival_rate for grp, grp_df in combined[[ 'Survived', 'Name', 'Last_Name', 'Fare', 'Ticket', 'PassengerId', 'SibSp', 'Parch', 'Age', 'Cabin' ]].groupby(['Last_Name', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin == 0.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 0 for _, grp_df in combined.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['Family_Survival'] == 0)|( row['Family_Survival'] == 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 1 elif(smin == 0.0): combined.loc[combined['PassengerId'] == passID, 'Family_Survival'] = 0 status('FamilySurvival') return combined
Titanic - Machine Learning from Disaster
9,251,987
feature_description = { 'recording_id': tf.io.FixedLenFeature([], tf.string, default_value=''), 'audio_wav': tf.io.FixedLenFeature([], tf.string, default_value=''), 'label_info': tf.io.FixedLenFeature([], tf.string, default_value=''), } parse_dtype = { 'audio_wav': tf.float32, 'recording_id': tf.string, 'species_id': tf.int32, 'songtype_id': tf.int32, 't_min': tf.float32, 'f_min': tf.float32, 't_max': tf.float32, 'f_max':tf.float32, 'is_tp': tf.int32 } @tf.function def _parse_function(example_proto): sample = tf.io.parse_single_example(example_proto, feature_description) wav, _ = tf.audio.decode_wav(sample['audio_wav'], desired_channels=1) label_info = tf.strings.split(sample['label_info'], sep='"')[1] labels = tf.strings.split(label_info, sep=';') @tf.function def _cut_audio(label): items = tf.strings.split(label, sep=',') spid = tf.squeeze(tf.strings.to_number(items[0], tf.int32)) soid = tf.squeeze(tf.strings.to_number(items[1], tf.int32)) tmin = tf.squeeze(tf.strings.to_number(items[2])) fmin = tf.squeeze(tf.strings.to_number(items[3])) tmax = tf.squeeze(tf.strings.to_number(items[4])) fmax = tf.squeeze(tf.strings.to_number(items[5])) tp = tf.squeeze(tf.strings.to_number(items[6], tf.int32)) tmax_s = tmax * tf.cast(SR, tf.float32) tmin_s = tmin * tf.cast(SR, tf.float32) cut_s = tf.cast(CUT * SR, tf.float32) all_s = tf.cast(60 * SR, tf.float32) tsize_s = tmax_s - tmin_s cut_min = tf.cast( tf.maximum(0.0, tf.minimum(tmin_s -(cut_s - tsize_s)/ 2, tf.minimum(tmax_s +(cut_s - tsize_s)/ 2, all_s)- cut_s) ), tf.int32 ) cut_max = cut_min + CUT * SR _sample = { 'audio_wav': tf.reshape(wav[cut_min:cut_max], [CUT*SR]), 'recording_id': sample['recording_id'], 'species_id': spid, 'songtype_id': soid, 't_min': tmin - tf.cast(cut_min, tf.float32)/tf.cast(SR, tf.float32), 'f_min': fmin, 't_max': tmax - tf.cast(cut_min, tf.float32)/tf.cast(SR, tf.float32), 'f_max': fmax, 'is_tp': tp } return _sample samples = tf.map_fn(_cut_audio, labels, dtype=parse_dtype) return samples 
parsed_dataset = raw_dataset.map(_parse_function ).unbatch()<feature_engineering>
def get_titles() : title_dictionary = { 'Capt': 'Dr/Clergy/Mil', 'Col': 'Dr/Clergy/Mil', 'Major': 'Dr/Clergy/Mil', 'Jonkheer': 'Honorific', 'Don': 'Honorific', 'Dona': 'Honorific', 'Sir': 'Honorific', 'Dr': 'Dr/Clergy/Mil', 'Rev': 'Dr/Clergy/Mil', 'the Countess': 'Honorific', 'Mme': 'Mrs', 'Mlle': 'Miss', 'Ms': 'Mrs', 'Mr': 'Mr', 'Mrs': 'Mrs', 'Miss': 'Miss', 'Master': 'Master', 'Lady': 'Honorific' } combined['Title'] = combined['Name'].map( lambda name: name.split(',')[1].split('.')[0].strip()) combined['Title'] = combined.Title.map(title_dictionary) status('Title') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _cut_wav(x): cut_min = tf.random.uniform([], maxval=(CUT-TIME)*SR, dtype=tf.int32) cut_max = cut_min + TIME * SR cutwave = tf.reshape(x['audio_wav'][cut_min:cut_max], [TIME*SR]) y = {} y.update(x) y['audio_wav'] = cutwave y['t_min'] = tf.maximum(0.0, x['t_min'] - tf.cast(cut_min, tf.float32)/ SR) y['t_max'] = tf.maximum(0.0, x['t_max'] - tf.cast(cut_min, tf.float32)/ SR) return y @tf.function def _cut_wav_val(x): cut_min =(CUT-TIME)*SR // 2 cut_max = cut_min + TIME * SR cutwave = tf.reshape(x['audio_wav'][cut_min:cut_max], [TIME*SR]) y = {} y.update(x) y['audio_wav'] = cutwave y['t_min'] = tf.maximum(0.0, x['t_min'] - tf.cast(cut_min, tf.float32)/ SR) y['t_max'] = tf.maximum(0.0, x['t_max'] - tf.cast(cut_min, tf.float32)/ SR) return y<feature_engineering>
def process_names() : global combined combined.drop('Name', axis=1, inplace=True) titles_dummies = pd.get_dummies(combined['Title'], prefix='Title') combined = pd.concat([combined, titles_dummies], axis=1) combined.drop('Title', axis=1, inplace=True) status('names') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _filtTP(x): return x['is_tp'] == 1<data_type_conversions>
def process_age() : global combined combined['Age'] = combined.groupby( ['Pclass', 'Sex'])['Age'].apply(lambda x: x.fillna(x.median())) status('Age') return combined
Titanic - Machine Learning from Disaster
9,251,987
def show_wav(sample, ax): wav = sample["audio_wav"].numpy() rate = SR ax.plot(np.arange(len(wav)) / rate, wav) ax.set_title( sample["recording_id"].numpy().decode() +("/%d" % sample["species_id"]) +("TP" if sample["is_tp"] else "FP")) return Audio(( wav * 2**15 ).astype(np.int16), rate=rate) fig, ax = plt.subplots(figsize=(15, 3)) show_wav(next(iter(parsed_dataset)) , ax )<normalization>
def age_binner() : global combined names = ['less2', '2-18', '18-35', '35-65', '65plus'] combined['AgeBin'] = pd.qcut(combined['Age'],q = 5, labels = names) age_dummies = pd.get_dummies(combined['AgeBin'], prefix='AgeBin') combined = pd.concat([combined, age_dummies], axis=1) combined.drop('AgeBin', inplace=True, axis=1) combined.drop('Age', inplace=True, axis=1) status('Age Bins') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _wav_to_spec(x): mel_power = cfg['data_params']['mel_power'] stfts = tf.signal.stft(x["audio_wav"], frame_length=2048, frame_step=512, fft_length=2048) spectrograms = tf.abs(stfts)** mel_power num_spectrogram_bins = stfts.shape[-1] lower_edge_hertz, upper_edge_hertz, num_mel_bins = FMIN, FMAX, N_MEL linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix( num_mel_bins, num_spectrogram_bins, SR, lower_edge_hertz, upper_edge_hertz) mel_spectrograms = tf.tensordot( spectrograms, linear_to_mel_weight_matrix, 1) mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate( linear_to_mel_weight_matrix.shape[-1:])) log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6) y = { 'audio_spec': tf.transpose(log_mel_spectrograms), } y.update(x) return y spec_dataset = parsed_dataset.filter(_filtTP ).map(_cut_wav ).map(_wav_to_spec )<categorify>
def process_fares() : global combined combined['Fare'] = combined.groupby( ['Pclass', 'Sex'])['Fare'].apply(lambda x: x.fillna(x.median())) status('fare') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _create_annot(x): targ = tf.one_hot(x["species_id"], CLASS_N, on_value=x["is_tp"], off_value=0) return { 'input': x["audio_spec"], 'target': tf.cast(targ, tf.float32) } annot_dataset = spec_dataset.map(_create_annot )<normalization>
def process_fare_bin(onehot='None'): global combined bins = [-1, 7.91, 14.454, 31, 99, 250, np.inf] names = [0, 1, 2, 3, 4, 5] combined['FareBin'] = pd.cut(combined['Fare'], bins, labels=names ).astype('int') if onehot == 'yes': farebin_dummies = pd.get_dummies(combined['FareBin'], prefix='FareBin') combined = pd.concat([combined, farebin_dummies], axis=1) combined.drop('FareBin', inplace=True, axis=1) combined.drop('Fare', inplace=True, axis=1) elif onehot == 'both': farebin_dummies = pd.get_dummies(combined['FareBin'], prefix='FareBin') combined = pd.concat([combined, farebin_dummies], axis=1) combined.drop('FareBin', inplace=True, axis=1) else: combined.drop('Fare', inplace=True, axis=1) status('FareBin') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _preprocess_img(x, training=False, test=False): image = tf.expand_dims(x, axis=-1) image = tf.image.resize(image, [HEIGHT, WIDTH]) image = tf.image.per_image_standardization(image) @tf.function def _specaugment(image): ERASE_TIME = 50 ERASE_MEL = 16 image = tf.expand_dims(image, axis=0) xoff = tf.random.uniform([2], minval=ERASE_TIME//2, maxval=WIDTH-ERASE_TIME//2, dtype=tf.int32) xsize = tf.random.uniform([2], minval=ERASE_TIME//2, maxval=ERASE_TIME, dtype=tf.int32) yoff = tf.random.uniform([2], minval=ERASE_MEL//2, maxval=HEIGHT-ERASE_MEL//2, dtype=tf.int32) ysize = tf.random.uniform([2], minval=ERASE_MEL//2, maxval=ERASE_MEL, dtype=tf.int32) image = tfa.image.cutout(image, [HEIGHT, xsize[0]], offset=[HEIGHT//2, xoff[0]]) image = tfa.image.cutout(image, [HEIGHT, xsize[1]], offset=[HEIGHT//2, xoff[1]]) image = tfa.image.cutout(image, [ysize[0], WIDTH], offset=[yoff[0], WIDTH//2]) image = tfa.image.cutout(image, [ysize[1], WIDTH], offset=[yoff[1], WIDTH//2]) image = tf.squeeze(image, axis=0) return image if training: gau = tf.keras.layers.GaussianNoise(0.3) image = tf.cond(tf.random.uniform([])< 0.5, lambda: gau(image, training=True), lambda: image) image = tf.image.random_brightness(image, 0.2) image = tf.image.random_flip_left_right(image) image = tf.cond(tf.random.uniform([])< 0.5, lambda: _specaugment(image), lambda: image) if test: pass image =(image - tf.reduce_min(image)) /(tf.reduce_max(image)- tf.reduce_min(image)) * 255.0 image = tf.image.grayscale_to_rgb(image) image = cfg['model_params']['arch_preprocess'](image) return image @tf.function def _preprocess(x): image = _preprocess_img(x['input'], training=True, test=False) return(image, x["target"]) @tf.function def _preprocess_val(x): image = _preprocess_img(x['input'], training=False, test=False) return(image, x["target"]) @tf.function def _preprocess_test(x): image = _preprocess_img(x['audio_spec'], training=False, test=True) return(image, x["recording_id"] )<choose_model_class>
def scale_fare() : global combined combined['Fare'] = boxcox1p(combined['Fare'], boxcox_normmax(combined['Fare'] + 1)) status('NFareBin') return combined
Titanic - Machine Learning from Disaster
9,251,987
def create_model() : with strategy.scope() : backbone = cfg['model_params']['arch'](( 224, 224, 3), include_top=False, weights='imagenet') if cfg['model_params']['freeze_to'] is None: for layer in backbone.layers: layer.trainable = False else: for layer in backbone.layers[:cfg['model_params']['freeze_to']]: layer.trainable = False model = tf.keras.Sequential([ backbone, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(1024, activation='relu', kernel_initializer=tf.keras.initializers.he_normal()), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(CLASS_N, bias_initializer=tf.keras.initializers.Constant(-2.))]) return model model = create_model() model.summary()<prepare_x_and_y>
def process_embarked() : global combined combined.Embarked.fillna(combined.Embarked.mode() [0], inplace=True) embarked_dummies = pd.get_dummies(combined['Embarked'], prefix='Embarked') combined = pd.concat([combined, embarked_dummies], axis=1) combined.drop('Embarked', axis=1, inplace=True) status('Embarked') return combined
Titanic - Machine Learning from Disaster
9,251,987
@tf.function def _mixup(inp, targ): indice = tf.range(len(inp)) indice = tf.random.shuffle(indice) sinp = tf.gather(inp, indice, axis=0) starg = tf.gather(targ, indice, axis=0) alpha = 0.2 t = tf.compat.v1.distributions.Beta(alpha, alpha ).sample([len(inp)]) tx = tf.reshape(t, [-1, 1, 1, 1]) ty = tf.reshape(t, [-1, 1]) x = inp * tx + sinp *(1-tx) y = targ * ty + starg *(1-ty) return x, y<create_dataframe>
def process_cabin() : global combined combined['Cabin_Informed'] = [ 1 if pd.notnull(cab)else 0 for cab in combined['Cabin'] ] combined.Cabin.fillna('M', inplace=True) combined['Deck'] = combined['Cabin'].map(lambda c: c[0]) combined['Deck'].replace('T', 'A', inplace=True) cabin_dummies = pd.get_dummies(combined['Deck'], prefix='Deck') combined = pd.concat([combined, cabin_dummies], axis=1) combined.drop('Cabin', axis=1, inplace=True) combined.drop('Deck', axis=1, inplace=True) status('Cabin') return combined
Titanic - Machine Learning from Disaster
9,251,987
tfrecs = sorted(tf.io.gfile.glob(TRAIN_TFREC + '/*.tfrec')) parsed_trainval =(tf.data.TFRecordDataset(tfrecs, num_parallel_reads=AUTOTUNE) .map(_parse_function, num_parallel_calls=AUTOTUNE ).unbatch() .filter(_filtTP ).enumerate() )<data_type_conversions>
def process_sex() : global combined combined['Sex'] = combined['Sex'].map({'male': 1, 'female': 0}) status('Sex') return combined
Titanic - Machine Learning from Disaster
9,251,987
indices = [] spid = [] recid = [] for i, sample in tqdm(parsed_trainval.prefetch(AUTOTUNE)) : indices.append(i.numpy()) spid.append(sample['species_id'].numpy()) recid.append(sample['recording_id'].numpy().decode() )<create_dataframe>
def process_pclass():
    """One-hot encode passenger class and drop the raw Pclass column.

    Operates on the module-level `combined` frame and returns it.
    """
    global combined
    class_dummies = pd.get_dummies(combined['Pclass'], prefix='Pclass')
    combined = pd.concat([combined, class_dummies], axis=1)
    combined.drop('Pclass', axis=1, inplace=True)
    status('Pclass')
    return combined
Titanic - Machine Learning from Disaster
9,251,987
# Lookup table: one row per dataset sample (enumeration index ->
# species id and recording id), built from the lists collected above.
table = pd.DataFrame({'indices': indices, 'species_id': spid, 'recording_id': recid})
table  # notebook-style display
def process_ticket():
    """Reduce Ticket to its alphabetic prefix and one-hot encode it.

    Purely numeric tickets map to 'Unknown'. The raw Ticket column is
    dropped. Operates on the module-level `combined` frame and returns it.
    """
    global combined

    def _ticket_prefix(ticket):
        # Strip punctuation, split on whitespace, keep non-numeric parts.
        cleaned = ticket.replace('.', '').replace('/', '')
        fragments = [frag.strip() for frag in cleaned.split()]
        prefixes = [frag for frag in fragments if not frag.isdigit()]
        return prefixes[0] if prefixes else 'Unknown'

    combined['Ticket'] = combined['Ticket'].map(_ticket_prefix)
    ticket_dummies = pd.get_dummies(combined['Ticket'], prefix='Ticket')
    combined = pd.concat([combined, ticket_dummies], axis=1)
    combined.drop('Ticket', inplace=True, axis=1)
    status('Ticket')
    return combined
Titanic - Machine Learning from Disaster
9,251,987
def create_idx_filter(indice):
    """Return a tf.data predicate keeping only samples whose enumeration
    index is contained in `indice` (a tensor/array of indices)."""
    @tf.function
    def _filt(i, x):
        # Broadcast-compare the scalar index against all fold indices.
        return tf.reduce_any(indice == i)
    return _filt


@tf.function
def _remove_idx(i, x):
    """Drop the enumeration index, keeping only the sample itself."""
    return x
def dropper():
    """Drop identifier/leakage columns that must not be model features.

    Operates on the module-level `combined` frame and returns it.
    """
    global combined
    for column in ('Cabin', 'PassengerId', 'Last_Name', 'Survived', 'Ticket'):
        combined.drop(column, axis=1, inplace=True)
    return combined
Titanic - Machine Learning from Disaster
9,251,987
def create_train_dataset(batchsize, train_idx):
    """Build the training tf.data pipeline for the samples in `train_idx`.

    Cached after the fold filter, shuffled and repeated indefinitely,
    then waveform-cut, converted to spectrograms, annotated, preprocessed,
    batched, and optionally mixup-augmented per the config flag.
    """
    global parsed_trainval
    parsed_train = (parsed_trainval
                    .filter(create_idx_filter(train_idx))
                    .map(_remove_idx))
    dataset = (parsed_train.cache()
               .shuffle(len(train_idx))
               .repeat()
               .map(_cut_wav, num_parallel_calls=AUTOTUNE)
               .map(_wav_to_spec, num_parallel_calls=AUTOTUNE)
               .map(_create_annot, num_parallel_calls=AUTOTUNE)
               .map(_preprocess, num_parallel_calls=AUTOTUNE)
               .batch(batchsize))
    if cfg['model_params']['mixup']:
        # Mixup operates on whole batches, so it must follow .batch().
        dataset = (dataset.map(_mixup, num_parallel_calls=AUTOTUNE)
                   .prefetch(AUTOTUNE))
    else:
        dataset = dataset.prefetch(AUTOTUNE)
    return dataset


def create_val_dataset(batchsize, val_idx):
    """Build the (deterministic, cached) validation pipeline for `val_idx`.

    NOTE(review): the `batchsize` parameter is ignored — the batch size is
    hard-coded to 8 * num_replicas. Confirm whether that is intentional.
    """
    global parsed_trainval
    parsed_val = (parsed_trainval
                  .filter(create_idx_filter(val_idx))
                  .map(_remove_idx))
    vdataset = (parsed_val
                .map(_cut_wav_val, num_parallel_calls=AUTOTUNE)
                .map(_wav_to_spec, num_parallel_calls=AUTOTUNE)
                .map(_create_annot, num_parallel_calls=AUTOTUNE)
                .map(_preprocess_val, num_parallel_calls=AUTOTUNE)
                .batch(8*strategy.num_replicas_in_sync)
                .cache())
    return vdataset
# Feature-engineering pipeline: each step mutates and returns the
# module-level `combined` frame. Note that process_cabin, process_pclass
# and process_ticket exist but are not invoked here.
combined = get_combined_data()
combined = family_survival()
combined = process_family()
combined = get_titles()
combined = process_names()
combined = process_age()
combined = age_binner()
combined = process_fares()
combined = process_fare_bin(onehot='no')
combined = process_embarked()
combined = process_sex()
combined = dropper()
print( f'Processed everything.Missing values left: {combined.isna().sum().sum() }' )
Titanic - Machine Learning from Disaster
9,251,987
@tf.function
def _one_sample_positive_class_precisions(example):
    """Per-class label-ranking precision for a single (y_true, y_pred) pair.

    For each positive class, precision = (number of positives ranked at or
    above it) / (its 1-based rank), where ranking is by descending score.
    Returns a dense vector with zeros at negative-class positions.
    """
    y_true, y_pred = example
    # Rank of each class when sorted by predicted score (descending).
    retrieved_classes = tf.argsort(y_pred, direction='DESCENDING')
    class_rankings = tf.argsort(retrieved_classes)
    # Cumulative count of true positives encountered walking down the ranking.
    retrieved_class_true = tf.gather(y_true, retrieved_classes)
    retrieved_cumulative_hits = tf.math.cumsum(tf.cast(retrieved_class_true, tf.float32))
    idx = tf.where(y_true)[:, 0]
    i = tf.boolean_mask(class_rankings, y_true)
    r = tf.gather(retrieved_cumulative_hits, i)
    c = 1 + tf.cast(i, tf.float32)
    precisions = r / c
    # Scatter the per-positive precisions back into a full-length vector.
    dense = tf.scatter_nd(idx[:, None], precisions, [y_pred.shape[0]])
    return dense


class LWLRAP(tf.keras.metrics.Metric):
    """Label-weighted label-ranking average precision, accumulated
    per class across batches (the Kaggle RFCX competition metric)."""

    def __init__(self, num_classes, name='lwlrap'):
        super().__init__(name=name)
        # Running sum of per-class precisions.
        self._precisions = self.add_weight(
            name='per_class_cumulative_precision',
            shape=[num_classes],
            initializer='zeros',
        )
        # Running count of positive occurrences per class.
        self._counts = self.add_weight(
            name='per_class_cumulative_count',
            shape=[num_classes],
            initializer='zeros',
        )

    def update_state(self, y_true, y_pred, sample_weight=None):
        # Note: sample_weight is accepted for Metric API compatibility
        # but ignored.
        precisions = tf.map_fn(
            fn=_one_sample_positive_class_precisions,
            elems=(y_true, y_pred),
            dtype=(tf.float32),
        )
        # A nonzero precision marks one positive occurrence of that class.
        increments = tf.cast(precisions > 0, tf.float32)
        total_increments = tf.reduce_sum(increments, axis=0)
        total_precisions = tf.reduce_sum(precisions, axis=0)
        self._precisions.assign_add(total_precisions)
        self._counts.assign_add(total_increments)

    def result(self):
        # Weight each class's mean precision by its share of positives.
        per_class_lwlrap = self._precisions / tf.maximum(self._counts, 1.0)
        per_class_weight = self._counts / tf.reduce_sum(self._counts)
        overall_lwlrap = tf.reduce_sum(per_class_lwlrap * per_class_weight)
        return overall_lwlrap

    def reset_states(self):
        self._precisions.assign(self._precisions * 0)
        self._counts.assign(self._counts * 0)
# Re-attach the target to a copy of the processed features — presumably
# for exploratory views; `combined` itself stays target-free.
v_merged = combined.copy()
v_merged['Survived'] = v_train['Survived']
v_merged.head()  # notebook-style display
Titanic - Machine Learning from Disaster
9,251,987
def _parse_function_test(example_proto):
    """Parse one 60-second test record and split it into 60//TIME
    consecutive TIME-second clips, each keeping the recording id."""
    sample = tf.io.parse_single_example(example_proto, feature_description)
    wav, _ = tf.audio.decode_wav(sample['audio_wav'], desired_channels=1)

    @tf.function
    def _cut_audio(i):
        _sample = {
            'audio_wav': tf.reshape(wav[i*SR*TIME:(i+1)*SR*TIME], [SR*TIME]),
            'recording_id': sample['recording_id']
        }
        return _sample

    return tf.map_fn(_cut_audio, tf.range(60//TIME), dtype={
        'audio_wav': tf.float32,
        'recording_id': tf.string
    })


def inference(model):
    """Predict per-class probabilities for every test recording.

    Each recording is scored clip-by-clip and the per-class maximum over
    its clips is taken. Returns a DataFrame with recording_id plus one
    column s0..s{CLASS_N-1}, sorted by recording_id (submission format).
    """
    tdataset = (tf.data.TFRecordDataset(tf.io.gfile.glob(TEST_TFREC + '/*.tfrec'),
                                        num_parallel_reads=AUTOTUNE)
                .map(_parse_function_test, num_parallel_calls=AUTOTUNE)
                .unbatch()
                .map(_wav_to_spec, num_parallel_calls=AUTOTUNE)
                .map(_preprocess_test, num_parallel_calls=AUTOTUNE)
                .batch(128*(60//TIME)).prefetch(AUTOTUNE))
    rec_ids = []
    probs = []
    for inp, rec_id in tqdm(tdataset):
        with strategy.scope():
            pred = model.predict_on_batch(tf.reshape(inp, [-1, HEIGHT, WIDTH, 3]))
            prob = tf.sigmoid(pred)
            # Max over the clips of each recording.
            prob = tf.reduce_max(tf.reshape(prob, [-1, 60//TIME, CLASS_N]), axis=1)
        rec_id_stack = tf.reshape(rec_id, [-1, 60//TIME])
        # Sanity check: all clips in a group belong to the same recording.
        # NOTE(review): this iterates rec_id (scalars), so np.unique is
        # trivially length 1 — rec_id_stack.numpy() was likely intended.
        for rec in rec_id.numpy():
            assert len(np.unique(rec)) == 1
        rec_ids.append(rec_id_stack.numpy()[:,0])
        probs.append(prob.numpy())
    crec_ids = np.concatenate(rec_ids)
    cprobs = np.concatenate(probs)
    sub = pd.DataFrame({
        'recording_id': list(map(lambda x: x.decode(), crec_ids.tolist())),
        **{f's{i}': cprobs[:,i] for i in range(CLASS_N)}
    })
    sub = sub.sort_values('recording_id')
    return sub
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.model_selection import StratifiedKFold, cross_val_score, learning_curve, cross_validate, train_test_split, KFold from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, plot_roc_curve, auc from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.naive_bayes import GaussianNB from sklearn.neural_network import MLPClassifier from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.feature_selection import RFE import xgboost as xgb import lightgbm as lgb from mlxtend.plotting import plot_decision_regions
Titanic - Machine Learning from Disaster
9,251,987
def train_and_inference(splits, split_id):
    """Build the model, load fold `split_id`'s pretrained weights, and run
    test-set inference.

    Despite the name, no training happens here — weights come from the
    attached dataset. `splits` is accepted but unused in this body.
    Returns the submission DataFrame produced by inference().
    """
    batchsize = cfg['model_params']['batchsize_per_tpu'] * strategy.num_replicas_in_sync
    print("batchsize", batchsize)
    loss_fn = cfg['model_params']['loss']['fn'](from_logits=True, **cfg['model_params']['loss']['params'])
    optimizer = cfg['model_params']['optim']['fn'](**cfg['model_params']['optim']['params'])
    model = create_model()
    with strategy.scope():
        model.compile(optimizer=optimizer, loss=loss_fn, metrics=[LWLRAP(CLASS_N)])
    # NOTE(review): the path below contains '.. /input' with a space —
    # almost certainly extraction garbling of '../input'; verify.
    model.load_weights('.. /input/rcfx-resnet34-weights/model_best_%d.h5' % split_id)
    return inference(model)
def recover_train_test_target():
    """Split the processed `combined` frame back into train/test features
    and reload the Survived target from the original CSV.

    Relies on the module-level `idx` marking the train/test boundary row.
    Returns (X_train, X_test, y).
    """
    global combined
    target_frame = pd.read_csv('/kaggle/input/titanic/train.csv', usecols=['Survived'])
    y = target_frame['Survived']
    X = combined.iloc[:idx]
    X_test = combined.iloc[idx:]
    return X, X_test, y


X, X_test, y = recover_train_test_target()
Titanic - Machine Learning from Disaster
9,251,987
# Ensemble over folds: run inference with each fold's weights and sum the
# per-class probability frames (aligned on recording_id). The sum is
# rank-equivalent to the mean, which is what the ranking metric scores.
sub = sum(
    map(
        lambda i: train_and_inference(splits, i).set_index('recording_id'),
        range(len(splits))
    )
).reset_index()
# 10-fold stratified CV used by all model comparisons below.
cv = StratifiedKFold(10, shuffle=True, random_state=42)

# Candidate classifiers with pre-chosen hyperparameters (presumably from
# an earlier tuning run — origin not visible here).
rf = RandomForestClassifier(criterion='gini',
                            n_estimators=1750,
                            max_depth=7,
                            min_samples_split=6,
                            min_samples_leaf=6,
                            max_features='auto',
                            oob_score=True,
                            random_state=42,
                            n_jobs=-1,
                            verbose=0)
lg = lgb.LGBMClassifier(max_bin=4,
                        num_iterations=550,
                        learning_rate=0.0114,
                        max_depth=3,
                        num_leaves=7,
                        colsample_bytree=0.35,
                        random_state=42,
                        n_jobs=-1)
xg = xgb.XGBClassifier(
    n_estimators=2800,
    min_child_weight=0.1,
    learning_rate=0.002,
    max_depth=2,
    subsample=0.47,
    colsample_bytree=0.35,
    gamma=0.4,
    reg_lambda=0.4,
    random_state=42,
    n_jobs=-1,
)
# probability=True enables predict_proba (needed for soft voting/ROC).
sv = SVC(probability=True)
logreg = LogisticRegression(n_jobs=-1, solver='newton-cg')
gb = GradientBoostingClassifier(random_state=42)
gnb = GaussianNB()
mlp = MLPClassifier(random_state=42)
Titanic - Machine Learning from Disaster
9,251,987
# Write the ensembled predictions in Kaggle submission format.
sub.to_csv("submission.csv", index=False)
# All candidate classifiers, in the order they will be compared.
estimators = [rf, lg, xg, gb, sv, logreg, gnb, mlp]
Titanic - Machine Learning from Disaster
9,251,987
import transformers import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from sklearn.model_selection import train_test_split<load_from_csv>
def model_check(X, y, estimators, cv):
    """Cross-validate every estimator and tabulate the results.

    Args:
        X, y: feature matrix and target vector.
        estimators: iterable of fitted-interface sklearn estimators.
        cv: cross-validation splitter passed to cross_validate.

    Returns:
        DataFrame with one row per model (name, mean train/test accuracy,
        test std, mean fit time), sorted by test accuracy descending.
    """
    rows = []
    for est in estimators:
        cv_results = cross_validate(
            est,
            X,
            y,
            cv=cv,
            scoring='accuracy',
            return_train_score=True,
            n_jobs=-1
        )
        rows.append({
            'Model Name': est.__class__.__name__,
            'Train Accuracy Mean': cv_results['train_score'].mean(),
            'Test Accuracy Mean': cv_results['test_score'].mean(),
            'Test Std': cv_results['test_score'].std(),
            'Time': cv_results['fit_time'].mean(),
        })
    model_table = pd.DataFrame(rows)
    model_table.sort_values(by=['Test Accuracy Mean'], ascending=False,
                            inplace=True)
    return model_table
Titanic - Machine Learning from Disaster