kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
9,235,765
X_val['answered_correctly_user'].fillna(0.65, inplace=True) X_val['explanation_mean_user'].fillna(prior_mean_user, inplace=True) X_val['quest_pct'].fillna(content_mean, inplace=True) X_val['part'].fillna(4, inplace = True) X['avg_questions_seen'].fillna(1, inplace = True) X_val['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) X_val['prior_question_had_explanation_enc'].fillna(0, inplace = True )<create_dataframe>
traintest_drop3[traintest_drop3['Pclass']==1]
Titanic - Machine Learning from Disaster
9,235,765
lgb_train = lgb.Dataset(X, y, categorical_feature = ['part', 'prior_question_had_explanation_enc'],free_raw_data=False) lgb_eval = lgb.Dataset(X_val, y_val, categorical_feature = ['part', 'prior_question_had_explanation_enc'], reference=lgb_train, free_raw_data=False )<init_hyperparams>
traintest_drop3[traintest_drop3['PassengerId']==61]
Titanic - Machine Learning from Disaster
9,235,765
y_val_array = np.array(y_val) def obj_and_bounds(trial): params = { 'num_leaves': trial.suggest_int('num_leaves', 160, 162), 'boosting_type': 'gbdt', 'max_bin': trial.suggest_int('max_bin', 889, 891), 'objective': 'binary', 'metric': 'auc', 'max_depth': trial.suggest_int('max_depth', 10, 12), 'min_child_weight': trial.suggest_int('min_child_weight', 10, 12), 'feature_fraction': trial.suggest_uniform('feature_fraction', 0.69, 0.7), 'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.92, 0.93), 'bagging_freq': trial.suggest_int('bagging_freq', 6, 8), 'min_child_samples': trial.suggest_int('min_child_samples', 76, 78), 'lambda_l1': trial.suggest_loguniform('lambda_l1', 2e-2, 3e-2), 'lambda_l2': trial.suggest_loguniform('lambda_l2', 9e-8, 1e-7), 'early_stopping_rounds': 10 } model = lgb.train(params, lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval=1000, num_boost_round=300) val_pred = model.predict(X_val) score = roc_auc_score(y_val_array, val_pred) print("ROC/AUC = ") print(score) return score<find_best_params>
traintest_drop3[traintest_drop3['PassengerId']==63]
Titanic - Machine Learning from Disaster
9,235,765
study = optuna.create_study(direction='maximize') study.optimize(obj_and_bounds, n_trials=2) <find_best_params>
traintest_drop3[traintest_drop3['Pclass']==1]['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
9,235,765
print('Number of finished trials: {}'.format(len(study.trials))) trial = study.best_trial print('ROC/AUC of the best trial: {}'.format(trial.value)) print('Parameters of the best trial: ') for key, value in trial.params.items() : print(' {}: {}'.format(key, value))<train_model>
traintest_drop3.loc[(traintest_drop3['Embarked'].isna()),'Embarked'] = 'S'
Titanic - Machine Learning from Disaster
9,235,765
model = lgb.train( trial.params, lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval=1000, num_boost_round=2000 )<compute_train_metric>
oenc = OrdinalEncoder() traintest_objenc = pd.DataFrame(oenc.fit_transform(traintest_drop3.select_dtypes('object')) ,columns = traintest_drop3.select_dtypes('object' ).columns )
Titanic - Machine Learning from Disaster
9,235,765
y_pred = model.predict(X_val) roc_auc_score(y_val_array, y_pred )<merge>
traintest_num = traintest_drop3.select_dtypes(['int64','float64'] ).drop(['index','PassengerId'], axis=1 ).join(traintest_objenc )
Titanic - Machine Learning from Disaster
9,235,765
iter_test = env.iter_test() for(test_df, sample_prediction_df)in iter_test: test_df['task_container_id'] = test_df.task_container_id.mask(test_df.task_container_id > 9999, 9999) test_df = pd.merge(test_df, group3, left_on=['task_container_id'], right_index= True, how="left") test_df = pd.merge(test_df, question2, left_on = 'content_id', right_on = 'question_id', how = 'left') test_df = pd.merge(test_df, results_u_final, on=['user_id'], how="left") test_df = pd.merge(test_df, results_u2_final, on=['user_id'], how="left") test_df['answered_correctly_user'].fillna(0.65, inplace=True) test_df['explanation_mean_user'].fillna(prior_mean_user, inplace=True) test_df['quest_pct'].fillna(content_mean, inplace=True) test_df['part'].fillna(4, inplace = True) test_df['avg_questions_seen'].fillna(1, inplace = True) test_df['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) test_df['prior_question_had_explanation'].fillna(False, inplace=True) test_df["prior_question_had_explanation_enc"] = lb_make.fit_transform(test_df["prior_question_had_explanation"]) test_df['answered_correctly'] = model.predict(test_df[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'avg_questions_seen', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']]) env.predict(test_df.loc[test_df['content_type_id'] == 0, ['row_id', 'answered_correctly']] )<set_options>
traintest_norm = traintest_drop3[['index','PassengerId']].join(pd.DataFrame(StandardScaler().fit_transform(traintest_num), columns = traintest_num.columns))
Titanic - Machine Learning from Disaster
9,235,765
env = riiideducation.make_env()<load_from_csv>
train_norm = traintest_norm[0:891].drop('index', axis=1 )
Titanic - Machine Learning from Disaster
9,235,765
train = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', usecols=[1, 2, 3, 4, 5, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'answered_correctly':'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'} )<sort_values>
test_norm = traintest_norm[891::].drop('index', axis=1 ).reset_index().drop('index',axis=1 )
Titanic - Machine Learning from Disaster
9,235,765
train = train[train.content_type_id == False].sort_values('timestamp' ).reset_index(drop = True )<count_unique_values>
X_train, X_test, y_train, y_test = train_test_split(train_norm, y )
Titanic - Machine Learning from Disaster
9,235,765
train[(train.content_type_id == False)].task_container_id.nunique()<groupby>
modelgbr = GradientBoostingClassifier(random_state=42) modelgbr.fit(X_train, y_train) y_predgbr = modelgbr.predict(X_test) accuracy_score(y_test, y_predgbr)
Titanic - Machine Learning from Disaster
9,235,765
group1 = train.loc[(train.content_type_id == False), ['task_container_id', 'user_id']].groupby(['task_container_id'] ).agg(['count']) group1.columns = ['avg_questions'] group2 = train.loc[(train.content_type_id == False), ['task_container_id', 'user_id']].groupby(['task_container_id'] ).agg(['nunique']) group2.columns = ['avg_questions'] group3 = group1 / group2<groupby>
pd.DataFrame(modelgbr.feature_importances_, X_train.columns ).sort_values(by=0,ascending=False)
Titanic - Machine Learning from Disaster
9,235,765
group3['avg_questions_seen'] = group3.avg_questions.cumsum()<set_options>
def model_test(testmodel): model = testmodel() model.fit(X_train, y_train) y_pred = model.predict(X_test) return accuracy_score(y_test, y_pred )
Titanic - Machine Learning from Disaster
9,235,765
group3.iloc[0].avg_questions_seen<groupby>
models = [RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier, DecisionTreeClassifier, KNeighborsClassifier]
Titanic - Machine Learning from Disaster
9,235,765
results_u_final = train.loc[train.content_type_id == False, ['user_id','answered_correctly']].groupby(['user_id'] ).agg(['mean']) results_u_final.columns = ['answered_correctly_user'] results_u2_final = train.loc[train.content_type_id == False, ['user_id','prior_question_had_explanation']].groupby(['user_id'] ).agg(['mean']) results_u2_final.columns = ['explanation_mean_user']<drop_column>
for i in models: print(i) print(model_test(i))
Titanic - Machine Learning from Disaster
9,235,765
train.drop(['timestamp', 'content_type_id'], axis=1, inplace=True )<filter>
modelclf = GradientBoostingClassifier() parameters = {'n_estimators':[50,200,300], 'learning_rate':[0.001,0.01,0.1],'max_depth':[1,3,6]} clf = GridSearchCV(modelclf, parameters) clf.fit(X_train, y_train) y_predclf = clf.predict(X_test) accuracy_score(y_test, y_predclf) clf.cv_results_ clf.best_params_
Titanic - Machine Learning from Disaster
9,235,765
validation = train.groupby('user_id' ).tail(5) train = train[~train.index.isin(validation.index)] len(train)+ len(validation )<groupby>
modelsub = GradientBoostingClassifier(learning_rate = 0.01, max_depth = 3, n_estimators = 300) modelsub.fit(train_norm.drop('PassengerId',axis=1), y) preds_test = modelsub.predict(test_norm.drop('PassengerId',axis=1))
Titanic - Machine Learning from Disaster
9,235,765
<groupby><EOS>
submission = pd.DataFrame({ "PassengerId": test["PassengerId"], "Survived": preds_test }) submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
9,420,601
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<groupby>
%matplotlib inline sns.set()
Titanic - Machine Learning from Disaster
9,420,601
results_u_X = train[['user_id','answered_correctly']].groupby(['user_id'] ).agg(['mean']) results_u_X.columns = ['answered_correctly_user'] results_u2_X = train[['user_id','prior_question_had_explanation']].groupby(['user_id'] ).agg(['mean']) results_u2_X.columns = ['explanation_mean_user']<drop_column>
train = pd.read_csv('.. /input/titanic/train.csv') test = pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
9,420,601
del(train )<merge>
train['Survived'].value_counts(normalize=True )
Titanic - Machine Learning from Disaster
9,420,601
X = pd.merge(X, group3, left_on=['task_container_id'], right_index= True, how="left") X = pd.merge(X, results_u_X, on=['user_id'], how="left") X = pd.merge(X, results_u2_X, on=['user_id'], how="left" )<merge>
train['Survived'].groupby(train['Pclass'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
validation = pd.merge(validation, group3, left_on=['task_container_id'], right_index= True, how="left") validation = pd.merge(validation, results_u_val, on=['user_id'], how="left") validation = pd.merge(validation, results_u2_val, on=['user_id'], how="left" )<categorify>
train.isnull().sum()
Titanic - Machine Learning from Disaster
9,420,601
lb_make = LabelEncoder() X.prior_question_had_explanation.fillna(False, inplace = True) validation.prior_question_had_explanation.fillna(False, inplace = True) validation["prior_question_had_explanation_enc"] = lb_make.fit_transform(validation["prior_question_had_explanation"]) X["prior_question_had_explanation_enc"] = lb_make.fit_transform(X["prior_question_had_explanation"] )<load_from_csv>
train.isnull().sum()
Titanic - Machine Learning from Disaster
9,420,601
question2 = pd.read_csv('/kaggle/input/question2/question2.csv' )<categorify>
test.isnull().sum()
Titanic - Machine Learning from Disaster
9,420,601
question2.quest_pct = question2.quest_pct.mask(( question2['count'] < 3),.65) question2.quest_pct = question2.quest_pct.mask(( question2.quest_pct <.2)&(question2['count'] < 21),.2) question2.quest_pct = question2.quest_pct.mask(( question2.quest_pct >.95)&(question2['count'] < 21),.95 )<merge>
test.isnull().sum()
Titanic - Machine Learning from Disaster
9,420,601
X = pd.merge(X, question2, left_on = 'content_id', right_on = 'question_id', how = 'left') validation = pd.merge(validation, question2, left_on = 'content_id', right_on = 'question_id', how = 'left') X.part = X.part - 1 validation.part = validation.part - 1<prepare_x_and_y>
train_test_data = [train, test] for dataset in train_test_data: dataset['Title'] = dataset['Name'].str.extract('([A-Za-z]+)\.', expand=False )
Titanic - Machine Learning from Disaster
9,420,601
y = X['answered_correctly'] X = X.drop(['answered_correctly'], axis=1) X.head() y_val = validation['answered_correctly'] X_val = validation.drop(['answered_correctly'], axis=1 )<drop_column>
train['Name_Title'] = train['Name'].apply(lambda x: x.split(',')[1] ).apply(lambda x: x.split() [0]) train['Name_Title'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
X = X[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'avg_questions_seen', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']] X_val = X_val[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'avg_questions_seen', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']]<data_type_conversions>
train['Survived'].groupby(train['Name_Title'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
X['answered_correctly_user'].fillna(0.65, inplace=True) X['explanation_mean_user'].fillna(prior_mean_user, inplace=True) X['quest_pct'].fillna(content_mean, inplace=True) X['part'].fillna(4, inplace = True) X['avg_questions_seen'].fillna(1, inplace = True) X['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) X['prior_question_had_explanation_enc'].fillna(0, inplace = True) <data_type_conversions>
train['Name_Len'] = train['Name'].apply(lambda x: len(x)) train['Survived'].groupby(pd.qcut(train['Name_Len'],5)).mean()
Titanic - Machine Learning from Disaster
9,420,601
X_val['answered_correctly_user'].fillna(0.65, inplace=True) X_val['explanation_mean_user'].fillna(prior_mean_user, inplace=True) X_val['quest_pct'].fillna(content_mean, inplace=True) X_val['part'].fillna(4, inplace = True) X['avg_questions_seen'].fillna(1, inplace = True) X_val['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) X_val['prior_question_had_explanation_enc'].fillna(0, inplace = True )<create_dataframe>
pd.qcut(train['Name_Len'],5 ).value_counts()
Titanic - Machine Learning from Disaster
9,420,601
params = { 'objective': 'binary', 'boosting' : 'gbdt', 'max_bin': 800, 'learning_rate': 0.0175, 'num_leaves': 80 } lgb_train = lgb.Dataset(X, y, categorical_feature = ['part', 'prior_question_had_explanation_enc']) lgb_eval = lgb.Dataset(X_val, y_val, categorical_feature = ['part', 'prior_question_had_explanation_enc'], reference=lgb_train )<train_model>
train['Sex'].value_counts(normalize=True )
Titanic - Machine Learning from Disaster
9,420,601
model = lgb.train( params, lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval=50, num_boost_round=1300, early_stopping_rounds=8 )<compute_train_metric>
train['Survived'].groupby(train['Sex'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
y_pred = model.predict(X_val) y_true = np.array(y_val) roc_auc_score(y_true, y_pred )<import_modules>
train['Survived'].groupby(train['Age'].isnull() ).mean()
Titanic - Machine Learning from Disaster
9,420,601
import matplotlib.pyplot as plt import seaborn as sns<split>
train['Survived'].groupby(pd.qcut(train['Age'],5)).mean()
Titanic - Machine Learning from Disaster
9,420,601
iter_test = env.iter_test()<merge>
pd.qcut(train['Age'],5 ).value_counts()
Titanic - Machine Learning from Disaster
9,420,601
for(test_df, sample_prediction_df)in iter_test: test_df['task_container_id'] = test_df.task_container_id.mask(test_df.task_container_id > 9999, 9999) test_df = pd.merge(test_df, group3, left_on=['task_container_id'], right_index= True, how="left") test_df = pd.merge(test_df, question2, left_on = 'content_id', right_on = 'question_id', how = 'left') test_df = pd.merge(test_df, results_u_final, on=['user_id'], how="left") test_df = pd.merge(test_df, results_u2_final, on=['user_id'], how="left") test_df['answered_correctly_user'].fillna(0.65, inplace=True) test_df['explanation_mean_user'].fillna(prior_mean_user, inplace=True) test_df['quest_pct'].fillna(content_mean, inplace=True) test_df['part'] = test_df.part - 1 test_df['part'].fillna(4, inplace = True) test_df['avg_questions_seen'].fillna(1, inplace = True) test_df['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) test_df['prior_question_had_explanation'].fillna(False, inplace=True) test_df["prior_question_had_explanation_enc"] = lb_make.fit_transform(test_df["prior_question_had_explanation"]) test_df['answered_correctly'] = model.predict(test_df[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'avg_questions_seen', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']]) env.predict(test_df.loc[test_df['content_type_id'] == 0, ['row_id', 'answered_correctly']] )<import_modules>
train["Age"].fillna(train.groupby("Title")["Age"].transform("median"), inplace=True) test["Age"].fillna(test.groupby("Title")["Age"].transform("median"), inplace=True )
Titanic - Machine Learning from Disaster
9,420,601
import optuna import lightgbm as lgb import pickle import riiideducation import dask.dataframe as dd import pandas as pd import numpy as np from sklearn.metrics import roc_auc_score<load_from_csv>
train['Survived'].groupby(train['SibSp'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
train = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', usecols=[1, 2, 3,4,7,8,9], dtype={'timestamp': 'int64', 'user_id': 'int32' ,'content_id': 'int16','content_type_id': 'int8','answered_correctly':'int8','prior_question_elapsed_time': 'float32','prior_question_had_explanation': 'boolean'} ) train = train[train.content_type_id == False] train = train.sort_values(['timestamp'], ascending=True) train.drop(['timestamp','content_type_id'], axis=1, inplace=True) results_c = train[['content_id','answered_correctly']].groupby(['content_id'] ).agg(['mean']) results_c.columns = ["answered_correctly_content"] results_u = train[['user_id','answered_correctly']].groupby(['user_id'] ).agg(['mean', 'sum']) results_u.columns = ["answered_correctly_user", 'sum']<load_from_csv>
train['SibSp'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
questions_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/questions.csv', usecols=[0,1, 3,4], dtype={'question_id': 'int16', 'part': 'int8','bundle_id': 'int8','tags': 'str'} ) tag = questions_df["tags"].str.split(" ", n = 10, expand = True) tag.columns = ['tags1','tags2','tags3','tags4','tags5','tags6'] questions_df = pd.concat([questions_df,tag],axis=1) questions_df['tags1'] = pd.to_numeric(questions_df['tags1'], errors='coerce') questions_df['tags2'] = pd.to_numeric(questions_df['tags2'], errors='coerce') questions_df['tags3'] = pd.to_numeric(questions_df['tags3'], errors='coerce') questions_df['tags4'] = pd.to_numeric(questions_df['tags4'], errors='coerce') questions_df['tags5'] = pd.to_numeric(questions_df['tags5'], errors='coerce') questions_df['tags6'] = pd.to_numeric(questions_df['tags6'], errors='coerce' )<categorify>
train['Survived'].groupby(train['Parch'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
lb_make = LabelEncoder() train['prior_question_had_explanation'].fillna(False, inplace=True) train["prior_question_had_explanation_enc"] = lb_make.fit_transform(train["prior_question_had_explanation"] )<load_pretrained>
train['Parch'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
file = '.. /input/riiid-lgb-training-and-save-model/trained_model.pkl' model = pickle.load(open(file, 'rb')) print('Trained LGB model was loaded!' )<find_best_params>
train['Ticket_Len'] = train['Ticket'].apply(lambda x: len(x))
Titanic - Machine Learning from Disaster
9,420,601
env = riiideducation.make_env()<merge>
train['Ticket_Len'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
iter_test = env.iter_test() for(test_df, sample_prediction_df)in iter_test: test_df = test_df.sort_values(['user_id','timestamp'], ascending=False) test_df['answer_time'] = test_df.groupby(['user_id'])['prior_question_elapsed_time'].shift(1) test_df = pd.merge(test_df, results_u, on=['user_id'], how="left") test_df = pd.merge(test_df, results_c, on=['content_id'], how="left") test_df = pd.merge(test_df, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left') test_df['answered_correctly_user'].fillna(0.5, inplace=True) test_df['answered_correctly_content'].fillna(0.5, inplace=True) test_df['sum'].fillna(0, inplace=True) test_df['prior_question_had_explanation'].fillna(False, inplace=True) test_df["prior_question_had_explanation_enc"] = lb_make.transform(test_df["prior_question_had_explanation"]) test_df['answered_correctly'] = model.predict(test_df[['answered_correctly_user', 'answered_correctly_content', 'sum','bundle_id','part','prior_question_elapsed_time','prior_question_had_explanation_enc', 'tags1','tags2','tags3']]) env.predict(test_df.loc[test_df['content_type_id'] == 0, ['row_id', 'answered_correctly']] )<set_options>
train['Ticket_Lett'] = train['Ticket'].apply(lambda x: str(x)[0] )
Titanic - Machine Learning from Disaster
9,420,601
env = riiideducation.make_env()<load_from_csv>
train['Ticket_Lett'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
train = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', usecols=[1, 2, 3, 4, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'answered_correctly':'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'} )<sort_values>
train.groupby(['Ticket_Lett'])['Survived'].mean()
Titanic - Machine Learning from Disaster
9,420,601
train = train[train.content_type_id == False] train = train.sort_values(['timestamp'], ascending=True ).reset_index(drop = True) train.head(10 )<groupby>
pd.qcut(train['Fare'], 3 ).value_counts()
Titanic - Machine Learning from Disaster
9,420,601
results_c_final = train[['content_id','answered_correctly']].groupby(['content_id'] ).agg(['mean']) results_c_final.columns = ["answered_correctly_content"] results_u_final = train[['user_id','answered_correctly']].groupby(['user_id'] ).agg(['mean', 'sum', 'count']) results_u_final.columns = ['answered_correctly_user', 'sum', 'count']<filter>
train['Survived'].groupby(pd.qcut(train['Fare'], 3)).mean()
Titanic - Machine Learning from Disaster
9,420,601
train.loc[(train.timestamp < 1000000)&(train.timestamp > 0)].answered_correctly.mean()<count_values>
train['Cabin_Letter'] = train['Cabin'].apply(lambda x: str(x)[0] )
Titanic - Machine Learning from Disaster
9,420,601
train.prior_question_had_explanation.value_counts()<drop_column>
train['Cabin_Letter'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
train.drop(['timestamp', 'content_type_id'], axis=1, inplace=True )<create_dataframe>
train['Survived'].groupby(train['Cabin_Letter'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
validation = pd.DataFrame()<remove_duplicates>
train['Cabin_num'] = train['Cabin'].apply(lambda x: str(x ).split(' ')[-1][1:]) train['Cabin_num'].replace('an', np.NaN, inplace = True) train['Cabin_num'] = train['Cabin_num'].apply(lambda x: int(x)if not pd.isnull(x)and x != '' else np.NaN )
Titanic - Machine Learning from Disaster
9,420,601
for i in range(4): last_records = train.drop_duplicates('user_id', keep = 'last') train = train[~train.index.isin(last_records.index)] validation = validation.append(last_records )<create_dataframe>
pd.qcut(train['Cabin_num'],3 ).value_counts()
Titanic - Machine Learning from Disaster
9,420,601
X = pd.DataFrame()<remove_duplicates>
train['Survived'].groupby(pd.qcut(train['Cabin_num'], 3)).mean()
Titanic - Machine Learning from Disaster
9,420,601
for i in range(15): last_records = train.drop_duplicates('user_id', keep = 'last') train = train[~train.index.isin(last_records.index)] X = X.append(last_records )<groupby>
train['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
9,420,601
results_c = train[['content_id','answered_correctly']].groupby(['content_id'] ).agg(['mean']) results_c.columns = ["answered_correctly_content"] results_u = train[['user_id','answered_correctly']].groupby(['user_id'] ).agg(['mean', 'sum', 'count']) results_u.columns = ["answered_correctly_user", 'sum', 'count']<drop_column>
train['Embarked'].value_counts(normalize=True )
Titanic - Machine Learning from Disaster
9,420,601
del(train) <merge>
train['Survived'].groupby(train['Embarked'] ).mean()
Titanic - Machine Learning from Disaster
9,420,601
X = pd.merge(X, results_u, on=['user_id'], how="left") X = pd.merge(X, results_c_final, on=['content_id'], how="left" )<merge>
def names(train, test): for i in [train, test]: i['Name_Len'] = i['Name'].apply(lambda x: len(x)) i['Name_Title'] = i['Name'].apply(lambda x: x.split(',')[1] ).apply(lambda x: x.split() [0]) del i['Name'] return train, test
Titanic - Machine Learning from Disaster
9,420,601
validation = pd.merge(validation, results_u, on=['user_id'], how="left") validation = pd.merge(validation, results_c_final, on=['content_id'], how="left" )<categorify>
def age_impute(train, test): for i in [train, test]: i['Age_Null_Flag'] = i['Age'].apply(lambda x: 1 if pd.isnull(x)else 0) data = train.groupby(['Name_Title', 'Pclass'])['Age'] i['Age'] = data.transform(lambda x: x.fillna(x.mean())) return train, test
Titanic - Machine Learning from Disaster
9,420,601
lb_make = LabelEncoder() X.prior_question_had_explanation.fillna(False, inplace = True) validation.prior_question_had_explanation.fillna(False, inplace = True) validation["prior_question_had_explanation_enc"] = lb_make.fit_transform(validation["prior_question_had_explanation"]) X["prior_question_had_explanation_enc"] = lb_make.fit_transform(X["prior_question_had_explanation"]) X.head()<load_from_csv>
def fam_size(train, test): for i in [train, test]: i['Fam_Size'] = np.where(( i['SibSp']+i['Parch'])== 0 , 'Solo', np.where(( i['SibSp']+i['Parch'])<= 3,'Nuclear', 'Big')) del i['SibSp'] del i['Parch'] return train, test
Titanic - Machine Learning from Disaster
9,420,601
questions_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/questions.csv', usecols=[0, 3], dtype={'question_id': 'int16', 'part': 'int8'} )<merge>
def cabin(train, test): for i in [train, test]: i['Cabin_Letter'] = i['Cabin'].apply(lambda x: str(x)[0]) del i['Cabin'] return train, test
Titanic - Machine Learning from Disaster
9,420,601
X = pd.merge(X, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left') validation = pd.merge(validation, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left') X.part = X.part - 1 validation.part = validation.part - 1 X.part.value_counts()<prepare_x_and_y>
def cabin_num(train, test): for i in [train, test]: i['Cabin_num1'] = i['Cabin'].apply(lambda x: str(x ).split(' ')[-1][1:]) i['Cabin_num1'].replace('an', np.NaN, inplace = True) i['Cabin_num1'] = i['Cabin_num1'].apply(lambda x: int(x)if not pd.isnull(x)and x != '' else np.NaN) i['Cabin_num'] = pd.qcut(train['Cabin_num1'],3) train = pd.concat(( train, pd.get_dummies(train['Cabin_num'], prefix = 'Cabin_num')) , axis = 1) test = pd.concat(( test, pd.get_dummies(test['Cabin_num'], prefix = 'Cabin_num')) , axis = 1) del train['Cabin_num'] del test['Cabin_num'] del train['Cabin_num1'] del test['Cabin_num1'] return train, test
Titanic - Machine Learning from Disaster
9,420,601
y = X['answered_correctly'] X = X.drop(['answered_correctly'], axis=1) X.head() y_val = validation['answered_correctly'] X_val = validation.drop(['answered_correctly'], axis=1 )<drop_column>
def embarked_impute(train, test): for i in [train, test]: i['Embarked'] = i['Embarked'].fillna('S') return train, test
Titanic - Machine Learning from Disaster
9,420,601
X = X[['answered_correctly_user', 'answered_correctly_content', 'sum', 'count', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']] X_val = X_val[['answered_correctly_user', 'answered_correctly_content', 'sum', 'count', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']]<data_type_conversions>
test['Fare'].fillna(train['Fare'].mean() , inplace = True )
Titanic - Machine Learning from Disaster
9,420,601
X_val['answered_correctly_user'].fillna(0.5, inplace=True) X_val['answered_correctly_content'].fillna(0.5, inplace=True) X_val['part'].fillna(4, inplace = True) X_val['count'].fillna(0, inplace = True) X_val['sum'].fillna(0, inplace = True) X_val['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) X_val['prior_question_had_explanation_enc'].fillna(0, inplace = True )<create_dataframe>
def dummies(train, test, columns = ['Pclass', 'Sex', 'Embarked', 'Ticket_Lett', 'Cabin_Letter', 'Name_Title', 'Fam_Size']): for column in columns: train[column] = train[column].apply(lambda x: str(x)) test[column] = test[column].apply(lambda x: str(x)) good_cols = [column+'_'+i for i in train[column].unique() if i in test[column].unique() ] train = pd.concat(( train, pd.get_dummies(train[column], prefix = column)[good_cols]), axis = 1) test = pd.concat(( test, pd.get_dummies(test[column], prefix = column)[good_cols]), axis = 1) del train[column] del test[column] return train, test
Titanic - Machine Learning from Disaster
9,420,601
params = { 'objective': 'binary', 'max_bin': 700, 'learning_rate': 0.0175, 'num_leaves': 80 } lgb_train = lgb.Dataset(X, y, categorical_feature = ['part', 'prior_question_had_explanation_enc']) lgb_eval = lgb.Dataset(X_val, y_val, categorical_feature = ['part', 'prior_question_had_explanation_enc'], reference=lgb_train )<train_model>
def drop(train, test, bye = ['PassengerId']): for i in [train, test]: for z in bye: del i[z] return train, test
Titanic - Machine Learning from Disaster
9,420,601
model = lgb.train( params, lgb_train, valid_sets=[lgb_train, lgb_eval], verbose_eval=50, num_boost_round=10000, early_stopping_rounds=12 )<compute_train_metric>
train = train = pd.read_csv('.. /input/titanic/train.csv') test = pd.read_csv('.. /input/titanic/test.csv') train, test = names(train, test) train, test = age_impute(train, test) train, test = cabin_num(train, test) train, test = cabin(train, test) train, test = embarked_impute(train, test) train, test = fam_size(train, test) test['Fare'].fillna(train['Fare'].mean() , inplace = True) train, test = ticket_grouped(train, test) train, test = dummies(train, test, columns = ['Pclass', 'Sex', 'Embarked', 'Ticket_Lett', 'Cabin_Letter', 'Name_Title', 'Fam_Size']) train, test = drop(train, test )
Titanic - Machine Learning from Disaster
9,420,601
y_pred = model.predict(X_val) y_true = np.array(y_val) roc_auc_score(y_true, y_pred )<import_modules>
rf = RandomForestClassifier(criterion='gini', n_estimators=700, min_samples_split=10, min_samples_leaf=1, max_features='auto', oob_score=True, random_state=1, n_jobs=-1) rf.fit(train.iloc[:, 1:], train.iloc[:, 0]) print("%.4f" % rf.oob_score_ )
Titanic - Machine Learning from Disaster
9,420,601
import matplotlib.pyplot as plt import seaborn as sns<split>
# Out-of-bag accuracy as a percentage. `oob_score_` is already a scalar,
# so the original `np.mean(...)` wrapper was redundant and was removed.
round(rf.oob_score_ * 100, 2)
Titanic - Machine Learning from Disaster
9,420,601
# Obtain the Riiid competition's streaming test iterator (one batch per step).
iter_test = env.iter_test()<merge>
# Top-20 features ranked by random-forest importance.
variables = pd.DataFrame(train.iloc[:, 1:].columns, columns=['variable'])
importances = pd.DataFrame(rf.feature_importances_, columns=['importance'])
pd.concat((variables, importances), axis=1).sort_values(by='importance', ascending=False)[:20]
Titanic - Machine Learning from Disaster
9,420,601
# Streaming inference loop for the Riiid API: each batch is joined with the
# question metadata and the precomputed per-user / per-content aggregates,
# NaNs from unseen users/contents are given neutral defaults, and the model's
# probabilities are submitted for question rows (content_type_id == 0) only.
# NOTE(review): lb_make.fit_transform is refit per batch — presumably safe
# because the column is boolean, but verify the encoding is stable.
for(test_df, sample_prediction_df)in iter_test: test_df = pd.merge(test_df, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left') test_df = pd.merge(test_df, results_u_final, on=['user_id'], how="left") test_df = pd.merge(test_df, results_c_final, on=['content_id'], how="left") test_df['answered_correctly_user'].fillna(0.5, inplace=True) test_df['answered_correctly_content'].fillna(0.5, inplace=True) test_df['part'] = test_df.part - 1 test_df['part'].fillna(4, inplace = True) test_df['sum'].fillna(0, inplace=True) test_df['count'].fillna(0, inplace=True) test_df['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) test_df['prior_question_had_explanation'].fillna(False, inplace=True) test_df["prior_question_had_explanation_enc"] = lb_make.fit_transform(test_df["prior_question_had_explanation"]) test_df['answered_correctly'] = model.predict(test_df[['answered_correctly_user', 'answered_correctly_content', 'sum', 'count', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']]) env.predict(test_df.loc[test_df['content_type_id'] == 0, ['row_id', 'answered_correctly']] )<define_variables>
# Predict on the engineered test set, re-attach PassengerId from the raw
# file, and write the submission csv.
predictions = pd.DataFrame(rf.predict(test), columns=['Survived'])
test = pd.read_csv(os.path.join('.. /input/titanic/test.csv'))
predictions = pd.concat((test.iloc[:, 0], predictions), axis=1)
predictions.to_csv('vysledok.csv', sep=",", index=False)
Titanic - Machine Learning from Disaster
9,746,662
# Experiment configuration: 7 EfficientNet backbones, image read/crop/net
# sizes, a ramp-up/decay LR schedule, augmentation strengths, and 25-step TTA.
DEVICE = "TPU" CFG = dict( net_count = 7, batch_size = 8, read_size = 256, crop_size = 250, net_size = 224, LR_START = 0.000005, LR_MAX = 0.000020, LR_MIN = 0.000001, LR_RAMPUP_EPOCHS = 5, LR_SUSTAIN_EPOCHS = 0, LR_EXP_DECAY = 0.8, epochs = 20, rot = 180.0, shr = 2.0, hzoom = 8.0, wzoom = 8.0, hshift = 8.0, wshift = 8.0, optimizer = 'adam', label_smooth_fac = 0.05, tta_steps = 25 )<install_modules>
# Silence warnings and let pandas display every column of wide frames.
filterwarnings('ignore') pd.set_option('display.max_columns', None)
Titanic - Machine Learning from Disaster
9,746,662
!pip install -q efficientnet<set_options>
# Shared encoder/imputer instances used by the notebook's encoding step.
encoder = OrdinalEncoder()
imputer = KNN()

def encode(data):
    """Ordinal-encode the non-null entries of a Series in place and return it."""
    observed = np.array(data.dropna()).reshape(-1, 1)
    codes = encoder.fit_transform(observed)
    data.loc[data.notnull()] = np.squeeze(codes)
    return data
Titanic - Machine Learning from Disaster
9,746,662
# Fix the PRNG so any randomised steps below are reproducible.
random.seed(42)
# Load the Titanic train/test splits plus the example submission file.
Ktrain = pd.read_csv("/kaggle/input/titanic/train.csv") Ktest = pd.read_csv("/kaggle/input/titanic/test.csv") Kgender = pd.read_csv("/kaggle/input/titanic/gender_submission.csv" )
Titanic - Machine Learning from Disaster
9,746,662
# Load the melanoma competition csvs and list the 256x256 TFRecord shards
# from the GCS bucket (sorted so fold splits are deterministic).
BASEPATH = ".. /input/siim-isic-melanoma-classification" df_train = pd.read_csv(os.path.join(BASEPATH, 'train.csv')) df_test = pd.read_csv(os.path.join(BASEPATH, 'test.csv')) df_sub = pd.read_csv(os.path.join(BASEPATH, 'sample_submission.csv')) GCS_PATH = KaggleDatasets().get_gcs_path('melanoma-256x256') files_train = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/train*.tfrec'))) files_test = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/test*.tfrec')) )<choose_model_class>
# Cardinality of every column (iterating a DataFrame yields column names).
for col in Ktrain:
    print(col, Ktrain[col].nunique())
Titanic - Machine Learning from Disaster
9,746,662
# Detect and initialise the accelerator; fall back to the default
# (CPU / single-GPU) distribution strategy when no TPU is reachable.
if DEVICE == "TPU":
    print("connecting to TPU...")
    try:
        tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
        print('Running on TPU ', tpu.master())
    except ValueError:
        print("Could not connect to TPU")
        tpu = None

    if tpu:
        try:
            print("initializing TPU...")
            tf.config.experimental_connect_to_cluster(tpu)
            tf.tpu.experimental.initialize_tpu_system(tpu)
            strategy = tf.distribute.experimental.TPUStrategy(tpu)
            print("TPU initialized")
        except Exception:
            # BUG FIX: the original read `except _:` which raises NameError
            # instead of catching the initialisation failure.
            print("failed to initialize TPU")
    else:
        DEVICE = "GPU"

if DEVICE != "TPU":
    print("Using default strategy for CPU and single GPU")
    strategy = tf.distribute.get_strategy()

if DEVICE == "GPU":
    print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
# Fraction of missing values per column, for train (printed) and test
# (cell result).
print(Ktrain.isnull().sum() /len(Ktrain)) Ktest.isnull().sum() /len(Ktest )
Titanic - Machine Learning from Disaster
9,746,662
# Random affine augmentation on the TPU: get_mat composes rotation, shear,
# zoom and shift into one 3x3 matrix; transform samples random parameters
# from CFG, maps every output pixel back through the matrix and gathers the
# source pixels (coordinates are clipped at the image border).
# Left byte-identical: the coordinate arithmetic depends on exact ordering.
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. def get_3x3_mat(lst): return tf.reshape(tf.concat([lst],axis=0), [3,3]) c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = get_3x3_mat([c1, s1, zero, -s1, c1, zero, zero, zero, one]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = get_3x3_mat([one, s2, zero, zero, c2, zero, zero, zero, one]) zoom_matrix = get_3x3_mat([one/height_zoom, zero, zero, zero, one/width_zoom, zero, zero, zero, one]) shift_matrix = get_3x3_mat([one, zero, height_shift, zero, one, width_shift, zero, zero, one]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) def transform(image, cfg): DIM = cfg["read_size"] XDIM = DIM%2 rot = cfg['rot'] * tf.random.normal([1], dtype='float32') shr = cfg['shr'] * tf.random.normal([1], dtype='float32') h_zoom = 1.0 + tf.random.normal([1], dtype='float32')/ cfg['hzoom'] w_zoom = 1.0 + tf.random.normal([1], dtype='float32')/ cfg['wzoom'] h_shift = cfg['hshift'] * tf.random.normal([1], dtype='float32') w_shift = cfg['wshift'] * tf.random.normal([1], dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2, -DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2, DIM//2), [DIM]) z = tf.ones([DIM*DIM], dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m, tf.cast(idx, dtype='float32')) idx2 = K.cast(idx2, dtype='int32') idx2 = K.clip(idx2, -DIM//2+XDIM+1, DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM, DIM,3] )<prepare_x_and_y>
# Split the training frame by outcome for later comparisons.
is_survivor = Ktrain["Survived"] == 1
is_casualty = Ktrain["Survived"] == 0
survive = Ktrain.loc[is_survivor]
loss = Ktrain.loc[is_casualty]
Titanic - Machine Learning from Disaster
9,746,662
# TFRecord parsing and image preparation: labeled records yield (image,
# target); unlabeled ones yield (image, name-or-0). prepare_image decodes,
# scales to [0,1], optionally applies the random affine transform plus
# colour jitter, and resizes to the network input size. count_data_items
# recovers the per-shard count from the "-NNN." suffix in each filename.
def read_labeled_tfrecord(example): tfrec_format = { 'image' : tf.io.FixedLenFeature([], tf.string), 'image_name' : tf.io.FixedLenFeature([], tf.string), 'patient_id' : tf.io.FixedLenFeature([], tf.int64), 'sex' : tf.io.FixedLenFeature([], tf.int64), 'age_approx' : tf.io.FixedLenFeature([], tf.int64), 'anatom_site_general_challenge': tf.io.FixedLenFeature([], tf.int64), 'diagnosis' : tf.io.FixedLenFeature([], tf.int64), 'target' : tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, tfrec_format) return example['image'], example['target'] def read_unlabeled_tfrecord(example, return_image_name): tfrec_format = { 'image' : tf.io.FixedLenFeature([], tf.string), 'image_name' : tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, tfrec_format) return example['image'], example['image_name'] if return_image_name else 0 def prepare_image(img, cfg=None, augment=True): img = tf.image.decode_jpeg(img, channels=3) img = tf.image.resize(img, [cfg['read_size'], cfg['read_size']]) img = tf.cast(img, tf.float32)/ 255.0 if augment: img = transform(img, cfg) img = tf.image.random_crop(img, [cfg['crop_size'], cfg['crop_size'], 3]) img = tf.image.random_flip_left_right(img) img = tf.image.random_hue(img, 0.01) img = tf.image.random_saturation(img, 0.8, 1.2) img = tf.image.random_contrast(img, 0.8, 1.2) img = tf.image.random_brightness(img, 0.1) else: img = tf.image.central_crop(img, cfg['crop_size'] / cfg['read_size']) img = tf.image.resize(img, [cfg['net_size'], cfg['net_size']]) img = tf.reshape(img, [cfg['net_size'], cfg['net_size'], 3]) return img def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n )<categorify>
# Ordinal-encode every categorical column in place (Ktrain_cat / Ktest_cat
# come from an earlier cell; `encode` refits the shared encoder per column,
# so codes are per-column, not shared across train and test).
for i in Ktrain_cat: encode(Ktrain_cat[i]) for i in Ktest_cat: encode(Ktest_cat[i] )
Titanic - Machine Learning from Disaster
9,746,662
# Build a tf.data input pipeline from TFRecord shards: cached, optionally
# repeated/shuffled (non-deterministic order for speed), parsed, augmented,
# batched per replica and prefetched.
def get_dataset(files, cfg, augment = False, shuffle = False, repeat = False, labeled=True, return_image_names=True): ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO) ds = ds.cache() if repeat: ds = ds.repeat() if shuffle: ds = ds.shuffle(1024*8) opt = tf.data.Options() opt.experimental_deterministic = False ds = ds.with_options(opt) if labeled: ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) else: ds = ds.map(lambda example: read_unlabeled_tfrecord(example, return_image_names), num_parallel_calls=AUTO) ds = ds.map(lambda img, imgname_or_label:(prepare_image(img, augment=augment, cfg=cfg), imgname_or_label), num_parallel_calls=AUTO) ds = ds.batch(cfg['batch_size'] * REPLICAS) ds = ds.prefetch(AUTO) return ds<prepare_x_and_y>
# Re-attach the encoded categorical columns to the numeric ones.
# NOTE(review): the intermediate .head() calls only render in a notebook;
# outside one their results are discarded.
Ktrain_=Ktrain.drop(Ktrain_cat, axis=1) Ktrain=pd.concat([Ktrain_,Ktrain_cat], axis=1) Ktrain.head() Ktest_=Ktest.drop(Ktest_cat, axis=1) Ktest=pd.concat([Ktest_,Ktest_cat], axis=1) Ktest.head()
Titanic - Machine Learning from Disaster
9,746,662
# Visual sanity check: tile `cols x rows` thumbnails from the dataset into
# one mosaic image and display it (here: 60 augmented training samples).
def show_dataset(thumb_size, cols, rows, ds): mosaic = PIL.Image.new(mode='RGB', size=(thumb_size*cols +(cols-1), thumb_size*rows +(rows-1))) for idx, data in enumerate(iter(ds)) : img, target_or_imgid = data ix = idx % cols iy = idx // cols img = np.clip(img.numpy() * 255, 0, 255 ).astype(np.uint8) img = PIL.Image.fromarray(img) img = img.resize(( thumb_size, thumb_size), resample=PIL.Image.BILINEAR) mosaic.paste(img,(ix*thumb_size + ix, iy*thumb_size + iy)) display(mosaic) ds = get_dataset(files_train, CFG ).unbatch().take(12*5) show_dataset(64, 12, 5, ds )<create_dataframe>
# Cardinality of every column after encoding (iterating a DataFrame yields
# column names).
for col in Ktrain:
    print(col, Ktrain[col].nunique())
Titanic - Machine Learning from Disaster
9,746,662
# Preview: take ONE record, repeat it, and show 60 augmented variants of the
# same image to inspect the augmentation strength.
ds = tf.data.TFRecordDataset(files_train, num_parallel_reads=AUTO) ds = ds.take(1 ).cache().repeat() ds = ds.map(read_labeled_tfrecord, num_parallel_calls=AUTO) ds = ds.map(lambda img, target:(prepare_image(img, cfg=CFG, augment=True), target), num_parallel_calls=AUTO) ds = ds.take(12*5) ds = ds.prefetch(AUTO) show_dataset(64, 12, 5, ds )<create_dataframe>
# Sentinel-fill the remaining NaNs, split features from the target, and
# hold out 25% for validation.
Ktrain = Ktrain.fillna(-999)
Ktest = Ktest.fillna(-999)
y = Ktrain["Survived"]
X = Ktrain.drop(columns=["PassengerId", "Survived"]).astype("float64")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
Titanic - Machine Learning from Disaster
9,746,662
# Preview 60 (centre-cropped, non-augmented) test images.
ds = get_dataset(files_test, CFG, labeled=False ).unbatch().take(12*5) show_dataset(64, 12, 5, ds )<choose_model_class>
def compML(df, y, algorithm):
    """Fit *algorithm* on a 75/25 split of *df* and print its test accuracy.

    Parameters
    ----------
    df : DataFrame holding the features plus 'PassengerId'/'Survived'.
    y : name of the target column in *df*.
    algorithm : estimator class (not an instance) exposing fit/predict.

    Returns the accuracy so callers can compare models programmatically
    (previously the value was only printed; the print is kept).
    """
    target = df[y]
    features = df.drop(["PassengerId", "Survived"], axis=1).astype('float64')
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.25, random_state=42)
    model = algorithm().fit(X_train, y_train)
    accuracy = accuracy_score(y_test, model.predict(X_test))
    print(algorithm.__name__, ": ", accuracy)
    return accuracy
Titanic - Machine Learning from Disaster
9,746,662
# Learning-rate schedule: linear warm-up for LR_RAMPUP_EPOCHS, optional
# plateau at LR_MAX (scaled by replica count), then exponential decay
# towards LR_MIN. Returned as a Keras LearningRateScheduler callback.
def get_lr_callback(cfg): lr_start = cfg['LR_START'] lr_max = cfg['LR_MAX'] * strategy.num_replicas_in_sync lr_min = cfg['LR_MIN'] lr_ramp_ep = cfg['LR_RAMPUP_EPOCHS'] lr_sus_ep = cfg['LR_SUSTAIN_EPOCHS'] lr_decay = cfg['LR_EXP_DECAY'] def lrfn(epoch): if epoch < lr_ramp_ep: lr =(lr_max - lr_start)/ lr_ramp_ep * epoch + lr_start elif epoch < lr_ramp_ep + lr_sus_ep: lr = lr_max else: lr =(lr_max - lr_min)* lr_decay**(epoch - lr_ramp_ep - lr_sus_ep)+ lr_min return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False) return lr_callback<choose_model_class>
# Candidate estimator classes (not instances) to be passed to compML.
models = [LogisticRegression, KNeighborsClassifier, GaussianNB, SVC, DecisionTreeClassifier, RandomForestClassifier, GradientBoostingClassifier, LGBMClassifier, XGBClassifier, ]
Titanic - Machine Learning from Disaster
9,746,662
# Multi-headed model: one sigmoid output per EfficientNet backbone
# (B0..B{net_count-1}), all fed the same input via an identity Lambda.
def get_model(cfg): model_input = tf.keras.Input(shape=(cfg['net_size'], cfg['net_size'], 3), name='imgIn') dummy = tf.keras.layers.Lambda(lambda x:x )(model_input) outputs = [] for i in range(cfg['net_count']): constructor = getattr(efn, f'EfficientNetB{i}') x = constructor(include_top=False, weights='imagenet', input_shape=(cfg['net_size'], cfg['net_size'], 3), pooling='avg' )(dummy) x = tf.keras.layers.Dense(1, activation='sigmoid' )(x) outputs.append(x) model = tf.keras.Model(model_input, outputs, name='aNetwork') model.summary() return model<choose_model_class>
# Keep the ids for the submission file; drop them from the model inputs.
ids = Ktest["PassengerId"]
X_Ktest = Ktest.drop(columns=["PassengerId"])
Titanic - Machine Learning from Disaster
9,746,662
# Build and compile the multi-head model inside the distribution strategy
# scope, with one label-smoothed BCE loss per head and AUC as the metric.
def compile_new_model(cfg): with strategy.scope() : model = get_model(cfg) losses = [tf.keras.losses.BinaryCrossentropy(label_smoothing = cfg['label_smooth_fac']) for i in range(cfg['net_count'])] model.compile( optimizer = cfg['optimizer'], loss = losses, metrics = [tf.keras.metrics.AUC(name='auc')]) return model<train_model>
# Baseline random forest with default hyper-parameters, scored on the
# hold-out split.
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
accuracy_score(y_test, y_pred)
Titanic - Machine Learning from Disaster
9,746,662
# Train: the single label is duplicated once per network head, steps are
# derived from the shard counts, and the LR schedule callback is attached.
ds_train = get_dataset(files_train, CFG, augment=True, shuffle=True, repeat=True) ds_train = ds_train.map(lambda img, label:(img, tuple([label] * CFG['net_count']))) steps_train = count_data_items(files_train)/(CFG['batch_size'] * REPLICAS) model = compile_new_model(CFG) history = model.fit(ds_train, verbose = 1, steps_per_epoch = steps_train, epochs = CFG['epochs'], callbacks = [get_lr_callback(CFG)] )<predict_on_test>
# Random-forest search space for the GridSearchCV call later in the notebook.
params={ "min_samples_split":[2, 5, 10, 20], "max_features":[ 5, 7, 8,10], "n_estimators":[100, 200, 500] }
Titanic - Machine Learning from Disaster
9,746,662
# Test-time augmentation: run `tta_steps` augmented passes over the test
# set, reshape the flat predictions into (heads, tta, images) and average
# over the TTA axis.
CFG['batch_size'] = 256 cnt_test = count_data_items(files_test) steps = cnt_test /(CFG['batch_size'] * REPLICAS)* CFG['tta_steps'] ds_testAug = get_dataset(files_test, CFG, augment=True, repeat=True, labeled=False, return_image_names=False) probs = model.predict(ds_testAug, verbose=1, steps=steps) probs = np.stack(probs) probs = probs[:,:cnt_test * CFG['tta_steps']] probs = np.stack(np.split(probs, CFG['tta_steps'], axis=1), axis=1) probs = np.mean(probs, axis=1 )<create_dataframe>
# Exhaustive 10-fold grid search over `params`, parallelised across cores.
model_cv=GridSearchCV(model, params, cv=10, n_jobs=-1, verbose=2 ).fit(X_train, y_train )
Titanic - Machine Learning from Disaster
9,746,662
# Collect test image names in deterministic order (ordered=True by default)
# so they can be matched with the averaged probabilities.
ds = get_dataset(files_test, CFG, augment=False, repeat=False, labeled=False, return_image_names=True) image_names = np.array([img_name.numpy().decode("utf-8") for img, img_name in iter(ds.unbatch())] )<save_to_csv>
# Inspect the hyper-parameters selected by the grid search.
model_cv.best_params_
Titanic - Machine Learning from Disaster
9,746,662
# Write one submission file per EfficientNet head.
for net_idx in range(CFG["net_count"]):
    submission = pd.DataFrame({'image_name': image_names, 'target': probs[net_idx, :, 0]})
    submission = submission.sort_values('image_name')
    submission.to_csv(f'submission_model_{net_idx}.csv', index=False)
# Refit with the grid-search winners and score the hold-out split.
best_params = dict(min_samples_split=10, max_features=5, n_estimators=200)
model_tuned = RandomForestClassifier(**best_params).fit(X_train, y_train)
y_pred = model_tuned.predict(X_test)
accuracy_score(y_test, y_pred)
Titanic - Machine Learning from Disaster
9,746,662
# Blend: average the per-head probabilities and write a single submission.
blended = np.mean(probs[:, :, 0], axis=0)
submission = pd.DataFrame({'image_name': image_names, 'target': blended})
submission = submission.sort_values('image_name')
submission.to_csv('submission_models_blended.csv', index=False)
# Final class predictions for the Kaggle test set.
prediction= model_tuned.predict(X_Ktest )
Titanic - Machine Learning from Disaster
9,746,662
# NOTE: `!pip ...` is IPython shell magic (not valid plain Python); the
# warnings filter silences all warnings for the rest of the session.
!pip install -q efficientnet warnings.filterwarnings('ignore' )<set_options>
# Assemble and write the tuned-model submission.
submission_rows = {"PassengerId": ids, "Survived": prediction}
output = pd.DataFrame(submission_rows)
output.to_csv('submission_tuned.csv', index=False)
Titanic - Machine Learning from Disaster
8,662,763
# Connect to a TPU when available, otherwise fall back to the default
# single-device distribution strategy.
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )<define_variables>
# Load the raw Titanic splits for this notebook's pipeline.
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv')
Titanic - Machine Learning from Disaster
8,662,763
# Run configuration: pick the GCS dataset matching the chosen image size
# (256..1024) and list its train/test TFRecord shards; 1024 uses the raw
# competition bucket layout (tfrecords/ subfolder).
AUTO = tf.data.experimental.AUTOTUNE EPOCHS = 25 BATCH_SIZE = 16 * strategy.num_replicas_in_sync AUG_BATCH = BATCH_SIZE IMAGE_SIZE = [256, 256] SEED = 123 LR = 0.0005 if(IMAGE_SIZE[0] == 256): print('Using size 256') GCS_PATH = KaggleDatasets().get_gcs_path('melanoma-256x256') elif(IMAGE_SIZE[0] == 384): print('Using size 384') GCS_PATH = KaggleDatasets().get_gcs_path('melanoma-384x384') elif(IMAGE_SIZE[0] == 512): print('Using size 512') GCS_PATH = KaggleDatasets().get_gcs_path('melanoma-512x512') elif(IMAGE_SIZE[0] == 768): print('Using size 768') GCS_PATH = KaggleDatasets().get_gcs_path('melanoma-768x768') elif(IMAGE_SIZE[0] == 1024): print('Using size 1024') GCS_PATH = KaggleDatasets().get_gcs_path('siim-isic-melanoma-classification') TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec') if(IMAGE_SIZE[0] == 1024): TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/tfrecords/train*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/tfrecords/test*.tfrec') SUB = pd.read_csv('/kaggle/input/siim-isic-melanoma-classification/sample_submission.csv' )<normalization>
# Extract the honorific ("Mr", "Mrs", ...) preceding the dot in each name.
datasets = [train, test]
for df in datasets:
    # Raw string fix: '\.' inside a normal literal is an invalid escape
    # sequence (SyntaxWarning on modern Python); the matched pattern —
    # one or more letters followed by a literal dot — is unchanged.
    df['Title'] = df.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
Titanic - Machine Learning from Disaster
8,662,763
# Input pipeline for the image+tabular melanoma model: random affine
# augmentation (get_mat/transform, with a randomly bucketed rotation
# magnitude), TFRecord parsers that also decode age/sex/anatom-site into a
# 9-float tabular vector ('inp2'), dataset builders for train/val/test, and
# count_data_items recovering per-shard counts from the "-NNN." filename
# suffix. Left byte-identical: the coordinate math and the {'inp1','inp2'}
# plumbing depend on exact ordering.
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) zoom_matrix = tf.reshape(tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3]) shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) def transform(image, label): DIM = IMAGE_SIZE[0] XDIM = DIM%2 tmp = random.uniform(0, 1) if 0 < tmp <= 0.1: rot = 15.0 * tf.random.normal([1],dtype='float32') elif 0.1 < tmp <= 0.2: rot = 30.0 * tf.random.normal([1],dtype='float32') elif 0.2 < tmp <= 0.3: rot = 45.0 * tf.random.normal([1],dtype='float32') elif 0.3 < tmp <= 0.4: rot = 60.0 * tf.random.normal([1],dtype='float32') elif 0.4 < tmp <= 0.5: rot = 75.0 * tf.random.normal([1],dtype='float32') elif 0.5 < tmp <= 0.6: rot = 90.0 * tf.random.normal([1],dtype='float32') elif 0.6 < tmp <= 0.7: rot = 110.0 * tf.random.normal([1],dtype='float32') elif 0.7 < tmp <= 0.8: rot = 130.0 * tf.random.normal([1],dtype='float32') elif 0.8 < tmp <= 0.9: rot = 150.0 * tf.random.normal([1],dtype='float32') elif 0.9 < tmp <= 1.0: rot = 180.0 * tf.random.normal([1],dtype='float32') shr = 5.* tf.random.normal([1],dtype='float32') h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. 
h_shift = 16.* tf.random.normal([1],dtype='float32') w_shift = 16.* tf.random.normal([1],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image['inp1'],tf.transpose(idx3)) return {'inp1': tf.reshape(d,[DIM,DIM,3]), 'inp2': image['inp2']}, label def seed_everything(seed): random.seed(seed) np.random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) tf.random.set_seed(seed) def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "target": tf.io.FixedLenFeature([], tf.int64), "age_approx": tf.io.FixedLenFeature([], tf.int64), "sex": tf.io.FixedLenFeature([], tf.int64), "anatom_site_general_challenge": tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['target'], tf.float32) data = {} data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return image, label, data def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "image_name": tf.io.FixedLenFeature([], tf.string), "age_approx": tf.io.FixedLenFeature([], tf.int64), "sex": tf.io.FixedLenFeature([], tf.int64), "anatom_site_general_challenge": tf.io.FixedLenFeature([], tf.int64) } example = 
tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) image_name = example['image_name'] data = {} data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return image, image_name, data def load_dataset(filenames, labeled = True, ordered = False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads = AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls = AUTO) return dataset def setup_input1(image, label, data): anatom = [tf.cast(data['anatom_site_general_challenge'][i], dtype = tf.float32)for i in range(7)] tab_data = [tf.cast(data[tfeat], dtype = tf.float32)for tfeat in ['age_approx', 'sex']] tabular = tf.stack(tab_data + anatom) return {'inp1': image, 'inp2': tabular}, label def setup_input2(image, image_name, data): anatom = [tf.cast(data['anatom_site_general_challenge'][i], dtype = tf.float32)for i in range(7)] tab_data = [tf.cast(data[tfeat], dtype = tf.float32)for tfeat in ['age_approx', 'sex']] tabular = tf.stack(tab_data + anatom) return {'inp1': image, 'inp2': tabular}, image_name def setup_input3(image, image_name, target, data): anatom = [tf.cast(data['anatom_site_general_challenge'][i], dtype = tf.float32)for i in range(7)] tab_data = [tf.cast(data[tfeat], dtype = tf.float32)for tfeat in ['age_approx', 'sex']] tabular = tf.stack(tab_data + anatom) return {'inp1': image, 'inp2': tabular}, image_name, target def data_augment(data, label): data['inp1'] = tf.image.random_flip_left_right(data['inp1']) data['inp1'] = tf.image.random_flip_up_down(data['inp1']) data['inp1'] = tf.image.random_hue(data['inp1'], 0.01) data['inp1'] = 
tf.image.random_saturation(data['inp1'], 0.8, 1.2) data['inp1'] = tf.image.random_contrast(data['inp1'], 0.8, 1.2) data['inp1'] = tf.image.random_brightness(data['inp1'], 0.1) return data, label def get_training_dataset(filenames, labeled = True, ordered = False): dataset = load_dataset(filenames, labeled = labeled, ordered = ordered) dataset = dataset.map(setup_input1, num_parallel_calls = AUTO) dataset = dataset.map(data_augment, num_parallel_calls = AUTO) dataset = dataset.map(transform, num_parallel_calls = AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(filenames, labeled = True, ordered = True): dataset = load_dataset(filenames, labeled = labeled, ordered = ordered) dataset = dataset.map(setup_input1, num_parallel_calls = AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(filenames, labeled = False, ordered = True): dataset = load_dataset(filenames, labeled = labeled, ordered = ordered) dataset = dataset.map(setup_input2, num_parallel_calls = AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." 
).search(filename ).group(1)) for filename in filenames] return np.sum(n) def read_tfrecord_full(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "image_name": tf.io.FixedLenFeature([], tf.string), "target": tf.io.FixedLenFeature([], tf.int64), "age_approx": tf.io.FixedLenFeature([], tf.int64), "sex": tf.io.FixedLenFeature([], tf.int64), "anatom_site_general_challenge": tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) image_name = example['image_name'] target = tf.cast(example['target'], tf.float32) data = {} data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return image, image_name, target, data def load_dataset_full(filenames): dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads = AUTO) dataset = dataset.map(read_tfrecord_full, num_parallel_calls = AUTO) return dataset def get_data_full(filenames): dataset = load_dataset_full(filenames) dataset = dataset.map(setup_input3, num_parallel_calls = AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset NUM_TRAINING_IMAGES = int(count_data_items(TRAINING_FILENAMES)* 0.8) NUM_VALIDATION_IMAGES = int(count_data_items(TRAINING_FILENAMES)* 0.2) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))<compute_train_metric>
# Binary cabin indicator, placeholder for missing Embarked, then drop the
# free-text columns that are no longer needed.
for df in datasets:
    df['hasCabin'] = np.where(df['Cabin'].isnull(), 0, 1)
    df.loc[df['Embarked'].isnull(), 'Embarked'] = 'None'
    df.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
train.head()
Titanic - Machine Learning from Disaster
8,662,763
# K-fold training of the image+tabular model: binary_focal_loss is defined
# but unused (BCE with label smoothing is compiled instead); get_model fuses
# an EfficientNetB6 image branch with a small dense tabular branch;
# train_and_predict runs 5-fold CV with early stopping and LR-plateau
# callbacks, saves per-fold weights, accumulates out-of-fold predictions to
# oof.csv, averages the fold models over the test set and writes
# submission.csv. Left byte-identical: heavy TPU/Keras orchestration whose
# behaviour depends on exact statement order. (`%%time` is notebook magic.)
%%time def binary_focal_loss(gamma=2., alpha=.25): def binary_focal_loss_fixed(y_true, y_pred): pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred)) pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred)) epsilon = K.epsilon() pt_1 = K.clip(pt_1, epsilon, 1.- epsilon) pt_0 = K.clip(pt_0, epsilon, 1.- epsilon) return -K.sum(alpha * K.pow(1.- pt_1, gamma)* K.log(pt_1)) \ -K.sum(( 1 - alpha)* K.pow(pt_0, gamma)* K.log(1.- pt_0)) return binary_focal_loss_fixed def get_model() : with strategy.scope() : inp1 = tf.keras.layers.Input(shape =(*IMAGE_SIZE, 3), name = 'inp1') inp2 = tf.keras.layers.Input(shape =(9), name = 'inp2') efnetb6 = efn.EfficientNetB6(weights = 'imagenet', include_top = False) x6 = efnetb6(inp1) x6 = tf.keras.layers.GlobalAveragePooling2D()(x6) x1 = tf.keras.layers.Dense(32 )(inp2) x1 = tf.keras.layers.BatchNormalization()(x1) x1 = tf.keras.layers.Activation('relu' )(x1) concat = tf.keras.layers.concatenate([x6, x1]) concat = tf.keras.layers.Dense(256, activation = 'relu' )(concat) concat = tf.keras.layers.BatchNormalization()(concat) concat = tf.keras.layers.Dropout(0.2 )(concat) concat = tf.keras.layers.Dense(64, activation = 'relu' )(concat) concat = tf.keras.layers.BatchNormalization()(concat) concat = tf.keras.layers.Dropout(0.2 )(concat) output = tf.keras.layers.Dense(1, activation = 'sigmoid' )(concat) model = tf.keras.models.Model(inputs = [inp1, inp2], outputs = [output]) opt = tf.keras.optimizers.Adam(learning_rate = LR) model.compile( optimizer = opt, loss = [tf.keras.losses.BinaryCrossentropy(label_smoothing = 0.05)], metrics = [tf.keras.metrics.BinaryAccuracy() , tf.keras.metrics.AUC() ] ) print(model.summary()) return model def train_and_predict(SUB, folds = 5): models = [] oof_image_name = [] oof_target = [] oof_prediction = [] seed_everything(SEED) kfold = KFold(folds, shuffle = True, random_state = SEED) for fold,(trn_ind, val_ind)in enumerate(kfold.split(TRAINING_FILENAMES)) : tic = time.time() print(' ') 
print('-'*50) print(f'Training fold {fold}') print('-'*50) print(' ') train_dataset = get_training_dataset([TRAINING_FILENAMES[x] for x in trn_ind], labeled = True, ordered = False) val_dataset = get_validation_dataset([TRAINING_FILENAMES[x] for x in val_ind], labeled = True, ordered = True) K.clear_session() model = get_model() early_stopping = tf.keras.callbacks.EarlyStopping(monitor = 'val_auc', mode = 'max', patience = 10, verbose = 1, min_delta = 0.0001, restore_best_weights = True) cb_lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_auc', factor = 0.5, patience = 2, verbose = 1, min_delta = 0.0001, mode = 'max') history = model.fit(train_dataset, steps_per_epoch = STEPS_PER_EPOCH, epochs = EPOCHS, callbacks = [early_stopping, cb_lr_schedule], validation_data = val_dataset, verbose = 2) models.append(model) model.save_weights(f'ENet_fold_{fold}.h5') print('Weights saved!') number_of_files = count_data_items([TRAINING_FILENAMES[x] for x in val_ind]) dataset = get_data_full([TRAINING_FILENAMES[x] for x in val_ind]) image_name = dataset.map(lambda image, image_name, target: image_name ).unbatch() image_name = next(iter(image_name.batch(number_of_files)) ).numpy().astype('U') target = dataset.map(lambda image, image_name, target: target ).unbatch() target = next(iter(target.batch(number_of_files)) ).numpy() image = dataset.map(lambda image, image_name, target: image) probabilities = model.predict(image) oof_image_name.extend(list(image_name)) oof_target.extend(list(target)) oof_prediction.extend(list(np.concatenate(probabilities))) toc = time.time() print(' ') print('Time taken:', toc-tic) print(' ') print('-'*50) oof_df = pd.DataFrame({'image_name': oof_image_name, 'target': oof_target, 'predictions': oof_prediction}) oof_df.to_csv('oof.csv', index = False) test_ds = get_test_dataset(TEST_FILENAMES, labeled = False, ordered = True) test_images_ds = test_ds.map(lambda image, image_name: image) print('Computing predictions...') probabilities = 
np.average([np.concatenate(models[i].predict(test_images_ds)) for i in range(folds)], axis = 0) print('Generating submission file...') test_ids_ds = test_ds.map(lambda image, image_name: image_name ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') pred_df = pd.DataFrame({'image_name': test_ids, 'target': probabilities}) SUB.drop('target', inplace = True, axis = 1) SUB = SUB.merge(pred_df, on = 'image_name') SUB.to_csv(f'submission.csv', index = False) return oof_target, oof_prediction oof_target, oof_prediction = train_and_predict(SUB )<compute_test_metric>
SEED = 1
np.random.seed(SEED)

# One LabelEncoder per categorical column. Title is fitted on train+test
# combined (presumably because test contains titles unseen in train —
# verify); Sex and Embarked are fitted on the training frame only.
le = {
    'Sex': LabelEncoder().fit(train.Sex),
    'Embarked': LabelEncoder().fit(train.Embarked),
    'Title': LabelEncoder().fit(pd.concat([train.Title, test.Title], axis=0)),
}
for df in datasets:
    for col in ('Sex', 'Embarked', 'Title'):
        df[col] = le[col].transform(df[col])
train.head()
Titanic - Machine Learning from Disaster
8,662,763
# Out-of-fold ROC-AUC across all CV folds.
roc_auc = metrics.roc_auc_score(oof_target, oof_prediction) print('Our out of folds roc auc score is: ', roc_auc )<import_modules>
# Mean-impute Age within each frame, then round to whole years.
# NOTE(review): test Ages are imputed from the test-set mean rather than
# the training mean — possibly unintended; behaviour preserved as-is.
for df in datasets:
    df.loc[df['Age'].isnull(), 'Age'] = df['Age'].mean()
for df in datasets:
    df.loc[:, 'Age'] = np.round(df['Age'])
Titanic - Machine Learning from Disaster
8,662,763
!pip install -q efficientnet <set_options>
# The lone missing Fare (in the test split) gets that frame's mean fare.
for df in datasets:
    missing_fare = df['Fare'].isnull()
    df.loc[missing_fare, 'Fare'] = df['Fare'].mean()
Titanic - Machine Learning from Disaster
8,662,763
# Connect to a TPU when available, otherwise fall back to the default
# single-device distribution strategy.
try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Running on TPU ', tpu.master()) except ValueError: tpu = None if tpu: tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) else: strategy = tf.distribute.get_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync )<load_from_csv>
# Build the model matrices and standardise the features.
x_train0 = train.drop(['PassengerId', 'Survived'], axis=1)
y_train0 = train['Survived']
x_test0 = test.drop(['PassengerId'], axis=1)
sc = StandardScaler()
x_train = sc.fit_transform(x_train0)
# BUG FIX: the scaler was refitted on the test set (`sc.fit_transform`),
# so train and test were standardised with different means/variances.
# Reuse the training-set statistics instead.
x_test = sc.transform(x_test0)
y_train = y_train0.values.astype('float32')
Titanic - Machine Learning from Disaster
8,662,763
# Run configuration for the 512x512 melanoma experiment: batch scaled by
# replica count, CutMix applied to 30% of batches, GridMask disabled.
AUTO = tf.data.experimental.AUTOTUNE GCS_PATH = KaggleDatasets().get_gcs_path('melanoma-512x512') EPOCHS = 40 BATCH_SIZE = 16 * strategy.num_replicas_in_sync AUG_BATCH = BATCH_SIZE IMAGE_SIZE = [512, 512] SEED = 123 LR = 0.0003 cutmix_rate = 0.3 gridmask_rate = 0 TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec') SUB = pd.read_csv('/kaggle/input/siim-isic-melanoma-classification/sample_submission.csv' )<normalization>
from keras import models from keras import layers from keras import optimizers
Titanic - Machine Learning from Disaster