kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
5,213,367
def get_df() : base_image_dir = os.path.join('.. ', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir,'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir,'{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1 ).reset_index(drop=True) test_df = pd.read_csv('.. /input/aptos2019-blindness-detection/sample_submission.csv') return df, test_df df, test_df = get_df() bs = 16 sz = 380 <import_modules>
df = df.set_index('Age' ).reset_index() df.head(2 )
Titanic - Machine Learning from Disaster
5,213,367
from fastai import * from fastai.vision import * from torchvision.models import * import torch import torch.optim as optim from fastai.vision.models import * from fastai.vision.learner import model_meta from fastai.callbacks import *<init_hyperparams>
merged['Age'] = merged.groupby(['Title', 'Pclass'])['Age'].transform(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
5,213,367
aptos19_stats =([0.42, 0.22, 0.075], [0.27, 0.15, 0.081]) tsfm1 = get_transforms(do_flip=False, flip_vert=False, max_rotate=0., max_zoom=0., max_warp=0.0, max_lighting=0.,p_affine=0.) data =(ImageList.from_df(df=df,path='./',cols='path') .split_by_rand_pct(0.1) .label_from_df(cols='diagnosis',label_cls=FloatList) .transform(tsfm1,size=380,) .databunch(bs=bs,num_workers=4) .normalize(imagenet_stats) )<define_variables>
merged.loc[:, ['Pclass', 'Sex', 'Embarked', 'Cabin', 'Title', 'Family_size', 'Ticket']] = merged.loc[:, ['Pclass', 'Sex', 'Embarked', 'Cabin', 'Title', 'Family_size', 'Ticket']].astype('category') merged['Survived'] = merged['Survived'].dropna().astype('int' )
Titanic - Machine Learning from Disaster
5,213,367
data.show_batch(3,(5,5))<load_pretrained>
merged.drop(columns = ['Name', 'Age','SibSp', 'Parch','Fare'], inplace = True, axis = 1) merged.columns
Titanic - Machine Learning from Disaster
5,213,367
md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1 )<compute_test_metric>
merged = pd.get_dummies(merged, drop_first=True) merged.head(2 )
Titanic - Machine Learning from Disaster
5,213,367
def quadratic_kappa(y_hat, y): return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'),device='cuda:0' )<load_pretrained>
train = merged.iloc[:891, :] test = merged.iloc[891:, :]
Titanic - Machine Learning from Disaster
5,213,367
learn = Learner(data, md_ef, metrics = [quadratic_kappa] ,callback_fns=[BnFreeze,partial(SaveModelCallback, monitor='quadratic_kappa', name='best_accuracy')] ).to_fp16() learn.data.add_test(ImageList.from_df(test_df, '.. /input/aptos2019-blindness-detection', folder='test_images', suffix='.png')) <define_variables>
train = train.drop(columns = ['PassengerId'], axis = 1) test = test.drop(columns = ['Survived'], axis = 1 )
Titanic - Machine Learning from Disaster
5,213,367
learn.model_dir="/kaggle/working/models"<load_pretrained>
X_train = train.drop(columns = ['Survived'], axis = 1) y_train = train['Survived'] X_test = test.drop("PassengerId", axis = 1 ).copy()
Titanic - Machine Learning from Disaster
5,213,367
learn.load('modelk');<find_best_params>
LR = LogisticRegression() KNN = KNeighborsClassifier() DT = DecisionTreeClassifier() RF = RandomForestClassifier() SVM = SVC(gamma='auto') XGB = XGBClassifier(n_jobs=-1, random_state=42 )
Titanic - Machine Learning from Disaster
5,213,367
learn.freeze_to(-1 )<train_model>
def train_accuracy(model): model.fit(X_train, y_train) train_accuracy = model.score(X_train, y_train) train_accuracy = np.round(train_accuracy*100,2) return train_accuracy
Titanic - Machine Learning from Disaster
5,213,367
learn.fit_one_cycle(5,3e-04 )<save_model>
train_accuracy = pd.DataFrame({'Training accuracy %':[train_accuracy(LR),train_accuracy(KNN),train_accuracy(DT),train_accuracy(RF),train_accuracy(SVM), train_accuracy(XGB)]}) train_accuracy.index = ['Logistic Regression','KNN','Decision Tree','Random Forest','SVM','XGB'] sorted_train_accuracy = train_accuracy.sort_values(by='Training accuracy %', ascending = False) sorted_train_accuracy
Titanic - Machine Learning from Disaster
5,213,367
learn.load('best_accuracy') learn.save('stage1' )<train_model>
def val_score(model): val_score = cross_val_score(model, X_train, y_train, cv = 10, scoring = 'accuracy' ).mean() val_score = np.round(val_score*100, 2) return val_score val_score = pd.DataFrame({'val_score(%)':[val_score(LR), val_score(KNN), val_score(DT), val_score(RF), val_score(SVM), val_score(XGB)]}) val_score.index = ['Logistic Regression', 'KNN','Decision Tree', 'Random Forest', 'SVC','XGB'] sorted_val_score = val_score.sort_values(by = 'val_score(%)', ascending = False) sorted_val_score
Titanic - Machine Learning from Disaster
5,213,367
learn.fit_one_cycle(10, 7.59E-07 )<import_modules>
lr_params = {'penalty':['l1', 'l2'], 'C': np.logspace(0, 2, 4, 8 ,10)} knn_params = {'n_neighbors':[4,5,6,7,8,9,10], 'weights':['uniform', 'distance'], 'algorithm':['auto', 'ball_tree','kd_tree','brute'], 'p':[1,2]} dt_params = {'max_features': ['auto', 'sqrt', 'log2'], 'min_samples_split': [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 'min_samples_leaf':[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 'random_state':[46]} rf_params = {'criterion':['gini','entropy'], 'n_estimators':[ 10, 30, 200, 400], 'min_samples_leaf':[1, 2, 3], 'min_samples_split':[3, 4, 6, 7], 'max_features':['sqrt', 'auto', 'log2'], 'random_state':[46]} svc_params = {'C': [0.1, 1, 10,100], 'kernel': ['linear', 'rbf', 'poly', 'sigmoid'], 'gamma': [ 1, 0.1, 0.001, 0.0001]} xgb_params = xgb_params_grid = {"learning_rate" : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] , "max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15], "min_child_weight" : [ 1, 3, 5, 7 ], "gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ], "colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ] }
Titanic - Machine Learning from Disaster
5,213,367
import scipy as sp from sklearn import metrics<compute_test_metric>
def tune_hyperparameters(model, param_grid): global best_params, best_scores grid = GridSearchCV(model,param_grid, verbose=0, cv=10, scoring='accuracy', n_jobs=-1) grid.fit(X_train, y_train) best_params, best_scores = grid.best_params_, np.round(grid.best_score_*100,2) return best_params, best_scores
Titanic - Machine Learning from Disaster
5,213,367
class OptimizedRounder(object): def __init__(self): self.coef_ = 0 def _kappa_loss(self, coef, X, y): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic') return -ll def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5, 3.5] self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead') print(-loss_partial(self.coef_['x'])) def predict(self, X, coef): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 return X_p def coefficients(self): return self.coef_['x']<load_pretrained>
tune_hyperparameters(LR, param_grid=lr_params) lr_best_params, lr_best_score = best_params,best_scores print('Logistic Regression Best Score:', lr_best_score) print('And Best Parameters:', lr_best_params )
Titanic - Machine Learning from Disaster
5,213,367
learn.load('best_accuracy');<predict_on_test>
tune_hyperparameters(KNN, param_grid=knn_params) knn_best_params, knn_best_score = best_params,best_scores print('KNN Best Score:', knn_best_score) print('And Best Parameters:', knn_best_params )
Titanic - Machine Learning from Disaster
5,213,367
valid_preds = learn.get_preds(ds_type=DatasetType.Valid )<train_model>
tune_hyperparameters(DT, param_grid=dt_params) dt_best_params, dt_best_score = best_params,best_scores print('DT Best Score:', dt_best_score) print('And Best Parameters:', dt_best_params )
Titanic - Machine Learning from Disaster
5,213,367
optR = OptimizedRounder() optR.fit(valid_preds[0],valid_preds[1]) coefficients = optR.coefficients() coefficients=np.around(( coefficients), decimals=2) print(coefficients )<predict_on_test>
tune_hyperparameters(RF, param_grid=rf_params) rf_best_params, rf_best_score = best_params,best_scores print('RF Best Score:', rf_best_score) print('And Best Parameters:', rf_best_params )
Titanic - Machine Learning from Disaster
5,213,367
preds,y = learn.get_preds(DatasetType.Test )<save_to_csv>
tune_hyperparameters(XGB, param_grid=xgb_params) xgb_best_params, xgb_best_score = best_params,best_scores print('SVC Best Score:', xgb_best_score) print('And Best Parameters:', xgb_best_params )
Titanic - Machine Learning from Disaster
5,213,367
tst_pred = optR.predict(preds, coefficients) test_df.diagnosis = tst_pred.astype(int) test_df.to_csv('submission.csv',index=False )<count_values>
tune_hyperparameters(SVM, param_grid=svc_params) svc_best_params, svc_best_score = best_params,best_scores print('SVC Best Score:', svc_best_score) print('And Best Parameters:', svc_best_params )
Titanic - Machine Learning from Disaster
5,213,367
test_df['diagnosis'].value_counts()<define_variables>
lr = LogisticRegression(**lr_best_params) knn = KNeighborsClassifier(**knn_best_params) dt = DecisionTreeClassifier(**dt_best_params) rf = RandomForestClassifier(**rf_best_params) svc = SVC(**svc_best_params) xgb = XGBClassifier(**xgb_best_params )
Titanic - Machine Learning from Disaster
5,213,367
data.show_batch()<import_modules>
models = {'LR':lr,'KNN':knn,'DT':dt,'RF':rf,'SVC':svc, 'XGB':xgb} score = [] for x,(keys, items)in enumerate(models.items()): items.fit(X_train,y_train) scores = cross_val_score(items, X_train, y_train, cv = 10, scoring='accuracy')*100 score.append(scores.mean()) print('Mean Accuracy: %0.4f(+/- %0.4f)[%s]' %(scores.mean() , scores.std() , keys))
Titanic - Machine Learning from Disaster
5,213,367
from fastai.vision import *<feature_engineering>
model_prediction = pd.DataFrame({'LR':lr.predict(X_test), 'KNN':knn.predict(X_test), 'DT':dt.predict(X_test),'RF':rf.predict(X_test), 'SVC':svc.predict(X_test), 'XGB':xgb.predict(X_test)}) model_prediction.head()
Titanic - Machine Learning from Disaster
5,213,367
vision.data.open_image=temp<set_options>
submisson = pd.DataFrame({"PassengerID":test["PassengerId"],"Survived":rf.predict(X_test)}) submisson.to_csv('submisson_rf.csv',index=False )
Titanic - Machine Learning from Disaster
5,213,367
%reload_ext autoreload %autoreload 2 %matplotlib inline<import_modules>
submission = pd.DataFrame({'PassengerId':test['PassengerId'],'Survived':svc.predict(X_test)}) submission.to_csv("submission_svc.csv", index = False )
Titanic - Machine Learning from Disaster
5,213,367
<define_variables><EOS>
submission = pd.DataFrame({'PassengerId':test['PassengerId'],'Survived':xgb.predict(X_test)}) submission.to_csv("submission_xgb.csv", index = False )
Titanic - Machine Learning from Disaster
5,043,030
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
plt.style.use('ggplot') warnings.simplefilter(action='ignore', category=FutureWarning )
Titanic - Machine Learning from Disaster
5,043,030
train = pd.read_csv(path/'.. /input/aptos2019-blindness-detection/train.csv')[:100] test = pd.read_csv(path/'.. /input/aptos2019-blindness-detection/sample_submission.csv') <data_type_conversions>
dataset = pd.read_csv('.. /input/titanic/train.csv') dataset.head()
Titanic - Machine Learning from Disaster
5,043,030
test2 = test.copy() test2['id_code'] = np.arange(0,test.shape[0],dtype=np.int ).astype('str' )<feature_engineering>
dataset.isnull().sum(axis=0 )
Titanic - Machine Learning from Disaster
5,043,030
tfms = get_transforms(flip_vert=True,max_rotate = 10,max_warp = 0,max_zoom =1.05,max_lighting = 0 )<concatenate>
dataset = dataset.fillna({"Embarked": "S"} )
Titanic - Machine Learning from Disaster
5,043,030
tfms[1].append(tfms[0][1]) tfms[1].append(tfms[0][2]) <set_options>
dataset = pd.get_dummies(dataset, columns=['Sex']) dataset.head()
Titanic - Machine Learning from Disaster
5,043,030
gc.collect() <define_variables>
dataset = pd.get_dummies(dataset, columns=['Embarked']) dataset.head()
Titanic - Machine Learning from Disaster
5,043,030
image_dic = {}<load_pretrained>
feat_names = ['Pclass', 'Sex_male', 'Sex_female', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'Parch', 'SibSp', 'Fare'] targ_names = ['Dead(0)', 'Survived(1)'] train_class = dataset[['Survived']] train_feature = dataset[feat_names] train_feature.head()
Titanic - Machine Learning from Disaster
5,043,030
def load_image_on_ram(p,names,k): for i in tqdm(names): path= p+i+'.png' image = read_image2(path,k) image_dic[i] = image<define_variables>
clf = DecisionTreeClassifier(random_state=0) scoring = {'acc': 'accuracy', 'prec_macro': 'precision_macro', 'rec_macro': 'recall_macro', 'f1_macro': 'f1_macro'} scores = cross_validate(clf, train_feature, train_class, cv=10, scoring=scoring) print('Accuracy score : %.3f' % scores['test_acc'].mean()) print('Precisoin score : %.3f' % scores['test_prec_macro'].mean()) print('Recall score : %.3f' % scores['test_rec_macro'].mean()) print('F1 score : %.3f' % scores['test_f1_macro'].mean() )
Titanic - Machine Learning from Disaster
5,043,030
path1 = '.. /input/aptos2019-blindness-detection/train_images/' path2 = '.. /input/aptos2019-blindness-detection/test_images/'<load_pretrained>
para_grid = { 'min_samples_split' : range(10,500,20), 'max_depth': range(1,20,2), 'criterion':("gini", "entropy") } clf_tree = DecisionTreeClassifier() clf_cv = GridSearchCV(clf_tree, para_grid, scoring='accuracy', cv=5, n_jobs=-1) clf_cv.fit(train_feature,train_class) best_parameters = clf_cv.best_params_ print(best_parameters )
Titanic - Machine Learning from Disaster
5,043,030
load_image_on_ram(path1,train.id_code.unique() ,1 )<load_pretrained>
clf = clf_cv.best_estimator_ scoring = {'acc': 'accuracy', 'prec_macro': 'precision_macro', 'rec_macro': 'recall_macro', 'f1_macro': 'f1_macro'} scores = cross_validate(clf, train_feature, train_class, cv=10, scoring=scoring) print('Accuracy score : %.3f' % scores['test_acc'].mean()) print('Precisoin score : %.3f' % scores['test_prec_macro'].mean()) print('Recall score : %.3f' % scores['test_rec_macro'].mean()) print('F1 score score : %.3f' % scores['test_f1_macro'].mean() )
Titanic - Machine Learning from Disaster
5,043,030
image_type = lambda x : load_image_on_ram(path2,test.id_code.unique() ,x )<categorify>
clf2 = clf_cv.best_estimator_ clf2.fit(X_train,y_train) predictions = clf2.predict(X_test) print(metrics.classification_report(y_test,predictions, target_names=targ_names, digits=3))
Titanic - Machine Learning from Disaster
5,043,030
def im_b(path=""): img = path.split('/')[-1] image = image_dic[img]/255 return image <categorify>
fig, ax = plt.subplots(figsize=(3,3)) cm = ConfusionMatrix(clf2, classes=[0, 1], cmap='RdPu') cm.score(X_test, y_test) for label in cm.ax.texts: label.set_size(14) cm.poof()
Titanic - Machine Learning from Disaster
5,043,030
class MyImageItemList(ImageList): def open(self,path:PathOrStr)->Image: image = im_load(path) xx = vision.Image(px=pil2tensor(image,np.float32)) return xx<load_pretrained>
modelviz = clf_cv.best_estimator_ visualizer = ROCAUC(modelviz, classes=["Dead", "Survived"]) visualizer.fit(X_train, y_train) visualizer.score(X_test, y_test) visualizer.show()
Titanic - Machine Learning from Disaster
5,043,030
im_load = im_b<train_model>
data = export_graphviz(clf,out_file=None,feature_names=feat_names,class_names=targ_names, filled=True, rounded=True, special_characters=True) graph = graphviz.Source(data) graph
Titanic - Machine Learning from Disaster
5,043,030
def get_data(sz=256): data =(MyImageItemList.from_df(df = train,path = '',folder = path1) .split_by_rand_pct (.15) .label_from_df(cols = 'diagnosis' ) .add_test(MyImageItemList.from_df(df = test,path='',folder =path2)) .transform(tfms,size=sz,resize_method = ResizeMethod.SQUISH, padding_mode = 'zeros') .databunch(bs = 8,num_workers = 4)) data.normalize(imagenet_stats) return data data = get_data(400 )<define_variables>
test = pd.read_csv('.. /input/titanic/test.csv') clf.fit(train_feature, train_class) meanFare = dataset['Fare'].mean() test = test.fillna({"Fare": meanFare}) test = pd.get_dummies(test, columns=['Sex']) test = pd.get_dummies(test, columns=['Embarked']) ids = test['PassengerId'] test_feature = test[feat_names] predictions = clf.predict(test_feature) output = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions }) output.head()
Titanic - Machine Learning from Disaster
5,043,030
<load_pretrained><EOS>
output.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
5,890,064
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<install_modules>
def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn
Titanic - Machine Learning from Disaster
5,890,064
!cp '.. /input/ef5-ahe/ef5_ah_400_1.pth' models/ !cp '.. /input/ef5-400/ef5_400_1.pth' models/ !cp '.. /input/ef5-b15/ef5_b15_400_1.pth' models/ !cp '.. /input/ef5-b15/ef5_b15_320_1.pth' models/ !cp '.. /input/ef5-b-w-400/ef5_b_400_2.pth' models/ !cp '.. /input/ef4-b-ls-400/ef5_b_400_1.pth' models/ <train_model>
test = pd.read_csv('.. /input/titanic/test.csv') test['Boy'] =(test.Name.str.split().str[1] == 'Master.' ).astype('int') submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': pd.Series(dtype='int32')}) test['Survived'] = [1 if(x == 'female')else 0 for x in test['Sex']] test.loc[(test.Boy == 1), 'Survived'] = 1 test.loc[(( test.Pclass == 3)&(test.Embarked == 'S')) , 'Survived'] = 0 submission.Survived = test.Survived submission.to_csv('submission_S_Boy_Sex.csv', index=False )
Titanic - Machine Learning from Disaster
5,890,064
<compute_test_metric><EOS>
def highlight(value): if value >= 0.5: style = 'background-color: palegreen' else: style = 'background-color: pink' return style train = pd.read_csv('.. /input/titanic/train.csv') pd.pivot_table(train, values='Survived', index=['Sex'] ).style.applymap(highlight )
Titanic - Machine Learning from Disaster
11,166,309
<set_options><EOS>
import pandas as pd import numpy as np
Titanic - Machine Learning from Disaster
11,166,309
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
import pandas as pd import numpy as np
Titanic - Machine Learning from Disaster
11,166,309
class OptimizedRounder(object): def __init__(self): self.coef_ = 0 def _kappa_loss(self, coef, X, y): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic') return -ll def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5, 3.5] self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead') print(-loss_partial(self.coef_['x'])) def predict(self, X, coef): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 return X_p def coefficients(self): return self.coef_['x'] def run_subm(learn=learn, coefficients=[0.5, 1.5, 2.5, 3.5]): opt = OptimizedRounder() preds,y = learn.TTA(scale=1,ds_type=DatasetType.Test) tst_pred = opt.predict(preds, coefficients) test_df.diagnosis = tst_pred.astype(int) test_df.to_csv('submission.csv',index=False) return test_df <prepare_output>
test = pd.read_csv('.. /input/titanic/test.csv') train = pd.read_csv('.. /input/titanic/train.csv' )
Titanic - Machine Learning from Disaster
11,166,309
learn.data = get_data(400 )<load_pretrained>
testPassengerIds = test['PassengerId'] testPassengerIds.head()
Titanic - Machine Learning from Disaster
11,166,309
learn.load('ef5_b_400_1');<feature_engineering>
train.drop(['PassengerId', 'Name', 'Ticket'], inplace = True, axis = 1) test.drop(['PassengerId', 'Name', 'Ticket'], inplace = True, axis = 1 )
Titanic - Machine Learning from Disaster
11,166,309
%%time preds1 = learn.TTA(beta =.2, scale=1,ds_type=DatasetType.Test )<define_search_space>
train.isnull().any()
Titanic - Machine Learning from Disaster
11,166,309
v1 = torch.tensor([1.0,1.0,.7,.7,1.0] )<load_pretrained>
test.isnull().any()
Titanic - Machine Learning from Disaster
11,166,309
learn.load('ef5_b_400_2');<set_options>
train['Cabin'].fillna(0, inplace = True) def getCabin(value): val_dict = { 'A' : 6, 'B' : 5, 'C' : 4, 'D' : 3, 'E' : 2, 'F' : 1, 'T' : 1 } return val_dict.get(str(value)[0], 0) train['Cabin'] = train["Cabin"].apply(getCabin) test['Cabin'] = test['Cabin'].apply(getCabin )
Titanic - Machine Learning from Disaster
11,166,309
gc.collect()<feature_engineering>
train['Embarked'].fillna(train['Embarked'].mode().item() , inplace = True) test['Embarked'].fillna(train['Embarked'].mode().item() , inplace = True )
Titanic - Machine Learning from Disaster
11,166,309
%%time preds2 = learn.TTA(beta =.2, scale=1,ds_type=DatasetType.Test )<define_search_space>
train = pd.get_dummies(train, drop_first=True) test = pd.get_dummies(test, drop_first=True )
Titanic - Machine Learning from Disaster
11,166,309
v2 = torch.tensor([1.0,.7,1.0,.7,1.0] )<prepare_output>
train.isnull().any() | test.isnull().any()
Titanic - Machine Learning from Disaster
11,166,309
learn.data = get_data(400 )<set_options>
def correctedLog(value): return np.log(1 + value) train['Status'] = train['Pclass'] + train['Fare'].apply(correctedLog) test['Status'] = test['Pclass'] + test['Fare'].apply(correctedLog )
Titanic - Machine Learning from Disaster
11,166,309
gc.collect()<set_options>
train['RootAgeTimesClass'] = train['Age'].apply(np.sqrt)* train['Pclass'] test['RootAgeTimesClass'] = test['Age'].apply(np.sqrt)* test['Pclass']
Titanic - Machine Learning from Disaster
11,166,309
learn.load('ef5_b15_400_1'); gc.collect()<feature_engineering>
train['FamilySize'] = train['SibSp'] + train['Parch'] + 1 test['FamilySize'] = train['SibSp'] + train['Parch'] + 1
Titanic - Machine Learning from Disaster
11,166,309
%%time preds3 = learn.TTA(beta =.2, scale=1,ds_type=DatasetType.Test )<define_search_space>
train['Young'] = train['Age'] <= train['Age'].mean() test['Young'] = test["Age"] <= train['Age'].mean()
Titanic - Machine Learning from Disaster
11,166,309
v3 = torch.tensor([1.0,1.0,.5,1.0,1.0] )<prepare_x_and_y>
train['YoungMale'] = train['Young'] & train['Sex_male'] test['YoungMale'] = test["Young"] & test['Sex_male']
Titanic - Machine Learning from Disaster
11,166,309
learn.data = get_data(320) <load_pretrained>
X = train.iloc[:, 1: ] y = train.iloc[:, 0] y.shape, X.shape
Titanic - Machine Learning from Disaster
11,166,309
learn.load('ef5_b15_320_1');<set_options>
train['Fare'], maxlog = boxcox(train['Fare'] + 1) test['Fare'] = boxcox(test['Fare'] + 1, lmbda = maxlog )
Titanic - Machine Learning from Disaster
11,166,309
gc.collect()<feature_engineering>
from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score
Titanic - Machine Learning from Disaster
11,166,309
%%time preds4 = learn.TTA(beta =.2, scale=1,ds_type=DatasetType.Test )<define_search_space>
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state =55, test_size = 0.2, shuffle = True) y_train.shape, y_val.shape
Titanic - Machine Learning from Disaster
11,166,309
v4 = torch.tensor([1.0,1.0,.5,1.0,1.0] )<find_best_params>
np.random.seed(0 )
Titanic - Machine Learning from Disaster
11,166,309
image_type(3) learn.data = get_data(400 )<set_options>
paramDict = { 'n_estimators' : [5, 10, 25, 50, 75, 100, 200, 500], 'max_depth' : [4, 8, 10, 15, 20, 50], } model = RandomForestClassifier(n_jobs = 8) clf = GridSearchCV(estimator=model, param_grid=paramDict, n_jobs=10 )
Titanic - Machine Learning from Disaster
11,166,309
learn.load('ef5_ah_400_1') gc.collect()<feature_engineering>
clf.fit(X_train, y_train )
Titanic - Machine Learning from Disaster
11,166,309
%%time preds5 = learn.TTA(beta =.2, scale=1,ds_type=DatasetType.Test )<define_search_space>
clf.best_params_, clf.best_score_
Titanic - Machine Learning from Disaster
11,166,309
v5 = torch.tensor([1.0,.7,.7,1.0,.7] )<set_options>
f1_score(clf.predict(X_val), y_val )
Titanic - Machine Learning from Disaster
11,166,309
gc.collect()<set_options>
finalModel = RandomForestClassifier(**clf.best_params_) finalModel.fit(X, y) y_preds = finalModel.predict(test) file_name = "Submission_16_08_6.csv" y_pred_series = pd.Series(y_preds.flatten() , name = 'Survived') file = pd.concat([testPassengerIds, y_pred_series], axis = 1) file.to_csv(file_name, index = False);
Titanic - Machine Learning from Disaster
7,959,083
learn.load('ef5_400_1') gc.collect()<feature_engineering>
import seaborn as sns import matplotlib.pyplot as plt
Titanic - Machine Learning from Disaster
7,959,083
%%time preds6 = learn.TTA(beta =.2, scale=1,ds_type=DatasetType.Test )<prepare_output>
gender_submission = pd.read_csv(".. /input/titanic/gender_submission.csv") test_raw = pd.read_csv(".. /input/titanic/test.csv", index_col='PassengerId') train_raw = pd.read_csv(".. /input/titanic/train.csv",index_col='PassengerId' )
Titanic - Machine Learning from Disaster
7,959,083
test_preds = torch.argmax(preds,dim=1 )<save_to_csv>
X=train_raw.drop('Survived', axis=1) y=train_raw['Survived'] X_cb=pd.concat([X, test_raw], axis=0) test=test_raw train_idx=list(train_raw.index) test_idx=list(test_raw.index )
Titanic - Machine Learning from Disaster
7,959,083
test['diagnosis'] = np.array(test_preds,dtype = np.int) test.to_csv('submission.csv',index = False )<count_values>
X_cb.isnull().sum().sort_values(ascending=False )
Titanic - Machine Learning from Disaster
7,959,083
test['diagnosis'].value_counts()<set_options>
class_fare=test.groupby(['Pclass','Embarked'])['Fare'].mean() test['Fare'].fillna(class_fare[3]['S'], inplace=True )
Titanic - Machine Learning from Disaster
7,959,083
%reload_ext autoreload %autoreload 2 <define_variables>
X['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
7,959,083
SEED = 1234 SIZE = 224 PATH = ".. /input/aptos2019-blindness-detection"<set_options>
X['Embarked'].fillna(X['Embarked'].mode() [0], inplace=True )
Titanic - Machine Learning from Disaster
7,959,083
def seed_everything(seed): random.seed(seed) os.environ["PYTHONHASHSEED"] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(SEED )<load_from_csv>
X[['Last','First']]=X['Name'].str.split(',', expand=True) X[['Title', 'FName']]=X['First'].str.split('.', n=1, expand=True) X['Title']=X['Title'].str.strip() test[['Last','First']]=test['Name'].str.split(',', expand=True) test[['Title', 'FName']]=test['First'].str.split('.', n=1, expand=True) test['Title']=test['Title'].str.strip()
Titanic - Machine Learning from Disaster
7,959,083
train_df = pd.read_csv(PATH+"/train.csv") sub = pd.read_csv(PATH+"/sample_submission.csv" )<define_variables>
X['Adult']='' for i in list(X.index): if X['Age'].loc[i]>8: X['Adult'].loc[i]=1 elif X['Age'].loc[i]<=8: X['Adult'].loc[i]=0 else: if X['Title'].loc[i]=='Master': X['Adult'].loc[i]=0 else: X['Adult'].loc[i]=1 test['Adult']='' for i in list(test.index): if test['Age'].loc[i]>8: test['Adult'].loc[i]=1 elif test['Age'].loc[i]<=8: test['Adult'].loc[i]=0 else: if test['Title'].loc[i]=='Master': test['Adult'].loc[i]=0 else: test['Adult'].loc[i]=1
Titanic - Machine Learning from Disaster
7,959,083
train = ImageList.from_df(train_df, path=PATH, cols='id_code', folder="train_images", suffix='.png') test = ImageList.from_df(sub, path=PATH, cols='id_code', folder="test_images", suffix='.png' )<compute_test_metric>
X['Family_Size']=X['SibSp']+X['Parch']+1 X["With_Family"]=X.apply(lambda row: 1 if row['Family_Size']>1 else 0, axis=1) test['Family_Size']=test['SibSp']+test['Parch']+1 test["With_Family"]=test.apply(lambda row: 1 if row['Family_Size']>1 else 0, axis=1) count=X['Ticket'].value_counts().to_frame().reset_index() count.columns=['Ticket','Person_on_Ticket'] X=pd.merge(X,count,how='left',on='Ticket' ).set_index(X.index) count_t=test['Ticket'].value_counts().to_frame().reset_index() count_t.columns=['Ticket','Person_on_Ticket'] test=pd.merge(test,count_t,how='left',on='Ticket' ).set_index(test.index )
Titanic - Machine Learning from Disaster
7,959,083
def quadratic_kappa(y_hat, y): return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'),device='cuda:0' )<import_modules>
X.drop(['Name','Age','Ticket','First','FName','Cabin'], axis=1, inplace=True) test.drop(['Name','Age','Ticket','First','FName','Cabin'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
7,959,083
from sklearn.model_selection import StratifiedKFold from tqdm import tqdm<load_pretrained>
X=pd.get_dummies(X) test=pd.get_dummies(test) X.head()
Titanic - Machine Learning from Disaster
7,959,083
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state = SEED) model_name = 'densenet201' bs = 64 predictions = torch.from_numpy(np.zeros(( len(sub)))) for fold,(train_index, val_index)in tqdm(enumerate(skf.split(train_df["id_code"], train_df["diagnosis"]))): print(fold) filename = model_name + "fold_" + str(fold)+".pkl" print("Fold:", filename) print("TRAIN:", train_index, "VALIDATE:", val_index) data_fold =(ImageList.from_df(train_df, PATH, folder='train_images' ,cols="id_code",suffix='.png') .split_by_idxs(train_index, val_index) .label_from_df(cols='diagnosis', label_cls=FloatList) .transform(get_transforms() , size=SIZE) .databunch(bs=bs ).normalize(imagenet_stats) ) learn = cnn_learner(data_fold, models.densenet201, metrics=[quadratic_kappa], pretrained=True) learn.fit_one_cycle(5, 1e-2) learn.data.add_test(ImageList.from_df(sub ,PATH ,folder='test_images',suffix='.png')) test_predsx, _ = learn.get_preds(ds_type=DatasetType.Test) if(fold == 0): test_preds = test_predsx else: test_preds = test_predsx + test_preds if(fold == 2): valid_preds, valid_y = learn.get_preds(ds_type=DatasetType.Valid )<compute_test_metric>
only_in_train_col=list(set(X.columns)-set(test.columns)) only_in_test_col=list(set(test.columns)-set(X.columns)) for col in only_in_train_col: test[col]=0 for col in only_in_test_col: test.drop(col, axis=1, inplace=True) test=test[X.columns]
Titanic - Machine Learning from Disaster
7,959,083
class OptimizedRounder(object):
    """Learns the four cut points that map continuous regression outputs onto
    the ordinal classes 0..4 by directly maximising quadratic-weighted kappa.

    The original duplicated the same five-way threshold chain in both
    _kappa_loss and predict; that logic now lives once in _bucket.
    """

    def __init__(self):
        self.coef_ = 0  # replaced by a scipy OptimizeResult after fit()

    @staticmethod
    def _bucket(X, coef):
        # pred < coef[0] -> 0, coef[0] <= pred < coef[1] -> 1, ...,
        # pred >= coef[3] -> 4.  np.digitize implements exactly this binning
        # (right-open bins); cast back to float to keep the dtype the
        # original element-wise copy-and-assign loop produced.
        return np.digitize(X, coef).astype(float)

    def _kappa_loss(self, coef, X, y):
        # Negated kappa so that scipy's minimiser maximises agreement.
        X_p = self._bucket(X, coef)
        ll = cohen_kappa_score(y, X_p, weights='quadratic')
        return -ll

    def fit(self, X, y):
        """Optimise the thresholds on continuous predictions X vs labels y."""
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]  # midpoints between classes
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')

    def predict(self, X, coef):
        """Bucket continuous predictions X using threshold vector coef."""
        return self._bucket(X, coef)

    def coefficients(self):
        """Return the fitted threshold vector."""
        return self.coef_['x']
# Reproducible 5-fold splitter plus a 70/30 hold-out split for comparison.
kf = KFold(n_splits=5, random_state=0, shuffle=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, random_state=0)


def cv_scores(model, X):
    """Return the 5-fold cross-validation scores of *model* on features X.

    NOTE(review): the target is taken from the module-level ``y``, not a
    parameter — X must stay row-aligned with it.
    """
    return cross_val_score(model, X, y, cv=5)


warnings.filterwarnings(action="ignore")
Titanic - Machine Learning from Disaster
7,959,083
# Average the raw test predictions accumulated over the 5 folds.
test_preds = test_preds/5
# Fit class-boundary thresholds on the saved validation predictions
# (fold 2 only — see the training loop).
optR = OptimizedRounder()
optR.fit(valid_preds, valid_y)
coefficients = optR.coefficients()
# Predictions are (N, 1), hence the [:, 0] after bucketing into classes 0-4.
valid_predictions = optR.predict(valid_preds, coefficients)[:,0].astype(int)
test_predictions = optR.predict(test_preds, coefficients)[:,0].astype(int)
# Quadratic-weighted kappa of the thresholded validation predictions.
valid_score = cohen_kappa_score(valid_y.numpy().astype(int), valid_predictions, weights="quadratic" )
# Candidate classifiers for the Titanic comparison.
# NOTE(review): n_estimators / learning rates look hand-tuned; the boosting
# models carry no random_state, so repeated runs may differ slightly.
dt=DecisionTreeClassifier(random_state=0)
rf=RandomForestClassifier(n_estimators=100, max_features=None, max_depth=None)
ada=AdaBoostClassifier(n_estimators=100,learning_rate=0.4)
gdb=GradientBoostingClassifier(n_estimators=100, learning_rate=0.4)
xgb=XGBClassifier(learning_rate=0.5 )
Titanic - Machine Learning from Disaster
7,959,083
# Quadratic-weighted kappa of the thresholded validation predictions
# (recomputes exactly the score already produced in the previous cell).
valid_score = cohen_kappa_score(valid_y.numpy().astype(int), valid_predictions, weights="quadratic" )
# Mean 5-fold CV score for each candidate model (one CV run per model).
cv_score_dt, cv_score_rf, cv_score_ada, cv_score_gdb, cv_score_xgb = (
    cv_scores(model, X).mean() for model in (dt, rf, ada, gdb, xgb)
)
Titanic - Machine Learning from Disaster
7,959,083
# Report the tuned thresholds and the resulting hold-out kappa.
print(f"coefficients: {coefficients}")
print(f"validation score: {valid_score}")
# Report mean and std of the 5-fold CV score for every model.  The original
# called cv_scores() twice per model — once for .mean() and once for
# .std() — running each 5-fold cross-validation twice for no benefit; the
# scores array is now computed once per model and reused.
for label, model in (('dt', dt), ('rf', rf), ('ada', ada), ('gdb', gdb), ('xgb', xgb)):
    scores = cv_scores(model, X)
    print(f' {label} cv score: ', scores.mean(), 'std: ', scores.std())
Titanic - Machine Learning from Disaster
7,959,083
# Store the thresholded test predictions and write the submission file.
sub['diagnosis'] = test_predictions
sub.to_csv("submission.csv", index=None)
sub.head()
# Train every candidate classifier on the 70% split.
for model in (dt, rf, ada, gdb, xgb):
    model.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
7,959,083
# Inline plotting plus fixed RNG seeds for numpy and TensorFlow so the
# notebook run is reproducible.
%matplotlib inline
np.random.seed(2019)
tf.set_random_seed(2019)
# Hold-out predictions from each fitted model, in the same model order.
y_dt, y_rf, y_ada, y_gdb, y_xgb = (
    model.predict(X_valid) for model in (dt, rf, ada, gdb, xgb)
)
Titanic - Machine Learning from Disaster
7,959,083
# Load the APTOS train/test id lists and show the label frame's head.
# NOTE(review): paths appear garbled ('.. /input') in this dump — the usual
# Kaggle path is '../input/...'; confirm before running.
train_df = pd.read_csv('.. /input/aptos2019-blindness-detection/train.csv')
test_df = pd.read_csv('.. /input/aptos2019-blindness-detection/test.csv')
print(train_df.shape)
print(test_df.shape)
train_df.head()
# Hold-out accuracy for every model, emitted in a single print call.
# NOTE(review): the ' ' separators look like garbled newlines from the dump.
print(' dt accuracy: ',accuracy_score(y_valid,y_dt),
      ' ',
      'rf accuracy: ',accuracy_score(y_valid,y_rf),
      ' ',
      'ada accuracy: ',accuracy_score(y_valid,y_ada),
      ' ',
      'gdb accuracy: ',accuracy_score(y_valid,y_gdb),
      ' ',
      'xgb accuracy: ',accuracy_score(y_valid,y_xgb),
      )
Titanic - Machine Learning from Disaster
7,959,083
# Pre-load every training image into a single uint8 tensor (N, 224, 224, 3).
# NOTE(review): assumes preprocess_image returns a 224x224 RGB array and
# that the garbled '.. /input' path should be '../input' — confirm both.
N = train_df.shape[0]
x_train = np.empty((N, 224, 224, 3), dtype=np.uint8)
for i, image_id in enumerate(tqdm(train_df['id_code'])):
    x_train[i, :, :, :] = preprocess_image(
        f'.. /input/aptos2019-blindness-detection/train_images/{image_id}.png'
    )
# Final test-set predictions from the fitted gradient-boosting model.
test_pred=gdb.predict(test )
Titanic - Machine Learning from Disaster
7,959,083
<categorify><EOS>
# Assemble and save the Kaggle submission.
# NOTE(review): assumes the test frame is indexed by PassengerId — confirm
# against how `test` was loaded.
output=pd.DataFrame({'PassengerId': test.index, 'Survived': test_pred})
output.to_csv('my_submission.csv', index=False )
Titanic - Machine Learning from Disaster
2,064,952
<SOS> metric: categorization accuracy; Kaggle data source: titanic-machine-learning-from-disaster<categorify>
%%javascript $.getScript('https://kmahelona.github.io/ipython_notebook_goodies/ipython_notebook_toc.js' )
Titanic - Machine Learning from Disaster
2,064,952
# Re-encode one-hot ordinal labels as a cumulative multilabel target:
# column i is set whenever the true class is >= i, so class k produces 1s
# in columns 0..k.  Built right-to-left by OR-ing each column with the one
# above it.
y_train_multi = np.empty(y_train.shape, dtype=y_train.dtype)
y_train_multi[:, 4] = y_train[:, 4]  # top class copied unchanged
for col in (3, 2, 1, 0):
    y_train_multi[:, col] = np.logical_or(y_train[:, col], y_train_multi[:, col + 1])
print("Original y_train:", y_train.sum(axis=0))
print("Multilabel version:", y_train_multi.sum(axis=0))
# Notebook display setup, warning suppression, and data loading.
# NOTE(review): '.. /input' paths look garbled in this dump — the standard
# Kaggle path is '../input/...'.
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set(style='white', context='notebook', palette='deep')
warnings.filterwarnings('ignore')
train = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv")
# Keep the test ids for building the submission file later.
IDtest = test["PassengerId"]
# Quick schema and missing-value overview of both frames.
train.info()
test.info()
Titanic - Machine Learning from Disaster
2,064,952
# 50/50 train/validation split; note x_train and y_train are rebound in
# place (y_train now holds the multilabel encoding).
# NOTE(review): no stratification — class balance may differ between halves.
x_train, x_val, y_train, y_val = train_test_split(
    x_train, y_train_multi, test_size=0.50, random_state=2019
)
# Percentage of missing values per training column; keep only columns that
# actually have gaps, largest first, capped at 30 rows.
miss_frac = train.isnull().mean() * 100
train_na = miss_frac[miss_frac > 0].sort_values(ascending=False)[:30]
miss_train = pd.DataFrame({'Train Missing Ratio': train_na})
miss_train.head()
Titanic - Machine Learning from Disaster
2,064,952
BATCH_SIZE = 13


def create_datagen():
    # Keras augmentation: mild zoom plus horizontal and vertical flips;
    # pixels exposed by the zoom are filled with constant black (cval=0).
    return ImageDataGenerator(
        zoom_range=0.15,
        fill_mode='constant',
        cval=0.,
        horizontal_flip=True,
        vertical_flip=True,
    )


# Seeded training generator over the preloaded image tensor.
data_generator = create_datagen().flow(x_train, y_train, batch_size=BATCH_SIZE, seed=2019)
# Same missing-value summary as for train, computed on the test frame.
miss_frac = test.isnull().mean() * 100
test_na = miss_frac[miss_frac > 0].sort_values(ascending=False)[:30]
miss_test = pd.DataFrame({'Test Missing Ratio': test_na})
miss_test.head()
Titanic - Machine Learning from Disaster
2,064,952
# DenseNet-121 backbone loaded from locally-stored ImageNet weights (Kaggle
# kernels have no network access); the classification head is added later
# in build_model.
# NOTE(review): '.. /input' in the weights path looks garbled — confirm.
densenet = DenseNet121(
    weights='.. /input/densenet-keras/DenseNet-BC-121-32-no-top.h5',
    include_top=False,
    input_shape=(224, 224, 3)
)
# Normalise missing markers so both frames use np.nan uniformly.
# NOTE(review): fillna(np.nan) is nearly a no-op — it only rewrites None
# entries in object columns as np.nan; confirm this is the intent.
train = train.fillna(np.nan)
test = test.fillna(np.nan)
Titanic - Machine Learning from Disaster
2,064,952
def build_model():
    """DenseNet-121 base + global average pooling + heavy dropout + 5 sigmoid outputs.

    Sigmoid with binary cross-entropy (not softmax) because the targets use
    the cumulative multilabel encoding built earlier (class k -> 1s in
    columns 0..k).
    """
    model = Sequential()
    model.add(densenet)
    model.add(layers.GlobalAveragePooling2D())
    # Aggressive dropout (0.80) — presumably to curb overfitting on the
    # small dataset; worth confirming against validation curves.
    model.add(layers.Dropout(0.80))
    model.add(layers.Dense(5, activation='sigmoid'))
    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(lr=0.00010509613402110064),
        metrics=['accuracy']
    )
    return model
train['source']='train' test['source']='test' combdata = pd.concat([train, test],ignore_index=True) print(train.shape, test.shape, combdata.shape )
Titanic - Machine Learning from Disaster
2,064,952
class Metrics(Callback):
    # Keras callback: computes quadratic-weighted kappa on the validation
    # set after every epoch and saves the model whenever the score improves.
    # NOTE(review): mutable default args (logs={}) kept as-is from original.

    def on_train_begin(self, logs={}):
        # Per-epoch kappa history, used to detect a new best.
        self.val_kappas = []

    def on_epoch_end(self, epoch, logs={}):
        X_val, y_val = self.validation_data[:2]
        # Decode the cumulative multilabel encoding back to a class id:
        # count of 1s minus 1.
        y_val = y_val.sum(axis=1) - 1
        y_pred = self.model.predict(X_val) > 0.5
        y_pred = y_pred.astype(int).sum(axis=1) - 1
        _val_kappa = cohen_kappa_score(
            y_val,
            y_pred,
            weights='quadratic'
        )
        self.val_kappas.append(_val_kappa)
        print(f"val_kappa: {_val_kappa:.4f}")
        # max() includes the value just appended, so equality means "best so far".
        if _val_kappa == max(self.val_kappas):
            print("Validation Kappa has improved.Saving model.")
            self.model.save('model.h5')
        return
# PassengerId is a pure row identifier with no predictive value; drop it
# from the combined frame (the submission ids were saved earlier as IDtest).
combdata.drop(labels = ["PassengerId"], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster