kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
7,258,897
if os.path.exists(PRED_PATH): predictions = [] for index, row in tqdm(df.iterrows() , total = df.shape[0]): image_id = row['image_id'] img_path = PRED_PATH + image_id + '.tiff' img = skimage.io.MultiImage(img_path)[1] patches = tile(img) patches1 = patches.copy() patches2 = patches.copy() k = 0 while k < 42: patches1[k, ] = transforms(image=patches1[k, ])['image'] patches2[k, ] = transforms(image=patches2[k, ])['image'] k += 1 image = np.stack([patches, patches1, patches2]) image = image / 255.0 pred = model.predict(image) isup = np.round(np.mean(pred)) if isup < 0: isup = 0 if isup > 5: isup = 5 predictions.append(int(isup)) del patches, img gc.collect() <save_to_csv>
grid_hard = VotingClassifier(estimators = [('Random Forest', ran), ('Logistic Regression', log), ('XGBoost', xgb), ('Gradient Boosting', gbc), ('Extra Trees', ext), ('AdaBoost', ada), ('Gaussian Process', gpc), ('SVC', svc), ('K Nearest Neighbour', knn), ('Bagging Classifier', bag)], voting = 'hard') grid_hard_cv = model_selection.cross_validate(grid_hard, X_train, y_train, cv = 10) grid_hard.fit(X_train, y_train) print("Hard voting on test set score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() *100))
Titanic - Machine Learning from Disaster
7,258,897
if os.path.exists(PRED_PATH): sub['isup_grade'] = predictions sub.to_csv("submission.csv", index=False) else: sub.to_csv("submission.csv", index=False )<define_variables>
grid_soft = VotingClassifier(estimators = [('Random Forest', ran), ('Logistic Regression', log), ('XGBoost', xgb), ('Gradient Boosting', gbc), ('Extra Trees', ext), ('AdaBoost', ada), ('Gaussian Process', gpc), ('SVC', svc), ('K Nearest Neighbour', knn), ('Bagging Classifier', bag)], voting = 'soft') grid_soft_cv = model_selection.cross_validate(grid_soft, X_train, y_train, cv = 10) grid_soft.fit(X_train, y_train) print("Soft voting on test set score mean: {:.2f}".format(grid_soft_cv['test_score'].mean() *100))
Titanic - Machine Learning from Disaster
7,258,897
<define_variables><EOS>
predictions = grid_soft.predict(X_test) submission = pd.concat([pd.DataFrame(passId), pd.DataFrame(predictions)], axis = 'columns') submission.columns = ["PassengerId", "Survived"] submission.to_csv('titanic_submission.csv', header = True, index = False )
Titanic - Machine Learning from Disaster
5,412,769
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<import_modules>
import numpy as np import pandas as pd from sklearn import ensemble from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from sklearn.impute import SimpleImputer from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier from time import time
Titanic - Machine Learning from Disaster
5,412,769
import skimage.io import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset from efficientnet_pytorch import model as enet import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm <load_from_csv>
train = pd.read_csv('.. /input/titanic/train.csv') X_test = pd.read_csv('.. /input/titanic/test.csv') id_for_subm = X_test['PassengerId'].copy() train.head()
Titanic - Machine Learning from Disaster
5,412,769
data_dir = '.. /input/prostate-cancer-grade-assessment' df_train = pd.read_csv(os.path.join(data_dir, 'train.csv')) df_test = pd.read_csv(os.path.join(data_dir, 'test.csv')) df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) image_folder = os.path.join(data_dir, 'test_images') is_test = os.path.exists(image_folder) image_folder = image_folder if is_test else os.path.join(data_dir, 'train_images') df = df_test if is_test else df_train.loc[:100] n_tiles = 36 tile_size = 256 image_size = 256 batch_size = 8 num_workers = 4 device = torch.device('cuda') print(image_folder )<define_search_model>
X = train.drop('Survived', axis=1) y = train['Survived']
Titanic - Machine Learning from Disaster
5,412,769
class enetv2(nn.Module): def __init__(self, backbone, out_dim): super(enetv2, self ).__init__() self.enet = enet.EfficientNet.from_name(backbone) self.myfc = nn.Linear(self.enet._fc.in_features, out_dim) self.enet._fc = nn.Identity() def extract(self, x): return self.enet(x) def forward(self, x): x = self.extract(x) x = self.myfc(x) return x def load_models(model_files): models = [] for model_dict in model_files: model_f = model_dict['file_name'] backbone = model_dict['model'] model = enetv2(backbone, out_dim=5) model.load_state_dict(torch.load(model_f, map_location=lambda storage, loc: storage), strict=True) model.eval() model.to(device) models.append(model) print(f'{model_f} loaded!') return models <load_pretrained>
def preproc(train, test=[]): num_col = [] cat_col = [] cat_to_encode = [] new_train = train.copy() new_test = test.copy() for col in train.columns: if train[col].dtype == 'object': cat_col.append(col) else: num_col.append(col) num_imp = SimpleImputer(strategy='mean') new_train[num_col] = num_imp.fit_transform(new_train[num_col]) new_test[num_col] = num_imp.transform(new_test[num_col]) cat_imp = SimpleImputer(strategy='most_frequent') new_train[cat_col] = cat_imp.fit_transform(new_train[cat_col]) new_test[cat_col] = cat_imp.transform(new_test[cat_col]) for col in cat_col: a = new_train[col].unique() b = new_test[col].unique() if all(x in b for x in a): cat_to_encode.append(col) else: print('Drop', col) print('Numerical columns:', num_col) print('Categorical columns:', cat_to_encode) for col in cat_to_encode: cat_encoder = LabelEncoder() new_train[col] = cat_encoder.fit_transform(new_train[col]) new_test[col] = cat_encoder.transform(new_test[col]) col_to_keep = num_col+cat_to_encode return new_train[col_to_keep], new_test[col_to_keep]
Titanic - Machine Learning from Disaster
5,412,769
model_files = [ {'file_name':'.. /input/efficientnetb2/efficientnetb2100epoch_best_fold1.pth','model':'efficientnet-b2','n_tiles':25}, {'file_name':'.. /input/panda-public-models/cls_effnet_b0_Rand36r36tiles256_big_bce_lr0.3_augx2_30epo_model_fold0.pth','model':'efficientnet-b0','n_tiles':36}, {'file_name':'.. /input/efficientnetb1/how_to_train_effnet_b0_to_get_LB_0.86_best_fold0.pth','model':'efficientnet-b1','n_tiles':25} ] models = load_models(model_files )<categorify>
X_train, X_test = preproc(X, X_test )
Titanic - Machine Learning from Disaster
5,412,769
def get_tiles(img, mode=0): result = [] h, w, c = img.shape pad_h =(tile_size - h % tile_size)% tile_size +(( tile_size * mode)// 2) pad_w =(tile_size - w % tile_size)% tile_size +(( tile_size * mode)// 2) img2 = np.pad(img,[[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2,pad_w - pad_w//2], [0,0]], constant_values=255) img3 = img2.reshape( img2.shape[0] // tile_size, tile_size, img2.shape[1] // tile_size, tile_size, 3 ) img3 = img3.transpose(0,2,1,3,4 ).reshape(-1, tile_size, tile_size,3) n_tiles_with_info =(img3.reshape(img3.shape[0],-1 ).sum(1)< tile_size ** 2 * 3 * 255 ).sum() if len(img)< n_tiles: img3 = np.pad(img3,[[0,N-len(img3)],[0,0],[0,0],[0,0]], constant_values=255) idxs = np.argsort(img3.reshape(img3.shape[0],-1 ).sum(-1)) [:n_tiles] img3 = img3[idxs] for i in range(len(img3)) : result.append({'img':img3[i], 'idx':i}) return result, n_tiles_with_info >= n_tiles class PANDADataset(Dataset): def __init__(self, df, image_size, n_tiles=n_tiles, tile_mode=0, rand=False, sub_imgs=False ): self.df = df.reset_index(drop=True) self.image_size = image_size self.n_tiles = n_tiles self.tile_mode = tile_mode self.rand = rand self.sub_imgs = sub_imgs def __len__(self): return self.df.shape[0] def __getitem__(self, index): row = self.df.iloc[index] img_id = row.image_id tiff_file = os.path.join(image_folder, f'{img_id}.tiff') image = skimage.io.MultiImage(tiff_file)[1] tiles, OK = get_tiles(image, self.tile_mode) if self.rand: idxes = np.random.choice(list(range(self.n_tiles)) , self.n_tiles, replace=False) else: idxes = list(range(self.n_tiles)) idxes = np.asarray(idxes)+ self.n_tiles if self.sub_imgs else idxes n_row_tiles = int(np.sqrt(self.n_tiles)) images = np.zeros(( image_size * n_row_tiles, image_size * n_row_tiles, 3)) for h in range(n_row_tiles): for w in range(n_row_tiles): i = h * n_row_tiles + w if len(tiles)> idxes[i]: this_img = tiles[idxes[i]]['img'] else: this_img = np.ones(( self.image_size, self.image_size, 3)).astype(np.uint8)* 255 this_img = 255 - 
this_img h1 = h * image_size w1 = w * image_size images[h1:h1+image_size, w1:w1+image_size] = this_img images = images.astype(np.float32) images /= 255 images = images.transpose(2, 0, 1) return torch.tensor(images) <create_dataframe>
clf = RandomForestClassifier() param_grid = {"max_depth": [7, 5, 3], "max_features": [3, 5, 7], "min_samples_split": [2, 3, 5], "bootstrap": [True, False], "criterion": ["gini", "entropy"], "n_estimators": [150, 200, 250, 300]} grid_search = GridSearchCV(clf, param_grid=param_grid, cv=5, iid=False) start = time() grid_search.fit(X_train, y) print("GridSearchCV took %.2f seconds for %d candidate parameter settings." %(time() - start, len(grid_search.cv_results_['params'])) )
Titanic - Machine Learning from Disaster
5,412,769
LOGITS_FINAL = [] for i, model in enumerate(models): LOGITS = [] LOGITS2 = [] n_tiles = model_files[i]['n_tiles'] dataset = PANDADataset(df, image_size, n_tiles, 0) loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) dataset2 = PANDADataset(df, image_size, n_tiles, 2) loader2 = DataLoader(dataset2, batch_size=batch_size, num_workers=num_workers, shuffle=False) with torch.no_grad() : for data in tqdm(loader): data = data.to(device) logits = model(data) LOGITS.append(logits) for data in tqdm(loader2): data = data.to(device) logits = model(data) LOGITS2.append(logits) LOGITS_TMP =(torch.cat(LOGITS ).sigmoid().cpu() + torch.cat(LOGITS2 ).sigmoid().cpu())/ 2 LOGITS_FINAL.append(LOGITS_TMP) model_num = len(LOGITS_FINAL) PREDS = 0 for ans in LOGITS_FINAL: PREDS += ans/model_num PREDS = PREDS.sum(1 ).round().numpy() df['isup_grade'] = PREDS.astype(int) df[['image_id', 'isup_grade']].to_csv('submission.csv', index=False) print(df.head()) print() print(df.isup_grade.value_counts() )<define_variables>
def report(results, n_top=3): for i in range(1, n_top + 1): candidates = np.flatnonzero(results['rank_test_score'] == i) for candidate in candidates: print("Model with rank: {0}".format(i)) print("Mean validation score: {0:.3f}(std: {1:.3f})".format( results['mean_test_score'][candidate], results['std_test_score'][candidate])) print("Parameters: {0}".format(results['params'][candidate])) print("" )
Titanic - Machine Learning from Disaster
5,412,769
DEBUG = False<define_variables>
best = np.argmin(grid_search.cv_results_['rank_test_score']) par = grid_search.cv_results_['params'][best]
Titanic - Machine Learning from Disaster
5,412,769
sys.path = [ '.. /input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master', ] + sys.path<import_modules>
eval_clf = RandomForestClassifier(**par) eval_clf.fit(X_train, y) pred = eval_clf.predict(X_test )
Titanic - Machine Learning from Disaster
5,412,769
import skimage.io import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset from efficientnet_pytorch import model as enet import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm <load_from_csv>
data_to_submit = pd.DataFrame({ 'PassengerId':id_for_subm, 'Survived':pred }) data_to_submit.to_csv('csv_to_submit.csv', index = False )
Titanic - Machine Learning from Disaster
12,918,821
data_dir = '.. /input/prostate-cancer-grade-assessment' df_train = pd.read_csv(os.path.join(data_dir, 'train.csv')) df_test = pd.read_csv(os.path.join(data_dir, 'test.csv')) df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) model_dir = '.. /input/panda-public-models' image_folder = os.path.join(data_dir, 'test_images') is_test = os.path.exists(image_folder) image_folder = image_folder if is_test else os.path.join(data_dir, 'train_images') df = df_test if is_test else df_train.loc[:100] tile_size = 256 image_size = 256 n_tiles = 36 batch_size = 8 num_workers = 4 device = torch.device('cuda') print(image_folder )<define_search_model>
import matplotlib.pyplot as plt import sklearn import seaborn as sb
Titanic - Machine Learning from Disaster
12,918,821
class enetv2(nn.Module): def __init__(self, backbone, out_dim): super(enetv2, self ).__init__() self.enet = enet.EfficientNet.from_name(backbone) self.myfc = nn.Linear(self.enet._fc.in_features, out_dim) self.enet._fc = nn.Identity() def extract(self, x): return self.enet(x) def forward(self, x): x = self.extract(x) x = self.myfc(x) return x def load_models(model_files): models = [] for model_f in model_files: model_f = os.path.join(model_dir, model_f) backbone = 'efficientnet-b0' model = enetv2(backbone, out_dim=5) model.load_state_dict(torch.load(model_f, map_location=lambda storage, loc: storage), strict=True) model.eval() model.to(device) models.append(model) print(f'{model_f} loaded!') return models model_files = [ 'cls_effnet_b0_Rand36r36tiles256_big_bce_lr0.3_augx2_30epo_model_fold0.pth' ] models = load_models(model_files )<categorify>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') train.head()
Titanic - Machine Learning from Disaster
12,918,821
def get_tiles(img, mode=0): result = [] h, w, c = img.shape pad_h =(tile_size - h % tile_size)% tile_size +(( tile_size * mode)// 2) pad_w =(tile_size - w % tile_size)% tile_size +(( tile_size * mode)// 2) img2 = np.pad(img,[[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2,pad_w - pad_w//2], [0,0]], constant_values=255) img3 = img2.reshape( img2.shape[0] // tile_size, tile_size, img2.shape[1] // tile_size, tile_size, 3 ) img3 = img3.transpose(0,2,1,3,4 ).reshape(-1, tile_size, tile_size,3) n_tiles_with_info =(img3.reshape(img3.shape[0],-1 ).sum(1)< tile_size ** 2 * 3 * 255 ).sum() if len(img)< n_tiles: img3 = np.pad(img3,[[0,N-len(img3)],[0,0],[0,0],[0,0]], constant_values=255) idxs = np.argsort(img3.reshape(img3.shape[0],-1 ).sum(-1)) [:n_tiles] img3 = img3[idxs] for i in range(len(img3)) : result.append({'img':img3[i], 'idx':i}) return result, n_tiles_with_info >= n_tiles class PANDADataset(Dataset): def __init__(self, df, image_size, n_tiles=n_tiles, tile_mode=0, rand=False, sub_imgs=False ): self.df = df.reset_index(drop=True) self.image_size = image_size self.n_tiles = n_tiles self.tile_mode = tile_mode self.rand = rand self.sub_imgs = sub_imgs def __len__(self): return self.df.shape[0] def __getitem__(self, index): row = self.df.iloc[index] img_id = row.image_id tiff_file = os.path.join(image_folder, f'{img_id}.tiff') image = skimage.io.MultiImage(tiff_file)[1] tiles, OK = get_tiles(image, self.tile_mode) if self.rand: idxes = np.random.choice(list(range(self.n_tiles)) , self.n_tiles, replace=False) else: idxes = list(range(self.n_tiles)) idxes = np.asarray(idxes)+ self.n_tiles if self.sub_imgs else idxes n_row_tiles = int(np.sqrt(self.n_tiles)) images = np.zeros(( image_size * n_row_tiles, image_size * n_row_tiles, 3)) for h in range(n_row_tiles): for w in range(n_row_tiles): i = h * n_row_tiles + w if len(tiles)> idxes[i]: this_img = tiles[idxes[i]]['img'] else: this_img = np.ones(( self.image_size, self.image_size, 3)).astype(np.uint8)* 255 this_img = 255 - 
this_img h1 = h * image_size w1 = w * image_size images[h1:h1+image_size, w1:w1+image_size] = this_img images = images.astype(np.float32) images /= 255 images = images.transpose(2, 0, 1) return torch.tensor(images) <load_pretrained>
combine = [train,test]
Titanic - Machine Learning from Disaster
12,918,821
dataset = PANDADataset(df, image_size, n_tiles, 0) loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) dataset2 = PANDADataset(df, image_size, n_tiles, 2) loader2 = DataLoader(dataset2, batch_size=batch_size, num_workers=num_workers, shuffle=False )<save_to_csv>
for c in train.columns: print(c, str(100*train[c].isnull().sum() /len(train)))
Titanic - Machine Learning from Disaster
12,918,821
LOGITS = [] LOGITS2 = [] with torch.no_grad() : for data in tqdm(loader): data = data.to(device) logits = models[0](data) LOGITS.append(logits) for data in tqdm(loader2): data = data.to(device) logits = models[0](data) LOGITS2.append(logits) LOGITS =(torch.cat(LOGITS ).sigmoid().cpu() + torch.cat(LOGITS2 ).sigmoid().cpu())/ 2 PREDS = LOGITS.sum(1 ).round().numpy() df['isup_grade'] = PREDS.astype(int) df[['image_id', 'isup_grade']].to_csv('submission.csv', index=False) print(df.head()) print() print(df.isup_grade.value_counts() )<define_variables>
train['Age'] = train['Age'].fillna(train['Age'].mean() )
Titanic - Machine Learning from Disaster
12,918,821
DEBUG = False<define_variables>
for c in train.columns: print(c, str(100*train[c].isnull().sum() /len(train)) )
Titanic - Machine Learning from Disaster
12,918,821
sys.path = [ '.. /input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master', ] + sys.path<import_modules>
dependencies_sex = train[['Sex', 'Survived']].groupby(['Sex'],as_index=False ).mean() dependencies_Pclass = train[['Pclass', 'Survived']].groupby(['Pclass'],as_index=False ).mean() print(dependencies_Pclass) print(dependencies_sex )
Titanic - Machine Learning from Disaster
12,918,821
import skimage.io import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset from efficientnet_pytorch import model as enet import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm <load_from_csv>
for dats in combine: dats['Title'] = dats.Name.str.extract('([A-Za-z]+)\.',expand=False )
Titanic - Machine Learning from Disaster
12,918,821
data_dir = '.. /input/prostate-cancer-grade-assessment' df_train = pd.read_csv(os.path.join(data_dir, 'train.csv')) df_test = pd.read_csv(os.path.join(data_dir, 'test.csv')) df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv')) model_dir = '.. /input/panda-public-models' image_folder = os.path.join(data_dir, 'test_images') is_test = os.path.exists(image_folder) image_folder = image_folder if is_test else os.path.join(data_dir, 'train_images') df = df_test if is_test else df_train.loc[:100] tile_size = 256 image_size = 256 n_tiles = 36 batch_size = 8 num_workers = 4 device = torch.device('cuda') print(image_folder )<define_search_model>
for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs' )
Titanic - Machine Learning from Disaster
12,918,821
class enetv2(nn.Module): def __init__(self, backbone, out_dim): super(enetv2, self ).__init__() self.enet = enet.EfficientNet.from_name(backbone) self.myfc = nn.Linear(self.enet._fc.in_features, out_dim) self.enet._fc = nn.Identity() def extract(self, x): return self.enet(x) def forward(self, x): x = self.extract(x) x = self.myfc(x) return x def load_models(model_files): models = [] for model_f in model_files: model_f = os.path.join(model_dir, model_f) backbone = 'efficientnet-b0' model = enetv2(backbone, out_dim=5) model.load_state_dict(torch.load(model_f, map_location=lambda storage, loc: storage), strict=True) model.eval() model.to(device) models.append(model) print(f'{model_f} loaded!') return models model_files = [ 'cls_effnet_b0_Rand36r36tiles256_big_bce_lr0.3_augx2_30epo_model_fold0.pth' ] models = load_models(model_files )<categorify>
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0 )
Titanic - Machine Learning from Disaster
12,918,821
def get_tiles(img, mode=0): result = [] h, w, c = img.shape pad_h =(tile_size - h % tile_size)% tile_size +(( tile_size * mode)// 2) pad_w =(tile_size - w % tile_size)% tile_size +(( tile_size * mode)// 2) img2 = np.pad(img,[[pad_h // 2, pad_h - pad_h // 2], [pad_w // 2,pad_w - pad_w//2], [0,0]], constant_values=255) img3 = img2.reshape( img2.shape[0] // tile_size, tile_size, img2.shape[1] // tile_size, tile_size, 3 ) img3 = img3.transpose(0,2,1,3,4 ).reshape(-1, tile_size, tile_size,3) n_tiles_with_info =(img3.reshape(img3.shape[0],-1 ).sum(1)< tile_size ** 2 * 3 * 255 ).sum() if len(img)< n_tiles: img3 = np.pad(img3,[[0,N-len(img3)],[0,0],[0,0],[0,0]], constant_values=255) idxs = np.argsort(img3.reshape(img3.shape[0],-1 ).sum(-1)) [:n_tiles] img3 = img3[idxs] for i in range(len(img3)) : result.append({'img':img3[i], 'idx':i}) return result, n_tiles_with_info >= n_tiles class PANDADataset(Dataset): def __init__(self, df, image_size, n_tiles=n_tiles, tile_mode=0, rand=False, sub_imgs=False ): self.df = df.reset_index(drop=True) self.image_size = image_size self.n_tiles = n_tiles self.tile_mode = tile_mode self.rand = rand self.sub_imgs = sub_imgs def __len__(self): return self.df.shape[0] def __getitem__(self, index): row = self.df.iloc[index] img_id = row.image_id tiff_file = os.path.join(image_folder, f'{img_id}.tiff') image = skimage.io.MultiImage(tiff_file)[1] tiles, OK = get_tiles(image, self.tile_mode) if self.rand: idxes = np.random.choice(list(range(self.n_tiles)) , self.n_tiles, replace=False) else: idxes = list(range(self.n_tiles)) idxes = np.asarray(idxes)+ self.n_tiles if self.sub_imgs else idxes n_row_tiles = int(np.sqrt(self.n_tiles)) images = np.zeros(( image_size * n_row_tiles, image_size * n_row_tiles, 3)) for h in range(n_row_tiles): for w in range(n_row_tiles): i = h * n_row_tiles + w if len(tiles)> idxes[i]: this_img = tiles[idxes[i]]['img'] else: this_img = np.ones(( self.image_size, self.image_size, 3)).astype(np.uint8)* 255 this_img = 255 - 
this_img h1 = h * image_size w1 = w * image_size images[h1:h1+image_size, w1:w1+image_size] = this_img images = images.astype(np.float32) images /= 255 images = images.transpose(2, 0, 1) return torch.tensor(images) <load_pretrained>
title_dependencies=train[['Title','Survived','Sex']].groupby(['Title','Sex'],as_index=False ).mean() title_dependencies
Titanic - Machine Learning from Disaster
12,918,821
dataset = PANDADataset(df, image_size, n_tiles, 0) loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False) dataset2 = PANDADataset(df, image_size, n_tiles, 2) loader2 = DataLoader(dataset2, batch_size=batch_size, num_workers=num_workers, shuffle=False )<save_to_csv>
train = train.drop(['Name', 'PassengerId', 'Cabin', 'Embarked','Ticket'], axis=1) test = test.drop(['Name', 'PassengerId', 'Cabin', 'Embarked','Ticket'], axis=1) combine=[train,test] print(train.head()) for dataset in combine: dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0} ).astype(int) print(train.head()) print(test.head())
Titanic - Machine Learning from Disaster
12,918,821
LOGITS = [] LOGITS2 = [] with torch.no_grad() : for data in tqdm(loader): data = data.to(device) logits = models[0](data) LOGITS.append(logits) for data in tqdm(loader2): data = data.to(device) logits = models[0](data) LOGITS2.append(logits) LOGITS =(torch.cat(LOGITS ).sigmoid().cpu() + torch.cat(LOGITS2 ).sigmoid().cpu())/ 2 PREDS = LOGITS.sum(1 ).round().numpy() df['isup_grade'] = PREDS.astype(int) df[['image_id', 'isup_grade']].to_csv('submission.csv', index=False) print(df.head()) print() print(df.isup_grade.value_counts() )<set_options>
X_train, X_test , Y_train, Y_test = train_test_split(train.drop(['Survived'],axis=1),train['Survived'],test_size=0.10,random_state=None )
Titanic - Machine Learning from Disaster
12,918,821
warnings.simplefilter(action='ignore', category=FutureWarning) warnings.simplefilter(action='ignore', category=SettingWithCopyWarning) warnings.simplefilter(action='ignore', category=FutureWarning )<compute_test_metric>
from sklearn.linear_model import LogisticRegression from sklearn import metrics
Titanic - Machine Learning from Disaster
12,918,821
FEATS_EXCLUDED = ['first_active_month', 'target', 'card_id', 'outliers', 'hist_purchase_date_max', 'hist_purchase_date_min', 'hist_card_id_size', 'new_purchase_date_max', 'new_purchase_date_min', 'new_card_id_size', 'OOF_PRED', 'month_0'] @contextmanager def timer(title): t0 = time.time() yield print("{} - done in {:.0f}s".format(title, time.time() - t0)) def rmse(y_true, y_pred): return np.sqrt(mean_squared_error(y_true, y_pred)) def one_hot_encoder(df, nan_as_category = True): original_columns = list(df.columns) categorical_columns = [col for col in df.columns if df[col].dtype == 'object'] df = pd.get_dummies(df, columns= categorical_columns, dummy_na= nan_as_category) new_columns = [c for c in df.columns if c not in original_columns] return df, new_columns def display_importances(feature_importance_df_): cols = feature_importance_df_[["feature", "importance"]].groupby("feature" ).mean().sort_values(by="importance", ascending=False)[:40].index best_features = feature_importance_df_.loc[feature_importance_df_.feature.isin(cols)] plt.figure(figsize=(8, 10)) sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False)) plt.title('LightGBM Features(avg over folds)') plt.tight_layout() plt.savefig('lgbm_importances.png') def reduce_mem_usage(df, verbose=True): numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64'] start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: col_type = df[col].dtypes if col_type in numerics: c_min = df[col].min() c_max = df[col].max() if str(col_type)[:3] == 'int': if c_min > np.iinfo(np.int8 ).min and c_max < np.iinfo(np.int8 ).max: df[col] = df[col].astype(np.int8) elif c_min > np.iinfo(np.int16 ).min and c_max < np.iinfo(np.int16 ).max: df[col] = df[col].astype(np.int16) elif c_min > np.iinfo(np.int32 ).min and c_max < np.iinfo(np.int32 ).max: df[col] = df[col].astype(np.int32) elif c_min > np.iinfo(np.int64 ).min and c_max < np.iinfo(np.int64 ).max: df[col] = 
df[col].astype(np.int64) else: if c_min > np.finfo(np.float16 ).min and c_max < np.finfo(np.float16 ).max: df[col] = df[col].astype(np.float16) elif c_min > np.finfo(np.float32 ).min and c_max < np.finfo(np.float32 ).max: df[col] = df[col].astype(np.float32) else: df[col] = df[col].astype(np.float64) end_mem = df.memory_usage().sum() / 1024**2 if verbose: print('Mem.usage decreased to {:5.2f} Mb({:.1f}% reduction)'.format(end_mem, 100 *(start_mem - end_mem)/ start_mem)) return df<load_from_csv>
modelLR= LogisticRegression(solver='liblinear',C=0.21,random_state=1) modelLR.fit(X_train,Y_train)
Titanic - Machine Learning from Disaster
12,918,821
%%time def load_data() : train_df = pd.read_csv('.. /input/elo-blending/train_feature.csv') test_df = pd.read_csv('.. /input/elo-blending/test_feature.csv') display(train_df.head()) display(test_df.head()) print(train_df.shape,test_df.shape) train = pd.read_csv('.. /input/elo-merchant-category-recommendation/train.csv', index_col=['card_id']) test = pd.read_csv('.. /input/elo-merchant-category-recommendation/test.csv', index_col=['card_id']) train_df['card_id'] = train.index test_df['card_id'] = test.index train_df.index = train_df['card_id'] test_df.index = test_df['card_id'] display(train_df.head()) display(test.head()) print(train.shape,test.shape) del train,test return(train_df,test_df) print(gc.collect() )<define_variables>
Y_pred_log=modelLR.predict(X_test) acc_LR = metrics.accuracy_score(Y_test, Y_pred_log )
Titanic - Machine Learning from Disaster
12,918,821
boosting = ["goss","dart"] boosting[0],boosting[1]<load_pretrained>
print(Y_pred_log) print("We see that Logistic regression gives an Accuracy of ",acc_LR*100,"% on the traing set.")
Titanic - Machine Learning from Disaster
12,918,821
%%time boosting = ["goss","dart"] def kfold_lightgbm(train_df, test_df, num_folds, stratified = False, boosting = boosting[0]): print("Starting LightGBM.Train shape: {}, test shape: {}".format(train_df.shape, test_df.shape)) if stratified: folds = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=326) else: folds = KFold(n_splits= num_folds, shuffle=True, random_state=2045) oof_preds = np.zeros(train_df.shape[0]) sub_preds = np.zeros(test_df.shape[0]) feature_importance_df = pd.DataFrame() feats = [f for f in train_df.columns if f not in FEATS_EXCLUDED] for n_fold,(train_idx, valid_idx)in enumerate(folds.split(train_df[feats], train_df['outliers'])) : train_x, train_y = train_df[feats].iloc[train_idx], train_df['target'].iloc[train_idx] valid_x, valid_y = train_df[feats].iloc[valid_idx], train_df['target'].iloc[valid_idx] lgb_train = lgb.Dataset(train_x,label=train_y,free_raw_data=False) lgb_test = lgb.Dataset(valid_x,label=valid_y,free_raw_data=False) params ={ 'task': 'train', 'boosting': 'goss', 'objective': 'regression', 'metric': 'rmse', 'learning_rate': 0.005, 'subsample': 0.9855232997390695, 'max_depth': 8, 'top_rate': 0.9064148448434349, 'num_leaves': 87, 'min_child_weight': 41.9612869171337, 'other_rate': 0.0721768246018207, 'reg_alpha': 9.677537745007898, 'colsample_bytree': 0.5665320670155495, 'min_split_gain': 9.820197773625843, 'reg_lambda': 8.2532317400459, 'min_data_in_leaf': 21, 'verbose': -1, 'seed':int(2**n_fold), 'bagging_seed':int(2**n_fold), 'drop_seed':int(2**n_fold) } reg = lgb.train( params, lgb_train, valid_sets=[lgb_train, lgb_test], valid_names=['train', 'test'], num_boost_round=10000, early_stopping_rounds= 200, verbose_eval=100 ) oof_preds[valid_idx] = reg.predict(valid_x, num_iteration=reg.best_iteration) sub_preds += reg.predict(test_df[feats], num_iteration=reg.best_iteration)/ folds.n_splits fold_importance_df = pd.DataFrame() fold_importance_df["feature"] = feats fold_importance_df["importance"] = 
np.log1p(reg.feature_importance(importance_type='gain', iteration=reg.best_iteration)) fold_importance_df["fold"] = n_fold + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) print('Fold %2d RMSE : %.6f' %(n_fold + 1, rmse(valid_y, oof_preds[valid_idx]))) del reg, train_x, train_y, valid_x, valid_y gc.collect() display_importances(feature_importance_df) submission = pd.read_csv(".. /input/elo-merchant-category-recommendation/sample_submission.csv") submission['target'] = sub_preds submission.to_csv(boosting+".csv", index=False) display(submission.head()) return(submission )<split>
from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
12,918,821
%%time train_df,test_df = load_data() print(gc.collect()) submission = kfold_lightgbm(train_df, test_df, num_folds=7, stratified=False, boosting=boosting[0] )<concatenate>
dtree = DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=1, max_features=7, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, random_state=None, splitter='best') dtree.fit(X_train, Y_train) y_pred_tree = dtree.predict(X_test )
Titanic - Machine Learning from Disaster
12,918,821
submission1 = kfold_lightgbm(train_df, test_df, num_folds=7, stratified=False, boosting=boosting[1] )<save_to_csv>
acc_DT = metrics.accuracy_score(y_pred_tree, Y_test) print(y_pred_tree) print("We see that Decision Tree gives an Accuracy of ",acc_DT*100,"% on the traing set." )
Titanic - Machine Learning from Disaster
12,918,821
final = pd.read_csv(".. /input/elo-merchant-category-recommendation/sample_submission.csv") final['target'] = submission['target'] * 0.5 + submission1['target'] * 0.5 final.to_csv("blend.csv",index = False )<set_options>
from sklearn.ensemble import RandomForestClassifier
Titanic - Machine Learning from Disaster
12,918,821
%matplotlib inline <choose_model_class>
rforest = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini', max_depth=None, max_features=7, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, min_samples_leaf=1, min_samples_split=2, min_weight_fraction_leaf=0.0, n_estimators=20, n_jobs=1, oob_score=False, random_state=42, verbose=0, warm_start=False) rforest.fit(X_train, Y_train) y_pred_forest = rforest.predict(X_test )
Titanic - Machine Learning from Disaster
12,918,821
max_seq_len = 60 embed_size = 300 max_features = 50000 EMBEDDING = 'glove.840B.300d' MODEL = 'attention' embedding_matrix = 'None' embeddings_idx = 'None' checkpoint = ModelCheckpoint('./checkpoints/', monitor='val_acc', verbose=0, save_best_only=True) earlystop = EarlyStopping(monitor='val_acc', min_delta=0, patience=1, verbose=0) tensorboard = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True) reducelr = ReduceLROnPlateau(monitor='val_acc', factor=0.1, patience=3, verbose=0, mode='auto', min_delta=0.0001, cooldown=0, min_lr=0) thresh = 0.4<load_from_csv>
acc_RF = metrics.accuracy_score(y_pred_forest, Y_test) print(y_pred_forest) print("We see that Random Forest classifier gives an Accuracy of ",acc_RF*100,"% on the traing set." )
Titanic - Machine Learning from Disaster
12,918,821
train_set = pd.read_csv('.. /input/train.csv') test_set = pd.read_csv('.. /input/test.csv') train_set.head()<count_values>
from sklearn.svm import SVC
Titanic - Machine Learning from Disaster
12,918,821
x = train_set['target'].value_counts(dropna=False) print(x) sincere_examples = x[0] insincere_examples = x[1]<define_variables>
svc = SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) svc.fit(X_train, Y_train) y_pred_svc = svc.predict(X_test )
Titanic - Machine Learning from Disaster
12,918,821
print(len(lengths_without_puncs)- np.count_nonzero(lengths_without_puncs))<define_variables>
acc_SVC = metrics.accuracy_score(y_pred_svc, Y_test) print(y_pred_svc) print("We see that SVC classifier gives an Accuracy of ",acc_SVC*100,"% on the traing set." )
Titanic - Machine Learning from Disaster
12,918,821
contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", 
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have" } punct = "/-'?!., punct_mapping = {"‘": "'", "₹": "e", "´": "'", "°": "", "€": "e", "™": "tm", "√": " sqrt ", "×": "x", "²": "2", "—": "-", "–": "-", "’": "'", "_": "-", "`": "'", '“': '"', '”': '"', '“': '"', "£": "e", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', } mispell_dict = {'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': 
'2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'} def clean_contractions(text, mapping=contraction_mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text def clean_special_chars(text, punct=punct, mapping=punct_mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text def correct_spelling(x, dictionary=mispell_dict): for word in dictionary.keys() : x = x.replace(word, dictionary[word]) return x def clean(text): text = text.lower() text = clean_contractions(text) text = clean_special_chars(text) text = correct_spelling(text) return text<count_values>
from xgboost import XGBClassifier
Titanic - Machine Learning from Disaster
12,918,821
sincere_counts = Counter() insincere_counts = Counter() word_dict = Counter() sincere_to_insincere_ratio = Counter() def prepare_dicts() : qs = [clean(i)for i in train_set['question_text']] lbl = [j for j in train_set['target']] for i,j in zip(qs,lbl): words = i.split() for word in words: word_dict[word] += 1 if j == 0: sincere_counts[word] += 1 elif j == 1: insincere_counts[word] += 1 tst_qs = [clean(i)for i in test_set['question_text']] for i in tst_qs: i = i.split() for j in i: word_dict[j] += 1 print('Words in sincere Questions: {}'.format(len(sincere_counts))) print('Words in insincere Questions: {}'.format(len(insincere_counts))) print('Total Words in corpus: {}'.format(len(word_dict))) print('Most Common Words in Sincere Questions : ') print(sincere_counts.most_common() [:10]) print('Most Common Words in Insincere Questions : ') print(insincere_counts.most_common() [:10]) for i in sincere_counts: if sincere_counts[i] >= 100: sincere_to_insincere_ratio[i] = np.log(sincere_counts[i]/(insincere_counts[i] + 1)) print('The Most Sincere Words : ') print(sincere_to_insincere_ratio.most_common() [:10]) print('The Most Insincere Words : ') print(list(reversed(sincere_to_insincere_ratio.most_common())) [:10]) <define_variables>
xgb=XGBClassifier(learning_rate=0.05, n_estimators=500) xgb.fit(X_train,Y_train) y_pred_xgb=xgb.predict(X_test )
Titanic - Machine Learning from Disaster
12,918,821
prepare_dicts()<split>
acc_XGB= metrics.accuracy_score(y_pred_xgb,Y_test) print(y_pred_xgb) print("We see that XGB classifier gives an Accuracy of ",acc_XGB*100,"% on the traing set." )
Titanic - Machine Learning from Disaster
12,918,821
train_x = list(train_set['question_text'].fillna("_na_" ).values) train_y = train_set['target'].values test_x = list(test_set['question_text'].fillna("_na_" ).values) train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size=0.2) train_x = [clean(i)for i in train_x] val_x = [clean(i)for i in val_x] test_x = [clean(i)for i in test_x] print('An Example From Train Set: ') print(train_x[0]) tokenizer = text.Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_x)) train_X = tokenizer.texts_to_sequences(train_x) val_X = tokenizer.texts_to_sequences(val_x) test_X = tokenizer.texts_to_sequences(test_x) print('After Tokenizing: ') print(train_X[0]) train_X = seq.pad_sequences(train_X, maxlen=max_seq_len) val_X = seq.pad_sequences(val_X, maxlen=max_seq_len) test_X = seq.pad_sequences(test_X, maxlen=max_seq_len) print('After Padding: ') print(train_X[0]) print(np.shape(train_X), np.shape(train_y), np.shape(val_X), np.shape(val_y))<categorify>
import keras from keras.layers import Dense from keras.models import Sequential from sklearn.metrics import classification_report
Titanic - Machine Learning from Disaster
12,918,821
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') def get_embeddings(embedding_name, mode='new'): filePath = '.. /input/embeddings/{0}/{0}.txt'.format(embedding_name) if mode == 'new': embeddings_idx = dict(get_coefs(*i.split(" ")) for i in open(filePath)) all_embs = np.stack(embeddings_idx.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] word_index = tokenizer.word_index nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_idx.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embeddings_idx, embedding_matrix<categorify>
nn = Sequential() nn.add(Dense(units= 14, activation = 'relu', input_dim=7, kernel_initializer="uniform")) nn.add(Dense(units= 14, activation = 'relu',kernel_initializer="uniform")) nn.add(Dense(units= 1, activation = 'sigmoid',kernel_initializer="uniform")) nn.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
Titanic - Machine Learning from Disaster
12,918,821
def check_coverage(vocab, embeddings_index): known_words = {} unknown_words = {} nb_known_words = 0 nb_unknown_words = 0 for word in vocab.keys() : try: known_words[word] = embeddings_index[word] nb_known_words += vocab[word] except: unknown_words[word] = vocab[word] nb_unknown_words += vocab[word] pass print('Found embeddings for {:.2%} of vocab'.format(len(known_words)/ len(vocab))) print('Found embeddings for {:.2%} of all text'.format(nb_known_words /(nb_known_words + nb_unknown_words))) unknown_words = sorted(unknown_words.items() , key=operator.itemgetter(1)) [::-1] return unknown_words<categorify>
nn.fit(X_train,Y_train, batch_size=32,epochs=50,verbose= 0) nn_pred = nn.predict(X_test) nn_pred = [ 1 if y>=0.5 else 0 for y in nn_pred] print(nn_pred) acc_NN = metrics.accuracy_score(Y_test, nn_pred )
Titanic - Machine Learning from Disaster
12,918,821
embedding_idxs, embedding_mtx = get_embeddings(EMBEDDING, 'new') unk_wrds = check_coverage(word_dict, embedding_idxs )<choose_model_class>
print("We can see that the neural network gives an Accuracy of ",acc_NN*100 , "% on the training set." )
Titanic - Machine Learning from Disaster
12,918,821
def get_model(model_type): if model_type == 'nb': model = NaiveBayes() elif model_type == 'svm': model = __SVC__() elif model_type == 'rnn': inp = Input(shape=(max_seq_len,)) layer = Embedding(max_features, embed_size, weights=[embedding_mtx], trainable=False )(inp) layer = SimpleRNN(32, return_sequences=True )(layer) layer = GlobalMaxPool1D()(layer) layer = Dense(16, activation='relu' )(layer) layer = Dropout(0.1 )(layer) layer = Dense(1, activation='sigmoid' )(layer) model = Model(inputs=inp, outputs=layer) elif model_type == 'lstm': inp = Input(shape=(max_seq_len,)) layer = Embedding(max_features, embed_size, weights=[embedding_mtx], trainable=False )(inp) layer = Bidirectional(CuDNNLSTM(32, return_sequences=True))(layer) layer = GlobalMaxPool1D()(layer) layer = Dense(16, activation='relu' )(layer) layer = Dropout(0.1 )(layer) layer = Dense(1, activation='sigmoid' )(layer) model = Model(inputs=inp, outputs=layer) elif model_type == 'gru': inp = Input(shape=(max_seq_len,)) layer = Embedding(max_features, embed_size, weights=[embedding_mtx], trainable=False )(inp) layer = Bidirectional(CuDNNGRU(32, return_sequences=True))(layer) layer = GlobalMaxPool1D()(layer) layer = Dense(16, activation='relu' )(layer) layer = Dropout(0.1 )(layer) layer = Dense(1, activation='sigmoid' )(layer) model = Model(inputs=inp, outputs=layer) elif model_type == 'attention': inp = Input(shape=(max_seq_len,)) layer = Embedding(max_features, embed_size, weights=[embedding_mtx], trainable=False )(inp) layer = Bidirectional(CuDNNLSTM(32, return_sequences=True))(layer) layer = Attention(max_seq_len )(layer) layer = Dense(16, activation='relu' )(layer) layer = Dropout(0.1 )(layer) layer = Dense(1, activation='sigmoid' )(layer) model = Model(inputs=inp, outputs=layer) return model <init_hyperparams>
best=best_model['Model'].iloc[0] print(str(best))
Titanic - Machine Learning from Disaster
12,918,821
class NaiveBayes() : def __init__(self): self.sincere_example_count = sincere_examples self.insincere_example_count = insincere_examples self.total_examples = x[0]+x[1] self.sincere_dict = sincere_counts self.insincere_dict = insincere_counts self.word_dict= word_dict self.sincere_word_count = np.sum(list(sincere_counts.values())) self.insincere_word_count = np.sum(list(insincere_counts.values())) self.sincere_prob = self.sincere_example_count / self.total_examples self.insincere_prob = self.insincere_example_count / self.total_examples def summary(self): print('Positive Examples : {}, Negative Examples : {}, Total Examples : {}'.format(self.sincere_example_count, self.insincere_example_count, self.total_examples)) def predict(self, x_test): print('Predicting...') predictions = [] for example in x_test: p_words = np.prod([word_dict[j]/np.sum(list(word_dict.values())) for j in example.split() ]) p_words += 2 sincere_prob_num = np.prod([sincere_counts[j]/self.sincere_word_count for j in example.split() ])* self.sincere_prob insincere_prob_num = np.prod([insincere_counts[j]/self.insincere_word_count for j in example.split() ])* self.insincere_prob sincere_prob = sincere_prob_num/p_words insincere_prob = insincere_prob_num/p_words predictions.append(np.argmax([sincere_prob, insincere_prob])) return predictions class __SVC__(SVC): def __init__(self): super(__SVC__,self ).__init__(verbose=True) print('initializing...') def summary(self): print(self.__dict__) def prepare_data(self): self.X_train = [embedding_mtx[i] for example in train_X for i in example] self.X_val = [embedding_mtx[i] for example in val_X for i in example] self.X_test = [embedding_mtx[i] for example in test_X for i in example]<choose_model_class>
test.head() for c in test.columns: print(c, str(100*test[c].isnull().sum() /len(test))) print(".............. Before") test['Age'] = test['Age'].fillna(test['Age'].mean()) test['Fare'] = test['Fare'].fillna(test['Fare'].mean()) print(" ") for c in test.columns: print(c, str(100*test[c].isnull().sum() /len(test))) print(".............. After" )
Titanic - Machine Learning from Disaster
12,918,821
class Attention(Layer): def __init__(self, step_dim, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.features_dim = 0 self.step_dim = step_dim self.bias = True super(Attention, self ).__init__(**kwargs) def build(self, input_shape): assert len(input_shape)== 3 self.W = self.add_weight(( input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name)) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight(( input_shape[1],), initializer='zero', name='{}_b'.format(self.name), ) else: self.b = None self.built = True def call(self, x, mask=None): features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x,(-1, features_dim)) , K.reshape(self.W,(features_dim, 1))),(-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) if mask is not None: a *= K.cast(mask, K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True)+ K.epsilon() , K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], self.features_dim<compute_test_metric>
if best == 'Logistic Regression': modelLR.fit(train.drop(['Survived'],axis=1),train['Survived']) test_pred=modelLR.predict(test) print(test_pred) print('Logistic Regression') if best == 'Decision Tree': dtree.fit(train.drop(['Survived'],axis=1),train['Survived']) test_pred=modelLR.predict(test) print(test_pred) print('Decision Tree') if best =='Random Forest': rforest.fit(train.drop(['Survived'],axis=1),train['Survived']) test_pred=rforest.predict(test) print(test_pred) print('Random Forest') if best == 'Support Vector Machine': svc.fit(train.drop(['Survived'],axis=1),train['Survived']) test_pred=svc.predict(test) print(test_pred) print('Support Vector Machines') if best == 'Gradient Boosting Classifier': xgb.fit(train.drop(['Survived'],axis=1),train['Survived']) test_pred=xgb.predict(test) print(test_pred) print('Gradient Boosting Classifier') if best == 'Artificial Neural Network': nn.fit(train.drop(['Survived'],axis=1),train['Survived'], epochs=500 , verbose=0) test_pred=nn.predict(test) test_pred = [ 1 if y>=0.5 else 0 for y in test_pred] print(test_pred) print('Artificial Neural Network')
Titanic - Machine Learning from Disaster
12,918,821
def print_f1s(predictions): for threshold in np.arange(0.1, 0.501, 0.01): threshold = np.round(threshold, 2) print("F1 score at threshold {0} is {1}".format(threshold, f1_score(val_y,(predictions>threshold ).astype(int))))<prepare_output>
test_data=pd.read_csv('/kaggle/input/titanic/gender_submission.csv') test_data = test_data.drop(['PassengerId'], axis=1) test_data.head()
Titanic - Machine Learning from Disaster
12,918,821
<find_best_model_class>
test_data.values.tolist() test_acc = metrics.accuracy_score(test_data, test_pred) print("Here we see that the test data has an Accuracy of ",test_acc*100,"% " )
Titanic - Machine Learning from Disaster
12,918,821
rnn = get_model('rnn') rnn.summary() rnn.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy']) rnn.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y), callbacks=[earlystop, reducelr]) predictions_rnn_real = rnn.predict(test_X) predictions_rnn =(predictions_rnn_real >= thresh ).astype(int) predictions_val_rnn = rnn.predict(val_X, batch_size=1024) print_f1s(predictions_val_rnn) del rnn gc.collect() time.sleep(10 )<save_to_csv>
test_predict = pd.DataFrame(test_pred, columns= ['Survived']) test_new= pd.read_csv('/kaggle/input/titanic/test.csv') new_test = pd.concat([test_new, test_predict], axis=1, join='inner' )
Titanic - Machine Learning from Disaster
12,918,821
<find_best_model_class><EOS>
submit=new_test[['PassengerId','Survived']] submit.to_csv('predictions.csv',index=False )
Titanic - Machine Learning from Disaster
9,456,372
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<find_best_model_class>
random.seed(123) sns.set_style("darkgrid") train = pd.read_csv("/kaggle/input/titanic/train.csv") test = pd.read_csv("/kaggle/input/titanic/test.csv") len_train = len(train) train_y = train["Survived"]
Titanic - Machine Learning from Disaster
9,456,372
attention = get_model('attention') attention.summary() attention.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy']) attention.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y), callbacks=[earlystop, reducelr]) predictions_attention_real = attention.predict(test_X) predictions_attention =(predictions_attention_real >= thresh ).astype(int) predictions_val_attention = attention.predict(val_X, batch_size=1024) print_f1s(predictions_val_attention) del attention gc.collect() time.sleep(10) <data_type_conversions>
lm = ols('Age~ C(Pclass)+ C(Sex)* C(Embarked)+ C(SibSp)+ C(Parch)', pd.concat([train,test],axis=0)).fit() anova_lm(lm,typ=2 )
Titanic - Machine Learning from Disaster
9,456,372
val_preds = 0.50*predictions_val_gru + 0.25*predictions_val_lstm + 0.25*predictions_val_attention val_preds =(val_preds > thresh ).astype(int) print_f1s(val_preds )<save_to_csv>
lm = ols('Fare~ C(Pclass)+ C(Sex)+ C(Embarked)+ C(SibSp)+ C(Parch)', pd.concat([train,test],axis=0)).fit() anova_lm(lm,typ=2 )
Titanic - Machine Learning from Disaster
9,456,372
final_preds = 0.50*predictions_gru_real + 0.25*predictions_lstm_real + 0.25*predictions_attention_real final_preds =(final_preds > thresh ).astype(int) final_prediction = pd.DataFrame({"qid":test_set["qid"].values}) final_prediction['prediction'] = final_preds final_prediction.to_csv("submission.csv", index=False )<import_modules>
mapping = {'Mlle': 'Miss', 'Major': 'Mr', 'Col': 'Mr', 'Sir': 'Mr', 'Don': 'Mr', 'Mme': 'Miss', 'Jonkheer': 'Mr', 'Lady': 'Mrs', 'Capt': 'Mr', 'the Countess': 'Mrs', 'Ms': 'Miss', 'Dona': 'Mrs'} class DataFrameSelector(BaseEstimator, TransformerMixin): def __init__(self, attribute_names): self.attribute_names = attribute_names def fit(self, X, y=None): return self def transform(self, X): return X[self.attribute_names] class FirstTransformer(BaseEstimator, TransformerMixin): def __init__(self, name): self.name = name def fit(self, X, y=None): return self def transform(self, X): value = [i[0][0] for i in X[self.name]] X[self.name] = value return X class ValueImputer(BaseEstimator, TransformerMixin): def __init__(self, name, value): self.value = value self.name = name def fit(self, X, y=None): return self def transform(self, X): X[self.name].fillna(self.value,inplace=True) return X class GroupbyImputer(BaseEstimator, TransformerMixin): def __init__(self, name, group_by): self.name = name self.group_by = group_by def fit(self, X, y=None): self.value = X.groupby(self.group_by)[self.name] return self def transform(self, X): X[self.name] = self.value.apply(lambda x: x.fillna(x.median())) i=1 while(X[self.name].isnull().sum() >0): k = X.groupby(self.group_by[:-i])[self.name] X[self.name] =(k.apply(lambda x: x.fillna(x.median()))) i+=1 if i == len(self.group_by): break return X class MostTransformer(BaseEstimator, TransformerMixin): def __init__(self, names,mapping): self.names = names self.mapping = mapping def fit(self, X, y=None): self.freq = X[self.names].str.split(', ', expand=True)[1].str.split('.', expand=True)[0] return self def transform(self, X, y=None): X[self.names + "_map"] = self.freq X.replace({self.names+"_map": self.mapping}, inplace=True) return X.drop("Name",axis=1) class FrequentTransformer(BaseEstimator, TransformerMixin): def __init__(self, names): self.names = names def fit(self, X, y=None): self.freq = 
X.groupby(self.names)[self.names].transform('count') return self def transform(self, X, y=None): X[self.names + "_freq"] = self.freq return X.drop(self.names, axis=1) imputer_pipeline = Pipeline([ ('Cabin_imputer', ValueImputer(name = "Cabin",value="None")) , ('Embarked_imputer', ValueImputer(name= "Embarked",value="S")) , ('Fare_imputer', GroupbyImputer(name= "Fare",group_by = ['Pclass', 'Parch','SibSp'])) , ('Age_imputer', GroupbyImputer(name= "Age",group_by = ['Pclass','Parch','Embarked'])) , ]) transform_pipeline = Pipeline([ ('Cabin', FirstTransformer(name= "Cabin")) , ('Name_transformer', MostTransformer(names = "Name", mapping = mapping)) , ('Ticket_transformer', FrequentTransformer(names= "Ticket")) , ] )
Titanic - Machine Learning from Disaster
9,456,372
FOLD = 0 SEED = 1337 NOTIFY_EACH_EPOCH = False WORKERS = 0 BATCH_SIZE = 512 N_SPLITS = 10 random.seed(SEED) os.environ['PYTHONHASHSEED'] = str(SEED) np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def get_n_params(model): pp=0 for p in list(model.parameters()): nn=1 for s in list(p.size()): nn = nn*s pp += nn return pp def save_checkpoint(state): print(" Saving checkpoint") filename = f'./checkpoint-{state["epoch"]}.pt.tar' torch.save(state, filename) def initialize(model, path=None, optimizer=None): if path == None: checkpoints = glob.glob('./*.pt.tar') path = checkpoints[np.argmax([int(checkpoint.split('checkpoint-')[1].split('.')[0])for checkpoint in checkpoints])] checkpoint = torch.load(path) model.load_state_dict(checkpoint['model']) print(f' Loaded checkpoint {path} | Trained for {checkpoint["epoch"] + 1} epochs') if optimizer: optimizer.load_state_dict(checkpoint['optimizer']) epoch = checkpoint['epoch'] + 1 train_iteration = checkpoint['train_iteration'] val_iteration = checkpoint['val_iteration'] return model, optimizer, epoch, train_iteration, val_iteration else: return model<load_from_csv>
class TaitanicProcessing(BaseEstimator, TransformerMixin): def fit(self, X, y=None): self.y = y return self def transform(self, X): X["Sex"] = X["Sex"].map({"male":1, "female":0}) X["Family"] = X["SibSp"] + X["Parch"] + 1 X["Family"] = X["Family"].astype(str) X['Family'] = X['Family'].replace("1", 'Alone') X['Family'] = X['Family'].replace(["2","3","4"], 'Normal') X['Family'] = X['Family'].replace(["5","6"], 'Mid') X['Family'] = X['Family'].replace(["7","8","11"], 'Big') X["Fare"] = np.log1p(X["Fare"]) X["Fare"] = MinMaxScaler().fit_transform(X['Fare'].values.reshape(-1, 1)) X['Cabin'] = X['Cabin'].replace(['A', 'B', 'C','T'], 'ABCT') X['Cabin'] = X['Cabin'].replace(['D', 'E'], 'DE') X['Cabin'] = X['Cabin'].replace(['F', 'G'], 'FG') X['Age'] = pd.qcut(X['Age'], 10) X['Age'] = LabelEncoder().fit_transform(X['Age']) return X class DummyCategory(BaseEstimator, TransformerMixin): def __init__(self, names): self.names = names def fit(self, X, y=None): return self def transform(self, X): encoded_fea = [] for c in self.names: encoded = OneHotEncoder().fit_transform(X[c].values.reshape(-1, 1)).toarray() n = X[c].nunique() cols = ['{}_{}'.format(c, n)for n in range(1, n + 1)] encoded_df = pd.DataFrame(encoded, columns=cols) encoded_df.index = X.index encoded_fea.append(encoded_df) return pd.concat([X, *encoded_fea[:5]], axis=1) process_pipeline = Pipeline([ ("process", TaitanicProcessing()), ("cat_pipeline", DummyCategory(["Embarked", "Name_map","Cabin","Family"])) , ]) full_pipeline = Pipeline([ ("imputer", imputer_pipeline), ("transform", transform_pipeline), ("process", process_pipeline), ] )
Titanic - Machine Learning from Disaster
9,456,372
sample_submission = pd.read_csv('.. /input/sample_submission.csv') train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv' )<feature_engineering>
df = pd.concat([train.drop("Survived",axis=1),test],axis=0,sort=False) df.set_index("PassengerId",drop=True, inplace=True) df = full_pipeline.fit_transform(df,train_y) df.reset_index(drop=True,inplace=True) df.info()
Titanic - Machine Learning from Disaster
9,456,372
def add_features(df): df2 = df.copy(deep=True) df2['question_text'] = df2['question_text'].apply(lambda x:str(x)) df2['total_length'] = df2['question_text'].apply(len) df2['capitals'] = df2['question_text'].apply(lambda comment: sum(1 for c in comment if c.isupper())) df2['caps_vs_length'] = df2.apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df2['num_words'] = df2.question_text.str.count('\S+') df2['num_unique_words'] = df2['question_text'].apply(lambda comment: len(set(w for w in comment.split()))) df2['words_vs_unique'] = df2['num_unique_words'] / df2['num_words'] return np.concatenate(( df2['caps_vs_length'].values.reshape(-1, 1), df2['words_vs_unique'].values.reshape(-1, 1)) , axis=1 )<save_to_csv>
df.drop(["Embarked", "Name_map","Cabin","Family",'SibSp','Parch'],axis=1,inplace=True) df.head()
Titanic - Machine Learning from Disaster
9,456,372
kfold = KFold(n_splits=N_SPLITS, shuffle=True, random_state=SEED) train_idx, val_idx = list(kfold.split(train)) [FOLD] x_train, x_val = train.iloc[train_idx], train.iloc[val_idx] x_train_meta = add_features(x_train) x_val_meta = add_features(x_val) test_meta = add_features(test) x_train = x_train.reset_index() x_val = x_val.reset_index() x_test = test.reset_index() x_train.to_csv('train.csv') x_val.to_csv('val.csv') x_test.to_csv('test.csv' )<define_variables>
train = df.iloc[:len_train,:].reset_index(drop=True) test = df.iloc[len_train:,:].reset_index(drop=True )
Titanic - Machine Learning from Disaster
9,456,372
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]<string_transform>
# rf_cv: 5-fold stratified CV of a RandomForestClassifier with the given
# hyper-parameters; returns the mean validation ROC-AUC (output='score') or
# the list of per-fold fitted models (output='model').  The score variant is
# then partially applied over the module-global train/train_y and handed to
# BayesianOptimization, which searches the bounded hyper-parameter box for
# 5 random + 20 guided iterations.
# NOTE(review): integer-valued parameters arrive as floats from the optimizer
# and are truncated with int() inside the function — presumably intentional.
def rf_cv(n_estimators, max_depth, min_samples_split, min_samples_leaf, ccp_alpha, x_data=None, y_data=None, n_splits=5, output='score'): score = 0 kf = StratifiedKFold(n_splits=n_splits, random_state=5, shuffle=True) models = [] for train_index, valid_index in kf.split(x_data, y_data): x_train, y_train = x_data.iloc[train_index], y_data[train_index] x_valid, y_valid = x_data.iloc[valid_index], y_data[valid_index] model = RandomForestClassifier( criterion='gini', max_features='auto', n_estimators = int(n_estimators), max_depth = int(max_depth), min_samples_split = int(min_samples_split), min_samples_leaf = int(min_samples_leaf), ccp_alpha = ccp_alpha, random_state = 123, oob_score=True, n_jobs=-1 ) model.fit(x_train, y_train) models.append(model) pred = model.predict_proba(x_valid)[:, 1] true = y_valid score += roc_auc_score(true, pred)/n_splits if output == 'score': return score if output == 'model': return models func_fixed = partial(rf_cv, x_data=train, y_data=train_y, n_splits=5, output='score') rf_ba = BayesianOptimization( func_fixed, { 'n_estimators':(1000, 2000), 'max_depth' :(5,13), 'min_samples_split' :(4,8), 'min_samples_leaf' :(4,8), 'ccp_alpha' :(0.0001, 0.01) }, random_state=4321 ) rf_ba.maximize(init_points=5, n_iter=20 )
Titanic - Machine Learning from Disaster
9,456,372
# tokenize: pad every punctuation character from the module-global `puncts`
# list with spaces, then run the spaCy English tokenizer and return the
# resulting token strings.  The space-padding forces punctuation to become
# separate tokens regardless of spaCy's own rules.
nlp = English() def tokenize(sentence): sentence = str(sentence) for punct in puncts: sentence = sentence.replace(punct, f' {punct} ') x = nlp(sentence) return [token.text for token in x]
params = rf_ba.max['params'] rf_model = rf_cv( params['n_estimators'], params['max_depth'], params['min_samples_split'], params['min_samples_leaf'], params['ccp_alpha'], x_data=train, y_data=train_y, n_splits=5, output='model') importances = pd.DataFrame(np.zeros(( train.shape[1], 5)) , columns=['Fold_{}'.format(i)for i in range(1, 6)], index=train.columns) preds = [] for i, model in enumerate(rf_model): importances.iloc[:, i] = model.feature_importances_ pred = model.predict(test) preds.append(pred) pred = np.mean(preds, axis=0 )
Titanic - Machine Learning from Disaster
9,456,372
%%time index_field = data.Field(sequential=False, use_vocab=False, batch_first=True) question_field = data.Field(tokenize=tokenize, lower=True, batch_first=True, include_lengths=True) target_field = data.Field(sequential=False, use_vocab=False, batch_first=True) train_fields = [ ('id', index_field), ('index', None), ('qid', None), ('question_text', question_field), ('target', target_field) ] test_fields = [ ('id', index_field), ('index', None), ('qid', None), ('question_text', question_field) ] train_dataset, val_dataset = data.TabularDataset.splits('./', train='train.csv', validation='val.csv', format='CSV', skip_header=True, fields=train_fields) test_dataset = data.TabularDataset('./test.csv', format='CSV', skip_header=True, fields=test_fields) question_field.build_vocab(train_dataset, val_dataset, max_size=95000) train_dataloader, val_dataloader = data.BucketIterator.splits(( train_dataset, val_dataset),(BATCH_SIZE, BATCH_SIZE), sort_key=lambda x: len(x.question_text), sort_within_batch=True) test_dataloader = data.BucketIterator(test_dataset, 1024, sort=False, shuffle=False) print(f'Train Dataset: {len(train_dataset)}') print(f'Val Dataset: {len(val_dataset)}') print(f'Test Dataset: {len(test_dataset)}' )<categorify>
y_pred = pred y_pred[y_pred >= 0.5] = 1 y_pred = y_pred.astype(int) submission = pd.read_csv("/kaggle/input/titanic/gender_submission.csv") submission["Survived"] = y_pred submission.to_csv("submission.csv",index = False )
Titanic - Machine Learning from Disaster
9,456,372
# SelfAttention: learned-vector attention over the time axis.  A single
# (1, hidden_size) weight vector scores each timestep via bmm; scores pass
# through ReLU + softmax, positions beyond each sequence's true length are
# masked to 0 and the weights renormalised, and the weighted sum over time is
# returned together with the attention distribution.
# NOTE(review): the mask is built with a Python loop over lengths and
# hard-codes .cuda() — this module requires a GPU as written.
class SelfAttention(nn.Module): def __init__(self, hidden_size, batch_first=False): super(SelfAttention, self ).__init__() self.hidden_size = hidden_size self.batch_first = batch_first self.att_weights = nn.Parameter(torch.Tensor(1, hidden_size), requires_grad=True) nn.init.xavier_uniform_(self.att_weights.data) def get_mask(self): pass def forward(self, inputs, lengths): if self.batch_first: batch_size, max_len = inputs.size() [:2] else: max_len, batch_size = inputs.size() [:2] weights = torch.bmm(inputs, self.att_weights .permute(1, 0) .unsqueeze(0) .repeat(batch_size, 1, 1) ) attentions = torch.softmax(F.relu(weights.squeeze()), dim=-1) mask = torch.ones(attentions.size() , requires_grad=True ).cuda() for i, l in enumerate(lengths): if l < max_len: mask[i, l:] = 0 masked = attentions * mask _sums = masked.sum(-1 ).unsqueeze(-1) attentions = masked.div(_sums) weighted = torch.mul(inputs, attentions.unsqueeze(-1 ).expand_as(inputs)) representations = weighted.sum(1 ).squeeze() return representations, attentions class net(nn.Module): def __init__(self, embedding): super(net, self ).__init__() self.embedding = nn.Embedding.from_pretrained(embedding) self.skip_lstm = nn.LSTM(input_size=300, hidden_size=128, num_layers=1, batch_first=True, bidirectional=True) self.skip_attention = SelfAttention(128*2, batch_first=True) self.lstm = nn.LSTM(input_size=300, hidden_size=128, num_layers=1, batch_first=True, bidirectional=True) self.attention = SelfAttention(128*2, batch_first=True) self.avg_pool = nn.AdaptiveAvgPool1d(1) self.max_pool = nn.AdaptiveMaxPool1d(1) self.fc = nn.Linear(128*2*4+2, 1) self.logit = nn.Linear(1, 1) def forward(self, x, x_meta, x_len): x = self.embedding(x) x = nn.utils.rnn.pack_padded_sequence(x, x_len, batch_first=True) skip_lstm, _ = self.skip_lstm(x) lstm, _ = self.lstm(x) lstm, lstm_lengths = nn.utils.rnn.pad_packed_sequence(lstm, batch_first=True) skip_lstm, skip_lstm_lengths = nn.utils.rnn.pad_packed_sequence(skip_lstm, batch_first=True) 
# net.forward (continued): two parallel BiLSTM branches over the packed
# embeddings, each summarised by SelfAttention; the second branch also gets
# adaptive avg/max pooling.  The four 256-dim summaries are concatenated with
# the 2 handcrafted meta features (caps_vs_length, words_vs_unique), passed
# through fc then a 1->1 logit layer, and flattened to a per-example score.
skip_attention, _ = self.skip_attention(skip_lstm, skip_lstm_lengths) attention, _ = self.attention(lstm, lstm_lengths) avg_pool = self.avg_pool(lstm.transpose(1, 2)) max_pool = self.max_pool(lstm.transpose(1, 2)) x = torch.cat([ skip_attention.view(-1, 128*2), attention.view(-1, 128*2), avg_pool.view(-1, 128*2), max_pool.view(-1, 128*2), x_meta.view(-1, 2) ], dim=1) x = self.fc(x) x = self.logit(x ).view(-1) return x
rfc_model = RandomForestClassifier(criterion='gini',n_estimators=1800,max_depth=7, min_samples_split=6,min_samples_leaf=6, max_features='auto', oob_score=True, random_state=123,n_jobs=-1) oob = 0 probs = pd.DataFrame(np.zeros(( len(test),10)) , columns=['Fold_{}_Sur_{}'.format(i, j)for i in range(1, 6)for j in range(2)]) kf = StratifiedKFold(n_splits=5, random_state=5, shuffle=True) for fold,(trn_idx, val_idx)in enumerate(kf.split(train, train_y), 1): rfc_model.fit(train.iloc[trn_idx,:], train_y[trn_idx]) probs.loc[:, ['Fold_{}_Sur_0'.format(fold),'Fold_{}_Sur_1'.format(fold)]] = rfc_model.predict_proba(test) oob += rfc_model.oob_score_ / 5 print('Fold {} OOB : {}'.format(fold, rfc_model.oob_score_)) print('Average Score: {}'.format(oob))
Titanic - Machine Learning from Disaster
9,456,372
def choose_threshold(val_preds, y_val):
    """Grid-search a decision threshold in [0.10, 0.50] (step 0.01).

    Scores each candidate by the F1 of ``val_preds > threshold`` against
    ``y_val`` and returns ``(best_threshold, best_f1)``, both matching the
    values the scan produced (threshold rounded to 2 decimals).
    """
    candidates = np.arange(0.1, 0.501, 0.01)
    scores = [
        f1_score(y_val, (val_preds > np.round(t, 2)).astype(int))
        for t in candidates
    ]
    best_idx = int(np.argmax(scores))
    return np.round(candidates[best_idx], 2), np.max(scores)
survived =[col for col in probs.columns if col.endswith('Sur_1')] probs['survived'] = probs[survived].mean(axis=1) probs['unsurvived'] = probs.drop(columns=survived ).mean(axis=1) probs['pred'] = 0 sub = probs[probs['survived'] >= 0.5].index probs.loc[sub, 'pred'] = 1 y_pred = probs['pred'].astype(int )
Titanic - Machine Learning from Disaster
9,456,372
<load_pretrained><EOS>
submission = pd.read_csv("/kaggle/input/titanic/gender_submission.csv") submission["Survived"] = y_pred submission.to_csv("submission.csv",index = False )
Titanic - Machine Learning from Disaster
8,095,919
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_pretrained>
import pandas as pd from matplotlib import pyplot as plt import seaborn as sns from sklearn.linear_model import LogisticRegression from sklearn.model_selection import RepeatedKFold import numpy as np import re
Titanic - Machine Learning from Disaster
8,095,919
paragram_vectors = torchtext.vocab.Vectors('.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt') for file in os.listdir('./.vector_cache/'): os.remove(f'./.vector_cache/{file}' )<feature_engineering>
df = pd.DataFrame(pd.read_csv('.. /input/titanic/train.csv')) df_test = pd.DataFrame(pd.read_csv('.. /input/titanic/test.csv'))
Titanic - Machine Learning from Disaster
8,095,919
%%time mean_vectors = torch.zeros(( len(question_field.vocab.stoi), 300)) for word, i in tqdm_notebook(question_field.vocab.stoi.items() , total=len(question_field.vocab.stoi)) : glove_vector = glove_vectors[word] paragram_vector = paragram_vectors[word] vector = torch.stack([glove_vector, paragram_vector]) vector = torch.sum(vector, dim=0 ).reshape(1, -1)/ 2 mean_vectors[i] = vector del glove_vectors, paragram_vectors gc.collect()<train_model>
df.isnull().sum()
Titanic - Machine Learning from Disaster
8,095,919
val_preds, y_val, _, _, _, message = train(train_dataset, val_dataset, train_dataloader, val_dataloader, x_train_meta, x_val_meta, net, question_field, mean_vectors, question_field.vocab.stoi )<save_to_csv>
print(df.groupby(['Pclass'] ).mean() ['Age']) print(' ') print(df.groupby(['Sex'] ).mean() ['Age'] )
Titanic - Machine Learning from Disaster
8,095,919
preds =(preds > best_threshold ).astype(int) sample_submission['prediction'] = preds mlc.kaggle.save_sub(sample_submission, 'submission.csv') sample_submission.head()<save_to_csv>
def age_nan(df):
    """Fill missing Age values in place with the mean age of the
    passenger's (Sex, Pclass) group."""
    for sex in df.Sex.unique():
        for pclass in df.Pclass.unique():
            group = (df.Sex == sex) & (df.Pclass == pclass)
            group_mean = df.loc[group, 'Age'].mean()
            df.loc[group, 'Age'] = df.loc[group, 'Age'].fillna(group_mean)
age_nan(df)
age_nan(df_test)
Titanic - Machine Learning from Disaster
8,095,919
x_test['target'] = preds pseudo_df = pd.concat([x_train, x_val, x_test] ).reset_index().drop('level_0', axis=1) pseudo_df.to_csv('x_pseudo.csv' )<create_dataframe>
df['Embarked'] = df['Embarked'].fillna('S') df_test['Embarked'] = df_test['Embarked'].fillna('S' )
Titanic - Machine Learning from Disaster
8,095,919
pseudo_meta = np.concatenate(( x_train_meta, x_val_meta, test_meta)) pseudo_dataset = data.TabularDataset('./x_pseudo.csv', format='CSV', skip_header=True, fields=train_fields) pseudo_dataloader = data.BucketIterator(pseudo_dataset, 512, sort_key=lambda x: len(x.question_text), sort_within_batch=True )<train_model>
df['Cabin_NaN'] = df['Cabin'].isnull().astype(int) df_test['Cabin_NaN'] = df_test['Cabin'].isnull().astype(int) countplot('Cabin_NaN' )
Titanic - Machine Learning from Disaster
8,095,919
pseudo_val_preds, pseudo_y_val, _, _, _, message = train(pseudo_dataset, val_dataset, pseudo_dataloader, val_dataloader, pseudo_meta, x_val_meta, net, question_field, mean_vectors, question_field.vocab.stoi )<set_options>
df_test.isnull().sum()
Titanic - Machine Learning from Disaster
8,095,919
del mean_vectors gc.collect()<prepare_output>
df_test.Fare = df_test.Fare.fillna(-1 )
Titanic - Machine Learning from Disaster
8,095,919
pseudo_preds =(pseudo_preds > pseudo_best_threshold ).astype(int) sample_submission['prediction'] = pseudo_preds mlc.kaggle.save_sub(sample_submission, 'submission.csv') sample_submission.head()<import_modules>
# reg_cross_val: evaluate a LogisticRegression on the given feature subset of
# the module-global `df` with 2-fold CV repeated 10 times (20 fits total) and
# return the mean accuracy.  (Loop names `treino`/`teste` are Portuguese for
# train/test.)
def reg_cross_val(variables): X = df[variables] y = df['Survived'] rkfold = RepeatedKFold(n_splits = 2, n_repeats = 10, random_state = 10) result = [] for treino, teste in rkfold.split(X): X_train, X_test = X.iloc[treino], X.iloc[teste] y_train, y_test = y.iloc[treino], y.iloc[teste] reg = LogisticRegression(max_iter = 500) reg.fit(X_train, y_train) result.append(reg.score(X_test, y_test)) return np.mean(result )
Titanic - Machine Learning from Disaster
8,095,919
import keras from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Embedding, Dense, Dropout, Concatenate, Lambda, Flatten from keras.layers import GlobalMaxPool1D from keras.models import Model import tqdm <define_variables>
def is_female(x):
    """Encode Sex numerically: 1 for 'female', 0 for anything else."""
    return 1 if x == 'female' else 0

df['Sex_bin'] = df['Sex'].map(is_female)
df_test['Sex_bin'] = df_test['Sex'].map(is_female)
Titanic - Machine Learning from Disaster
8,095,919
MAX_SEQUENCE_LENGTH = 60 MAX_WORDS = 45000 EMBEDDINGS_TRAINED_DIMENSIONS = 100 EMBEDDINGS_LOADED_DIMENSIONS = 300<compute_test_metric>
def embarked_s(x):
    """1 if the passenger embarked at Southampton ('S'), else 0."""
    return 1 if x == 'S' else 0

df['Embarked_S'] = df['Embarked'].map(embarked_s)
df_test['Embarked_S'] = df_test['Embarked'].map(embarked_s)

def embarked_c(x):
    """1 if the passenger embarked at Cherbourg ('C'), else 0."""
    return 1 if x == 'C' else 0

df['Embarked_C'] = df['Embarked'].map(embarked_c)
df_test['Embarked_C'] = df_test['Embarked'].map(embarked_c)
Titanic - Machine Learning from Disaster
8,095,919
def load_embeddings(file): embeddings = {} with open(file)as f: def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings = dict(get_coefs(*line.split(" ")) for line in f) print('Found %s word vectors.' % len(embeddings)) return embeddings<load_pretrained>
df['Family'] = df.SibSp + df.Parch df_test['Family'] = df_test.SibSp + df_test.Parch
Titanic - Machine Learning from Disaster
8,095,919
pretrained_embeddings = load_embeddings(".. /input/embeddings/glove.840B.300d/glove.840B.300d.txt" )<load_from_csv>
text_ticket = '' for i in df.Ticket: text_ticket += i lista = re.findall('[a-zA-Z]+', text_ticket) print('Most repeated terms in Tickets: ') print(pd.Series(lista ).value_counts().head(10))
Titanic - Machine Learning from Disaster
8,095,919
df_train = pd.read_csv(".. /input/train.csv") df_test = pd.read_csv(".. /input/test.csv" )<define_variables>
# Binary ticket-prefix indicator features for train and test frames, derived
# from substring matches on the Ticket column.
# NOTE(review): str.contains interprets its pattern as a regex, so the '.' in
# 'C.A.'/'S.C' matches any character and 'CA|C.A.' effectively reduces to
# 'CA' or 'C' + any + 'A' — confirm this is intended.
df['CA'] = df['Ticket'].str.contains('CA|C.A.' ).astype(int) df['SOTON'] = df['Ticket'].str.contains('SOTON|STON' ).astype(int) df['PC'] = df['Ticket'].str.contains('PC' ).astype(int) df['SC'] = df['Ticket'].str.contains('SC|S.C' ).astype(int) df['C'] = df['Ticket'].str.contains('C' ).astype(int) df_test['CA'] = df_test['Ticket'].str.contains('CA|C.A.' ).astype(int) df_test['SOTON'] = df_test['Ticket'].str.contains('SOTON|STON' ).astype(int) df_test['PC'] = df_test['Ticket'].str.contains('PC' ).astype(int) df_test['SC'] = df_test['Ticket'].str.contains('SC|S.C' ).astype(int) df_test['C'] = df_test['Ticket'].str.contains('C' ).astype(int )
Titanic - Machine Learning from Disaster
8,095,919
BATCH_SIZE = 512 Q_FRACTION = 1 questions = df_train.sample(frac=Q_FRACTION) question_texts = questions["question_text"].values question_targets = questions["target"].values test_texts = df_test["question_text"].fillna("_na_" ).values print(f"Working on {len(questions)} questions" )<train_model>
text_name = '' for i in df.Name: text_name += i lista = re.findall('[a-zA-Z]+', text_name) print('Most repeated words in Name column: ') print(pd.Series(lista ).value_counts().head(10))
Titanic - Machine Learning from Disaster
8,095,919
tokenizer = Tokenizer(num_words=MAX_WORDS) tokenizer.fit_on_texts(list(df_train["question_text"].values))<categorify>
# Binary honorific-title indicators from the Name column for both frames.
# NOTE(review): 'Mr' is a substring of 'Mrs', so every 'Mrs' row also sets
# the Mr flag — verify this overlap is acceptable downstream.
df['Master'] = df['Name'].str.contains('Master' ).astype(int) df['Mr'] = df['Name'].str.contains('Mr' ).astype(int) df['Miss'] = df['Name'].str.contains('Miss' ).astype(int) df['Mrs'] = df['Name'].str.contains('Mrs' ).astype(int) df_test['Master'] = df_test['Name'].str.contains('Master' ).astype(int) df_test['Mr'] = df_test['Name'].str.contains('Mr' ).astype(int) df_test['Miss'] = df_test['Name'].str.contains('Miss' ).astype(int) df_test['Mrs'] = df_test['Name'].str.contains('Mrs' ).astype(int )
Titanic - Machine Learning from Disaster
8,095,919
<load_pretrained><EOS>
variables = ['Age', 'Sex_bin', 'Pclass', 'Fare','Family', 'Embarked_S','Embarked_C','Cabin_NaN',\ 'CA', 'SOTON', 'PC', 'SC', 'Master', 'Mr', 'Miss', 'C', 'Mrs'] X = df[variables] y = df['Survived'] reg = LogisticRegression(max_iter = 500) reg.fit(X,y) resp = reg.predict(df_test[variables]) submit = pd.Series(resp, index=df_test['PassengerId'], name='Survived') submit.to_csv("model.csv", header=True )
Titanic - Machine Learning from Disaster
5,639,062
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model>
warnings.filterwarnings('ignore' )
Titanic - Machine Learning from Disaster
5,639,062
THRESHOLD = 0.35 class EpochMetricsCallback(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.f1s = [] self.precisions = [] self.recalls = [] def on_epoch_end(self, epoch, logs={}): predictions = self.model.predict(self.validation_data[0]) predictions =(predictions > THRESHOLD ).astype(int) predictions = np.asarray(predictions) targets = self.validation_data[1] f1 = metrics.f1_score(targets, predictions) precision = metrics.precision_score(targets, predictions) recall = metrics.recall_score(targets, predictions) print(" - F1 score: {0:.4f}, Precision: {1:.4f}, Recall: {2:.4f}" .format(f1, precision, recall)) self.f1s.append(f1) self.precisions.append(precision) self.recalls.append(recall) return def display_model_history(history): data = pd.DataFrame(data={'Train': history.history['loss'], 'Test': history.history['val_loss']}) ax = sns.lineplot(data=data, palette="pastel", linewidth=2.5, dashes=False) ax.set(xlabel='Epoch', ylabel='Loss', title='Loss') sns.despine() plt.show() def display_model_epoch_metrics(epoch_callback): data = pd.DataFrame(data = { 'F1': epoch_callback.f1s, 'Precision': epoch_callback.precisions, 'Recall': epoch_callback.recalls}) sns.lineplot(data=data, palette='muted', linewidth=2.5, dashes=False) sns.despine() plt.show()<prepare_x_and_y>
Xy_train = pd.read_csv(".. /input/titanic/train.csv", index_col="PassengerId") class CC: def __init__(self, dataframe): for col in dataframe.columns: setattr(self, col, col) cc = CC(Xy_train) X_train = Xy_train.drop(columns=[cc.Survived]) y_train = Xy_train[cc.Survived]
Titanic - Machine Learning from Disaster
5,639,062
X = pad_sequences(tokenizer.texts_to_sequences(question_texts), maxlen=MAX_SEQUENCE_LENGTH) Y = question_targets test_word_tokens = pad_sequences(tokenizer.texts_to_sequences(test_texts), maxlen=MAX_SEQUENCE_LENGTH )<choose_model_class>
class GenericTransformer(BaseEstimator, TransformerMixin):
    """Adapt a plain function into an sklearn transformer.

    ``transformer`` maps a frame X to a transformed frame.  The optional
    ``fitter`` derives state from the training data during ``fit``; when
    present, that state is passed to ``transformer`` as a second argument at
    transform time.
    """

    def __init__(self, transformer, fitter=None):
        self.transformer = transformer
        self.fitter = fitter

    def fit(self, X, y=None):
        # Cache whatever the fitter derives from the training data
        # (None when no fitter was supplied).
        if self.fitter is None:
            self.fit_val = None
        else:
            self.fit_val = self.fitter(X)
        return self

    def transform(self, X):
        if self.fit_val is None:
            return self.transformer(X)
        return self.transformer(X, self.fit_val)
Titanic - Machine Learning from Disaster
5,639,062
def make_model(filter_size, num_filters): tokenized_input = Input(shape=(MAX_SEQUENCE_LENGTH,), name="tokenized_input") pretrained = Embedding(MAX_WORDS, EMBEDDINGS_LOADED_DIMENSIONS, weights=[pretrained_emb_weights], trainable=False )(tokenized_input) pretrained = Reshape(( MAX_SEQUENCE_LENGTH, EMBEDDINGS_LOADED_DIMENSIONS, 1))(pretrained) pretrained = Dropout(0.1 )(pretrained) conv_0 = Conv2D(num_filters, kernel_size=(filter_size, EMBEDDINGS_LOADED_DIMENSIONS), kernel_initializer='he_normal', activation='tanh' )(pretrained) maxpool_0 = MaxPool2D(pool_size=(MAX_SEQUENCE_LENGTH - filter_size + 1, 1))(conv_0) d0 = Dropout(0.15 )(maxpool_0) d0 = Dense(10 )(d0) x = Flatten()(d0) x = Dropout(0.3 )(x) x = BatchNormalization()(x) out = Dense(1, activation="sigmoid" )(x) model = Model(inputs=[tokenized_input], outputs=out) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() return model<split>
default_imputer = GenericTransformer( fitter = lambda X: { col: "missing" if X[col].dtype == "object" else X[col].median() for col in X.columns}, transformer = lambda X, default_values:( X.assign(**{col: X[col].fillna(default_values[col])for col in X.columns})) , ) assert not default_imputer.fit_transform(X_train ).isna().any().any()
Titanic - Machine Learning from Disaster
5,639,062
filter_sizes = [1, 2, 3, 5] num_filters = 45 test_predictions = [] kaggle_predictions = [] train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.025) for f in filter_sizes: print("CNN MODEL WITH FILTER OF SIZE {0}".format(f)) epoch_callback = EpochMetricsCallback() model = make_model(f, num_filters) x, val_x, y, val_y = train_test_split(train_X, train_Y, test_size=0.01) history = model.fit( x=x, y=y, validation_data=(val_x, val_y), batch_size=512, epochs=23, callbacks=[epoch_callback], verbose=2) display_model_history(history) display_model_epoch_metrics(epoch_callback) kaggle_predictions.append(model.predict([test_word_tokens], batch_size=1024, verbose=2)) test_predictions.append(model.predict([test_X])) <save_to_csv>
# DummiesTransformer: one-hot encode with pd.get_dummies while guaranteeing a
# stable column set.  fit_transform records the dummy columns produced on the
# training data; transform re-encodes new data and reindexes onto exactly
# those columns, filling categories unseen at fit time with 0 and dropping
# categories that only appear at transform time.  The trailing asserts are an
# inline sanity check that all object columns get encoded.
class DummiesTransformer(BaseEstimator, TransformerMixin): def __init__(self, drop_first=False): self.drop_first = drop_first def fit_transform(self, x, y=None): result = pd.get_dummies(x, drop_first=self.drop_first) self.output_cols = result.columns return result def fit(self, x, y = None): self.fit_transform(x, y) return self def transform(self, x): x_dummies = pd.get_dummies(x, drop_first=self.drop_first) new_cols = set(x_dummies.columns) return pd.DataFrame({col: x_dummies[col] if col in new_cols else 0 for col in self.output_cols}) add_dummies = DummiesTransformer() assert(X_train.dtypes == "object" ).any() assert not(add_dummies.fit_transform(X_train ).dtypes == "object" ).any()
Titanic - Machine Learning from Disaster
5,639,062
avg = np.average(kaggle_predictions, axis=0) df_out = pd.DataFrame({"qid":df_test["qid"].values}) df_out['prediction'] =(avg > THRESHOLD ).astype(int) df_out.to_csv("submission.csv", index=False )<save_to_csv>
drop_original = GenericTransformer(lambda X: X.drop(columns=X_train.columns)) assert len(drop_original.fit_transform(X_train ).columns)== 0
Titanic - Machine Learning from Disaster
5,639,062
<define_variables>
# test_features: score a candidate feature pipeline two ways with 5-fold CV of
# a RidgeClassifier — `isolated` (the feature alone) and `combination` (the
# feature together with previously accepted ones) — recording both accuracies
# in the module-level `feat_accumulator` table, plus the improvement over the
# previous combination row when `prev_label` is given.  The final call seeds
# the table with a constant-only baseline.
feat_accumulator = pd.DataFrame() model = RidgeClassifier(random_state=0) def test_features(label, isolated, combination, prev_label=None, extra_imputers=[]): isolated_pipeline = make_pipeline(*extra_imputers, default_imputer, *isolated, drop_original, add_dummies, model) iso_accuracy = cross_val_score(isolated_pipeline, X_train, y_train, cv=5 ).mean() feat_accumulator.loc[label, "isolated"] = iso_accuracy combo_pipeline = make_pipeline(*extra_imputers, default_imputer, *combination, drop_original, add_dummies, model) combo_accuracy = cross_val_score(combo_pipeline, X_train, y_train, cv=5 ).mean() feat_accumulator.loc[label, "combination"] = combo_accuracy if prev_label is not None: old_combo_accuracy = feat_accumulator.loc[prev_label, "combination"] feat_accumulator.loc[label, "improvement"] = combo_accuracy - old_combo_accuracy display(feat_accumulator) test_features("null", [add_const], [add_const] )
Titanic - Machine Learning from Disaster
5,639,062
embed_size = 300 max_features = 95000 maxlen = 70<import_modules>
add_age = GenericTransformer( lambda X: X.assign(AGE_BINNED = pd.cut( X[cc.Age], [0,7,14,35,60,1000], labels=["young child","child", "young adult", "adult", "old"]))) test_features("age", [add_age], [add_age], "null" )
Titanic - Machine Learning from Disaster
5,639,062
import os import time import numpy as np import pandas as pd from tqdm import tqdm import math from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.model_selection import GridSearchCV, StratifiedKFold from sklearn.metrics import f1_score, roc_auc_score from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, CuDNNLSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D from keras.layers import Bidirectional, GlobalMaxPool1D, GlobalMaxPooling1D, GlobalAveragePooling1D from keras.layers import Input, Embedding, Dense, Conv2D, MaxPool2D, concatenate from keras.layers import Reshape, Flatten, Concatenate, Dropout, SpatialDropout1D from keras.optimizers import Adam from keras.models import Model from keras import backend as K from keras.engine.topology import Layer from keras import initializers, regularizers, constraints, optimizers, layers from keras.layers import concatenate from keras.callbacks import *<load_from_csv>
add_class = GenericTransformer(lambda X: X.assign(CLASS_BINNED=X[cc.Pclass].astype(str))) test_features("class", [add_class], [add_age, add_class], "age" )
Titanic - Machine Learning from Disaster
5,639,062
def load_and_prec() : train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(2018) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] return train_X, test_X, train_y, tokenizer.word_index<statistical_test>
# TicketSurvivalTransformer: leak-resistant target encoding of the Ticket
# column.  fit stores per-ticket survivor count/size and the global survival
# mean; transform adds a SurvivorFraction column computed from the OTHER
# passengers sharing the ticket — for rows that were part of the fit data,
# the passenger's own outcome is subtracted from the group statistics, and a
# single pseudo-observation at the global mean smooths the estimate
# ((others_survived + mean) / (others_count + 1)).  The closing assert checks
# that, on held-out rows, the encoded fraction stays near the overall rate.
class TicketSurvivalTransformer(BaseEstimator, TransformerMixin): def __init__(self, xy): self.xy = xy def fit(self, X, y=None): X_with_survival = X.assign(Survived = self.xy.reindex(X.index)[cc.Survived]) self.mean_survival = X_with_survival[cc.Survived].mean() self.group_stats =(X_with_survival.groupby(cc.Ticket)[cc.Survived].agg(["count", "sum"])) self.fit_X = X.copy() return self def transform(self, X): X_with_survival = X.assign(Survived = self.xy.reindex(X.index)[cc.Survived]) group_stats_by_passenger = self.group_stats.reindex(X[cc.Ticket].unique() , fill_value=0) X_counts = group_stats_by_passenger.loc[X[cc.Ticket]].set_index(X.index) is_overlap = np.array([x in self.fit_X.index for x in X.index]) other_counts =(X_counts["count"]-1 ).where(is_overlap, X_counts["count"]) other_survivor_count =(( X_counts["sum"] - self.xy[cc.Survived].reindex(X.index)) .where(is_overlap, X_counts["sum"])) survival_fraction =(other_survivor_count + self.mean_survival)/(other_counts + 1) return X.assign(SurvivorFraction=survival_fraction) assert np.isclose(( TicketSurvivalTransformer(Xy_train ).fit(X_train.loc[:400]) .transform(X_train.loc[400:])["SurvivorFraction"].mean()), Xy_train.loc[400:][cc.Survived].mean() , rtol=0.1 )
Titanic - Machine Learning from Disaster
5,639,062
def _load_embedding_matrix(word_index, embedding_file, min_line_len=0,
                           open_kwargs=None):
    """Shared loader for text-format word-embedding files.

    Parses ``embedding_file`` (one "word v1 v2 ... vN" entry per line,
    skipping lines of length <= ``min_line_len`` — used to drop header /
    malformed lines), then builds an (nb_words, dim) matrix aligned with
    ``word_index``.  Rows for out-of-vocabulary words are drawn from a normal
    distribution matching the mean/std of the loaded vectors; indices past
    the module-global ``max_features`` are skipped.

    ``open_kwargs`` is forwarded to ``open`` (e.g. encoding/errors).
    """
    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')

    open_kwargs = open_kwargs or {}
    # Context manager: the originals left the file handle open.
    with open(embedding_file, **open_kwargs) as f:
        embeddings_index = dict(
            get_coefs(*line.split(" "))
            for line in f
            if len(line) > min_line_len)
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix


def load_glove(word_index):
    """GloVe 840B.300d: every line is a vector, so no length filter."""
    return _load_embedding_matrix(
        word_index,
        '../input/embeddings/glove.840B.300d/glove.840B.300d.txt')


def load_fasttext(word_index):
    """fastText wiki-news 300d: short (<=100 char) header lines are skipped."""
    return _load_embedding_matrix(
        word_index,
        '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec',
        min_line_len=100)


def load_para(word_index):
    """Paragram sl999: non-UTF8 bytes ignored, short lines skipped."""
    return _load_embedding_matrix(
        word_index,
        '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt',
        min_line_len=100,
        open_kwargs={'encoding': 'utf8', 'errors': 'ignore'})
add_tkt = TicketSurvivalTransformer(Xy_train) test_features("tkt", [add_tkt], [add_age, add_class, add_tkt], "class" )
Titanic - Machine Learning from Disaster
5,639,062
# Attention: custom Keras layer implementing additive-style attention over a
# (batch, step_dim, features) input.  A learned (features,) weight vector
# (plus optional per-step bias) scores each timestep; scores go through tanh,
# exp, optional masking, and normalisation (with epsilon to avoid 0/0), and
# the layer returns the attention-weighted sum over the time axis, shape
# (batch, features).  compute_mask returns None so the mask is consumed here
# rather than propagated.
class Attention(Layer): def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias self.step_dim = step_dim self.features_dim = 0 super(Attention, self ).__init__(**kwargs) def build(self, input_shape): assert len(input_shape)== 3 self.W = self.add_weight(( input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight(( input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): return None def call(self, x, mask=None): features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x,(-1, features_dim)) , K.reshape(self.W,(features_dim, 1))),(-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) if mask is not None: a *= K.cast(mask, K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True)+ K.epsilon() , K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], self.features_dim
test_features("sex", [add_sex], [add_age, add_class, add_tkt, add_sex], "tkt" )
Titanic - Machine Learning from Disaster
5,639,062
class CyclicLR(Callback):
    # Cyclical Learning Rate Keras callback: oscillates the optimizer's LR
    # between base_lr and max_lr on a triangular schedule, updated every batch.
    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000.,
                 mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'):
        # base_lr / max_lr: lower and upper LR bounds of the cycle.
        # step_size: number of iterations in half a cycle.
        # mode: 'triangular' (constant amplitude), 'triangular2' (amplitude
        #       halves each cycle), or 'exp_range' (amplitude scaled by
        #       gamma**iteration). Ignored when a custom scale_fn is given.
        super(CyclicLR, self).__init__()
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        if scale_fn == None:
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = lambda x: 1 / (2.**(x - 1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma**(x)
                self.scale_mode = 'iterations'
        else:
            # Custom scaling: scale_mode picks the argument passed to scale_fn
            # ('cycle' number or raw 'iterations' count).
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.
        self.trn_iterations = 0.
        self.history = {}
        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        # Optionally update the schedule parameters and restart the cycle.
        if new_base_lr != None:
            self.base_lr = new_base_lr
        if new_max_lr != None:
            self.max_lr = new_max_lr
        if new_step_size != None:
            self.step_size = new_step_size
        self.clr_iterations = 0.

    def clr(self):
        # Current LR from the triangular wave: `cycle` counts completed cycles,
        # `x` is the distance from the cycle's peak (0 at peak, 1 at ends).
        cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
        x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
        if self.scale_mode == 'cycle':
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(cycle)
        else:
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)

    def on_train_begin(self, logs={}):
        # NOTE(review): mutable default `logs={}` is a Python wart, but it is
        # immediately shadowed by `logs = logs or {}`, so it is harmless here.
        logs = logs or {}
        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            # Resuming mid-schedule: restore the LR for the current iteration.
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        # Advance the schedule one iteration, record history, set the new LR.
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        K.set_value(self.model.optimizer.lr, self.clr())


def f1(y_true, y_pred):
    # Batch-wise F1 metric for Keras: precision/recall on rounded (0/1)
    # predictions; K.epsilon() guards every division against zero.
    def recall(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
# Try imputing Age from the passenger's title and evaluate the resulting
# feature configuration against the previous "sex" configuration.
# NOTE(review): TitleAgeImputer / test_features are defined elsewhere in the
# notebook; argument semantics inferred from usage — confirm there.
impute_age = TitleAgeImputer()
test_features("age_from_title", [add_age], [add_age, add_class, add_tkt, add_sex], "sex", extra_imputers=[impute_age])
Titanic - Machine Learning from Disaster
5,639,062
def model_lstm_atten(embedding_matrix):
    """Build the BiLSTM+BiGRU classifier with dual attention heads.

    The sequence is embedded with the (frozen) pretrained matrix, passed
    through a BiLSTM then a BiGRU; attention over each recurrent output is
    concatenated with average- and max-pooled GRU features before a small
    dense head produces a sigmoid probability. Compiled with binary
    cross-entropy, Adam, and the custom f1 metric.
    """
    token_ids = Input(shape=(maxlen,))
    # Frozen pretrained embeddings, lightly regularized along the time axis.
    embedded = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(token_ids)
    embedded = SpatialDropout1D(0.1)(embedded)

    lstm_seq = Bidirectional(CuDNNLSTM(40, return_sequences=True))(embedded)
    gru_seq = Bidirectional(CuDNNGRU(40, return_sequences=True))(lstm_seq)

    # One attention summary per recurrent layer, plus pooled GRU features.
    lstm_attended = Attention(maxlen)(lstm_seq)
    gru_attended = Attention(maxlen)(gru_seq)
    gru_avg_pool = GlobalAveragePooling1D()(gru_seq)
    gru_max_pool = GlobalMaxPooling1D()(gru_seq)

    merged = concatenate([lstm_attended, gru_attended, gru_avg_pool, gru_max_pool])
    merged = Dense(16, activation="relu")(merged)
    merged = Dropout(0.1)(merged)
    probability = Dense(1, activation="sigmoid")(merged)

    model = Model(inputs=token_ids, outputs=probability)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[f1])
    return model
# Fit the final model on the fully-transformed training data, report a 4-fold
# CV score, then score the Kaggle test set and write the submission file.
final_model = RidgeClassifier(random_state=0)
final_X_train = final_pipeline.fit_transform(X_train)
cvscore = cross_val_score(final_model, final_X_train, y_train, cv=4)
print(f"cv = {np.mean(cvscore)}: {list(cvscore)}")
final_model.fit(final_X_train, y_train)

# Apply the already-fitted pipeline to the held-out test passengers.
X_test = pd.read_csv("../input/titanic/test.csv", index_col="PassengerId")
final_X_test = final_pipeline.transform(X_test)
preds_test = final_model.predict(final_X_test)

# FIX: the Kaggle Titanic grader requires the exact header "PassengerId,Survived";
# the original wrote 'PassengerID' (wrong capitalization), which is rejected.
output = pd.DataFrame({'PassengerId': X_test.index, 'Survived': preds_test})
file = 'submission.csv'
output.to_csv(file, index=False)
print(f"Wrote predictions to '{file}'")
Titanic - Machine Learning from Disaster
1,266,101
def train_pred(model, train_X, train_y, val_X, val_y, epochs=2, callback=None):
    """Train `model` one epoch at a time, printing the validation F1 each epoch.

    Returns (validation predictions, test-set predictions, final epoch's F1).
    NOTE(review): despite the original local name `best_score`, the returned
    score is the *last* epoch's F1, not the best across epochs. Predictions on
    the module-level `test_X` are computed once after training completes.
    """
    for epoch_idx in range(epochs):
        model.fit(train_X, train_y, batch_size=512, epochs=1,
                  validation_data=(val_X, val_y), callbacks=callback, verbose=0)
        # Score the validation set with the fixed 0.33 decision threshold.
        val_probs = model.predict([val_X], batch_size=1024, verbose=0)
        last_f1 = metrics.f1_score(val_y, (val_probs > 0.33).astype(int))
        print("Epoch: ", epoch_idx, "- Val F1 Score: {:.4f}".format(last_f1))
    test_probs = model.predict([test_X], batch_size=1024, verbose=0)
    print('=' * 60)
    return val_probs, test_probs, last_f1
def get_missing_data_table(dataframe):
    """Per-column missing-value report, sorted by column name.

    Returns a DataFrame with columns 'TOTAL' (NaN count) and 'PERCENTAGE'
    (NaN fraction of the column — a ratio in [0, 1], despite the name).
    """
    null_counts = dataframe.isnull().sum()
    null_fractions = dataframe.isnull().sum() / dataframe.isnull().count()
    report = pd.concat([null_counts, null_fractions], axis='columns',
                       keys=['TOTAL', 'PERCENTAGE'])
    return report.sort_index(ascending=True)


def get_null_observations(dataframe, column):
    """Return the rows of `dataframe` where `column` is NaN."""
    return dataframe[pd.isnull(dataframe[column])]


def delete_null_observations(dataframe, column):
    """Return a copy of `dataframe` without the rows where `column` is NaN."""
    null_rows = get_null_observations(dataframe, column)
    return dataframe.drop(null_rows.index)


def transform_dummy_variables(dataframe, columns):
    """One-hot encode `columns` via pd.get_dummies (drop_first=False).

    The listed columns are first cast to Categorical; note get_dummies also
    encodes any other object/categorical columns in the frame. The input
    frame is not modified.
    """
    encoded = dataframe.copy()
    for column in columns:
        encoded[column] = pd.Categorical(encoded[column])
    return pd.get_dummies(encoded, drop_first=False)


def imput_nan_values(dataframe, column, strateg):
    """Return a copy of `dataframe` with NaNs in `column` filled by sklearn's
    Imputer using strategy `strateg` (e.g. 'mean', 'median').

    NOTE(review): `Imputer` is the legacy sklearn class (removed in modern
    releases) imported elsewhere in the notebook.
    """
    imputer = Imputer(strategy=strateg)
    filled = dataframe.copy()
    filled[column] = imputer.fit_transform(filled[column].values.reshape(-1, 1))
    return filled


print("Everything's ready!")
Titanic - Machine Learning from Disaster