kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
737,908
threshold, score = get_f1(target, oof_pred) print("F1 score after K fold at threshold {} is {}".format(threshold, score)) fake_test["pred"] =(fake_pred > threshold ).astype(int) print("Fake test F1 score is {}".format(f1_score(fake_test["target"], (fake_test["pred"] ).astype(int)))) test["prediction"] =(pred > threshold ).astype(int )<save_to_csv>
full_set['Fare'] = full_set['Fare'].apply(fare_bin ).astype(int )
Titanic - Machine Learning from Disaster
737,908
submission = test[["qid", "prediction"]] submission.to_csv("submission.csv", index = False) submission.head()<set_options>
full_set['Pclass'] = full_set['Pclass'].astype('str' )
Titanic - Machine Learning from Disaster
737,908
def seed_everything(seed=1234): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(6017) print('Seeding done...' )<feature_engineering>
full_w_dum = pd.get_dummies(full_set) full_w_dum.head()
Titanic - Machine Learning from Disaster
737,908
print('Preproccesing texts.... ') print('lower...') df["question_text"] = df["question_text"].apply(lambda x: x.lower()) df_final["question_text"] = df_final["question_text"].apply(lambda x: x.lower()) contraction_mapping = { "ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so as", "this's": "this is", "that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is", "they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will 
have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have" } def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text print('contractions...') df["question_text"] = df["question_text"].apply(lambda x: clean_contractions(x,contraction_mapping)) df_final["question_text"] = df_final["question_text"].apply(lambda x: clean_contractions(x,contraction_mapping)) punct_mapping = {"‘": "'", "₹": "e", "´": "'", "°": "", "€": "e", "™": "tm", "√": " sqrt ", "×": "x", "²": "2", "—": "-", "–": "-", "’": "'", "_": "-", "`": "'", '“': '"', '”': '"', '“': '"', "£": "e", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', } punct = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', 
'`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text print('clean special chars...') df["question_text"] = df["question_text"].apply(lambda x: clean_special_chars(x, punct, punct_mapping)) df_final["question_text"] = df_final["question_text"].apply(lambda x: clean_special_chars(x, punct, punct_mapping)) mispell_dict = {'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 
'demonetization', 'demonetisation': 'demonetization'} def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x print('clean misspellings...') df["question_text"] = df["question_text"].apply(lambda x: correct_spelling(x,mispell_dict)) df_final["question_text"] = df_final["question_text"].apply(lambda x: correct_spelling(x,mispell_dict))<string_transform>
full_set = full_w_dum
Titanic - Machine Learning from Disaster
737,908
dim = 300 num_words = 95000 max_len = 100 print('Fiting tokenizer') tokenizer = Tokenizer(num_words=num_words) tokenizer.fit_on_texts(list(df['question_text'])+list(df_final['question_text'])) print('text to sequence') x_train = tokenizer.texts_to_sequences(df['question_text']) print('pad sequence') x_train = pad_sequences(x_train,maxlen=max_len) y_train = df['target'].values print(x_train.shape) print(y_train.shape) x_test=tokenizer.texts_to_sequences(df_final['question_text']) x_test = pad_sequences(x_test,maxlen=max_len) print('Test data loaded:',x_test.shape )<compute_train_metric>
cols = list(set(full_set.columns)- set(['Survived'])) X_train, X_test = full_set[:train_set.shape[0]][cols], full_set[train_set.shape[0]:][cols] y_train = full_set[:train_set.shape[0]]['Survived']
Titanic - Machine Learning from Disaster
737,908
print('Glove...') def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open('.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt')) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() print(len(all_embs)) word_index = tokenizer.word_index embedding_matrix_glov = np.random.normal(emb_mean, emb_std,(num_words, dim)) count=0 for word, i in word_index.items() : if i >= num_words: break embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix_glov[i] = embedding_vector else: count += 1 print('embedding matrix size:',embedding_matrix_glov.shape) print('Number of words not in vocab:',count) del embeddings_index,all_embs gc.collect()<statistical_test>
models = [ LogisticRegression, SVC, LinearSVC, RandomForestClassifier, KNeighborsClassifier, XGBClassifier ] mscores = [] lscores = ['f1','accuracy','recall','roc_auc'] np.random.seed(42) for elem in models: mscores2 = [] model = elem() for sc in lscores: scores = cross_val_score(model, X_train, y_train, scoring=sc) mscores2.append(np.mean(scores)) mscores.append(mscores2 )
Titanic - Machine Learning from Disaster
737,908
print('Para...') EMBEDDING_FILE = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() print(len(all_embs)) word_index = tokenizer.word_index embedding_matrix_para = np.random.normal(emb_mean, emb_std,(num_words, dim)) count=0 for word, i in word_index.items() : if i >= num_words: break embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix_para[i] = embedding_vector else: count += 1 print('embedding matrix size:',embedding_matrix_para.shape) print('Number of words not in vocab:',count) del embeddings_index,all_embs gc.collect()<train_model>
order = np.argsort(np.mean(np.array(mscores), axis=1)) print(order )
Titanic - Machine Learning from Disaster
737,908
class EarlyStopping: def __init__(self, patience=7, verbose=False): self.patience = patience self.verbose = verbose self.counter = 0 self.best_score = None self.early_stop = False self.val_loss_min = np.Inf def __call__(self, val_loss, model): score = -val_loss if self.best_score is None: self.best_score = score self.save_checkpoint(val_loss, model) elif score < self.best_score: self.counter += 1 if self.verbose: print(f'EarlyStopping counter: {self.counter} out of {self.patience}') if self.counter >= self.patience: self.early_stop = True else: self.best_score = score self.save_checkpoint(val_loss, model) self.counter = 0 def save_checkpoint(self, val_loss, model): if self.verbose: print(f'Validation loss decreased({self.val_loss_min:.6f} --> {val_loss:.6f} ).Saving model...') torch.save(model.state_dict() , 'checkpoint.pt') self.val_loss_min = val_loss<define_variables>
from sklearn.model_selection import StratifiedKFold, KFold
Titanic - Machine Learning from Disaster
737,908
class CyclicLR(object): def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3, step_size=2000, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', last_batch_iteration=-1): if not isinstance(optimizer, torch.optim.Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer ).__name__)) self.optimizer = optimizer if isinstance(base_lr, list)or isinstance(base_lr, tuple): if len(base_lr)!= len(optimizer.param_groups): raise ValueError("expected {} base_lr, got {}".format( len(optimizer.param_groups), len(base_lr))) self.base_lrs = list(base_lr) else: self.base_lrs = [base_lr] * len(optimizer.param_groups) if isinstance(max_lr, list)or isinstance(max_lr, tuple): if len(max_lr)!= len(optimizer.param_groups): raise ValueError("expected {} max_lr, got {}".format( len(optimizer.param_groups), len(max_lr))) self.max_lrs = list(max_lr) else: self.max_lrs = [max_lr] * len(optimizer.param_groups) self.step_size = step_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.batch_step(last_batch_iteration + 1) self.last_batch_iteration = last_batch_iteration def batch_step(self, batch_iteration=None): if batch_iteration is None: batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr def _triangular_scale_fn(self, x): return 1. 
def _triangular2_scale_fn(self, x): return 1 /(2.**(x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): step_size = float(self.step_size) cycle = np.floor(1 + self.last_batch_iteration /(2 * step_size)) x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1) lrs = [] param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs) for param_group, base_lr, max_lr in param_lrs: base_height =(max_lr - base_lr)* np.maximum(0,(1 - x)) if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration) lrs.append(lr) return lrs def chaos_lr(r): lr_lambda = lambda iters: rel_val(iters) def rel_val(iteration): x = 0.99 for i in range(iteration): x = x*r*(1.-x) return x return lr_lambda def threshold_search(y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in [i * 0.01 for i in range(100)]: with warnings.catch_warnings() : warnings.simplefilter("ignore") score = f1_score(y_true=y_true, y_pred=y_proba > threshold) if score > best_score: best_threshold = threshold best_score = score best threshold is % f with score %f'%(best_threshold,best_score)) search_result = {'threshold': best_threshold, 'f1': best_score} return search_result torch.cuda.init() torch.cuda.empty_cache() print('CUDA MEM:',torch.cuda.memory_allocated()) print('cuda:', torch.cuda.is_available()) print('cude index:',torch.cuda.current_device()) batch_size = 512 print('batch_size:',batch_size) print('---') X_train1,X_val,y_train1,y_val = train_test_split(x_train, y_train) X_train2,X_test,y_train2,y_test = train_test_split(X_train1, y_train1) x_train_tensor = torch.tensor(X_train2, dtype=torch.long ).cuda() y_train_tensor = torch.tensor(y_train2, dtype=torch.float32 ).cuda() train_dataset = torch.utils.data.TensorDataset(x_train_tensor,y_train_tensor) train_loader = torch.utils.data.DataLoader(dataset=train_dataset,batch_size=batch_size,shuffle=True) 
x_val_tensor = torch.tensor(X_val, dtype=torch.long ).cuda() y_val_tensor = torch.tensor(y_val, dtype=torch.float32 ).cuda() val_dataset = torch.utils.data.TensorDataset(x_val_tensor,y_val_tensor) val_loader = torch.utils.data.DataLoader(dataset=val_dataset,batch_size=batch_size,shuffle=False) class Attention(nn.Module): def __init__(self,hidden_lstm_size): super(Attention,self ).__init__() self.hidden_lstm_size = hidden_lstm_size self.seq_len = max_len weights = torch.zeros(self.hidden_lstm_size,1) nn.init.xavier_uniform_(weights) self.weights = nn.Parameter(weights) def forward(self,x): x_c = x.contiguous().view(-1, self.hidden_lstm_size) a = torch.mm(x_c, self.weights) a = a.view(-1, self.seq_len) a = torch.tanh(a) s = F.softmax(a,dim=1) s = torch.unsqueeze(s, -1) weighted_input =s*x return torch.sum(weighted_input, 1) class Sentiment(nn.Module): def __init__(self,matrix_glove,matrix_para,batch_size): super(Sentiment,self ).__init__() print('Vocab vectors size:',matrix_glove.shape) self.batch_size = batch_size self.hidden_dim = 128 self.lin_dim = 64 self.n_layers = 1 self.embedding_glove = nn.Embedding(matrix_glove.shape[0],matrix_glove.shape[1]) self.embedding_glove.weight = nn.Parameter(torch.tensor(matrix_glove, dtype=torch.float32)) self.embedding_glove.weight.requires_grad = False self.embedding_para = nn.Embedding(matrix_para.shape[0],matrix_para.shape[1]) self.embedding_para.weight = nn.Parameter(torch.tensor(matrix_para, dtype=torch.float32)) self.embedding_para.weight.requires_grad = False self.lstm = nn.LSTM(input_size=matrix_glove.shape[1]*2, hidden_size=self.hidden_dim, num_layers=self.n_layers, bidirectional=True, batch_first=True, dropout=0.1) self.att_lstm = Attention(self.hidden_dim*2) self.linear1 = nn.Linear(2*self.hidden_dim,self.lin_dim) self.relu = nn.ReLU() self.linear2 = nn.Linear(self.lin_dim,1) self.dropout = nn.Dropout(0.1) def forward(self,x): hidden =(torch.zeros(2*self.n_layers, x.shape[0], self.hidden_dim ).cuda() , 
torch.zeros(2*self.n_layers, x.shape[0], self.hidden_dim ).cuda()) hidden_gru = torch.zeros(2*self.n_layers, x.shape[0], self.hidden_dim ).cuda() e_glove = self.embedding_glove(x) e_para = self.embedding_para(x) e = torch.cat([e_glove,e_para],dim=-1) lstm_out, hidden = self.lstm(e, hidden) out_lstm = self.att_lstm(lstm_out) out = self.linear1(out_lstm) out = self.relu(out) out = self.dropout(out) return self.linear2(out) model = Sentiment(embedding_matrix_glov,embedding_matrix_para,batch_size=batch_size ).cuda() print(model) print('-'*80) early_stopping = EarlyStopping(patience=2,verbose=True) loss_function = nn.BCEWithLogitsLoss().cuda() optimizer = optim.RMSprop(model.parameters() ,lr=1e-3) scheduler = CyclicLR(optimizer,base_lr=1e-3, max_lr=4e-3, step_size=300., mode='exp_range', gamma=0.99994) losses = [] val_losses=[] epoch_acc=[] epoch_val_acc=[] val_list = list(val_loader) for epoch in range(100): epoch_losses=[] epoch_val_losses = [] preds = [] val_preds=[] targets = [] acc = [] for batch,(x_batch,y_true)in enumerate(list(iter(train_loader)) ,1): model.train() optimizer.zero_grad() y_pred = model(x_batch ).squeeze(1) y_numpy_pred =torch.sigmoid(y_pred ).cpu().detach().numpy() preds += y_numpy_pred.tolist() y_numpy_true = y_true.cpu().detach().numpy() targets += y_numpy_true.tolist() loss = loss_function(y_pred,y_true) epoch_losses.append(loss.item()) loss.backward() optimizer.step() scheduler.batch_step() acc.append(accuracy_score(y_numpy_true,np.round(y_numpy_pred))) if batch % 100 == 0: print('\rtraining(batch,loss,acc)| ',batch,' ===>',loss.item() ,' acc ',np.mean(acc),end='') losses.append(np.mean(epoch_losses)) targets = np.array(targets) preds = np.array(preds) search_result = threshold_search(targets, preds) train_f1 = search_result['f1'] epoch_acc.append(np.mean(acc)) targets = [] val_acc=[] model.eval() with torch.no_grad() : for batch,(x_val_batch,y_true)in enumerate(val_list,1): y_pred = model(x_val_batch ).squeeze(1) y_numpy_pred = 
torch.sigmoid(y_pred ).cpu().detach().numpy() val_preds += y_numpy_pred.tolist() y_numpy_true = y_true.cpu().detach().numpy() targets += y_numpy_true.tolist() val_loss = loss_function(y_pred,y_true) epoch_val_losses.append(val_loss.item()) val_acc.append(accuracy_score(y_numpy_true,np.round(y_numpy_pred))) if batch % 100 == 0: print('\rvalidation(batch,acc)| ',batch,' ===>', np.mean(val_acc),end='') val_losses.append(np.mean(epoch_val_losses)) epoch_val_acc.append(np.mean(val_acc)) targets = np.array(targets) val_preds = np.array(val_preds) search_result = threshold_search(targets, val_preds) val_f1 = search_result['f1'] print(' EPOCH: ',epoch,' has acc of ',epoch_acc[-1],' ,has loss of ',losses[-1], ' ,f1 of ',train_f1,' val acc of ',epoch_val_acc[-1],' ,val loss of ',val_losses[-1],' ,val f1 of ',val_f1) print('-'*80) if early_stopping.early_stop: print("Early stopping at ",epoch," epoch") break else: early_stopping(1.-val_f1, model) print('Training finished.... ' )<choose_model_class>
results_kfold = [] for K in [5,6,7,8,9,10]: model = XGBClassifier() kfold = KFold(n_splits=K, random_state=42) res = cross_val_score(model, X_train, y_train, cv=kfold) results_kfold.append(( K,res.mean() *100, res.std() *100))
Titanic - Machine Learning from Disaster
737,908
print(os.listdir()) model = Sentiment(embedding_matrix_glov,embedding_matrix_para,batch_size=batch_size ).cuda() model.load_state_dict(torch.load('checkpoint.pt'))<prepare_output>
list(map(lambda x: print('Iteration nº {:2}, with acc.{:.12} and std.dev.{:.12}'.format(*x)) ,results_kfold)) ;
Titanic - Machine Learning from Disaster
737,908
print('Threshold:',search_result['threshold']) print(x_test.shape) submission_dataset = torch.utils.data.TensorDataset(torch.tensor(x_test, dtype=torch.long ).cuda()) submission_loader = torch.utils.data.DataLoader(dataset=submission_dataset,batch_size=batch_size, shuffle=False) pred = [] with torch.no_grad() : for x in list(submission_loader): model.eval() pred += torch.sigmoid(model(x[0] ).squeeze(1)).cpu().data.numpy().tolist() pred = np.array(pred) df_subm = pd.DataFrame() df_subm['qid'] = df_final.qid df_subm['prediction'] =(pred > search_result['threshold'] ).astype(int) print(df_subm.head()) df_subm.to_csv('submission.csv', index=False )<set_options>
results_strat_kfold = [] for K in [5,6,7,8,9,10]: model = XGBClassifier() kfold = StratifiedKFold(n_splits=K, random_state=42) res = cross_val_score(model, X_train, y_train, cv=kfold) results_strat_kfold.append(( K,res.mean() *100, res.std() *100))
Titanic - Machine Learning from Disaster
737,908
tqdm.pandas() warnings.filterwarnings("ignore", message="F-score is ill-defined and being set to 0.0 due to no predicted samples.") %matplotlib inline<load_from_csv>
list(map(lambda x: print('Iteration nº {:2}, with acc.{:.12} and std.dev.{:.12}'.format(*x)) ,results_strat_kfold)) ;
Titanic - Machine Learning from Disaster
737,908
train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") print('Train data dimension: ', train_df.shape) display(train_df.head()) print('Test data dimension: ', test_df.shape) display(test_df.head() )<create_dataframe>
xg_scores = [] kfold = StratifiedKFold(n_splits=7, random_state=42) for lamb in [.05,.1,.2,.3,.4,.5,.6 ]: for eta in [.2,.19,.17,.15,.13,.11]: model = XGBClassifier(learning_rate=eta, reg_lambda=lamb) res = cross_val_score(model, X_train, y_train, cv=kfold) xg_scores.append({'lamb':lamb, 'eta':eta, 'acc':res.mean() } )
Titanic - Machine Learning from Disaster
737,908
enable_local_test = True if enable_local_test: n_test = len(test_df) train_df, local_test_df =(train_df.iloc[:-n_test].reset_index(drop=True), train_df.iloc[-n_test:].reset_index(drop=True)) else: local_test_df = pd.DataFrame([[None, None, 0], [None, None, 0]], columns=['qid', 'question_text', 'target']) n_test = 2<set_options>
sorted(xg_scores,key=lambda x: x['acc'], reverse=True)[0]
Titanic - Machine Learning from Disaster
737,908
def seed_everything(seed=1234): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything()<compute_test_metric>
model = XGBClassifier(learning_rate=.17, reg_lambda=.5) model.fit(X_train, y_train) predicted = model.predict(X_test )
Titanic - Machine Learning from Disaster
737,908
<compute_test_metric><EOS>
test_set['Survived'] = predicted.astype(int) test_set[['PassengerId','Survived']].to_csv('submission.csv', sep=',', index=False )
Titanic - Machine Learning from Disaster
11,400,975
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables>
!pip install pywaffle
Titanic - Machine Learning from Disaster
11,400,975
embed_size = 300 max_features = 95000 maxlen = 70<define_variables>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import pandas_profiling import plotly.express as px import plotly.graph_objects as go import sklearn.metrics as metrics import plotly.offline as py from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler from sklearn.compose import ColumnTransformer from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score from sklearn.metrics import accuracy_score, roc_curve,auc, confusion_matrix,precision_recall_curve,precision_recall_curve,plot_precision_recall_curve from pywaffle import Waffle from yellowbrick.classifier import classification_report from plotly.subplots import make_subplots
Titanic - Machine Learning from Disaster
11,400,975
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: x = x.replace(punct, f' {punct} ') return x<feature_engineering>
custom_colors = [" customPalette = sns.set_palette(sns.color_palette(custom_colors))
Titanic - Machine Learning from Disaster
11,400,975
for df in [train_df, test_df, local_test_df]: df["question_text"] = df["question_text"].str.lower() df["question_text"] = df["question_text"].apply(lambda x: clean_text(x)) df["question_text"].fillna("_ x_train = train_df["question_text"].values x_test = test_df["question_text"].values x_test_local = local_test_df["question_text"].values tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(x_train)+ list(x_test_local)) x_train = tokenizer.texts_to_sequences(x_train) x_test = tokenizer.texts_to_sequences(x_test) x_test_local = tokenizer.texts_to_sequences(x_test_local) x_train = pad_sequences(x_train, maxlen=maxlen) x_test = pad_sequences(x_test, maxlen=maxlen) x_test_local = pad_sequences(x_test_local, maxlen=maxlen) y_train = train_df['target'].values y_test = local_test_df['target'].values<normalization>
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5} )
Titanic - Machine Learning from Disaster
11,400,975
seed_everything() glove_embeddings = load_glove(tokenizer.word_index, max_features) paragram_embeddings = load_para(tokenizer.word_index, max_features) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0) np.shape(embedding_matrix )<split>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
11,400,975
splits = list(StratifiedKFold(n_splits=4, shuffle=True, random_state=10 ).split(x_train, y_train))<normalization>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
11,400,975
class Attention(nn.Module): def __init__(self, feature_dim, step_dim, bias=True, **kwargs): super(Attention, self ).__init__(**kwargs) self.supports_masking = True self.bias = bias self.feature_dim = feature_dim self.step_dim = step_dim self.features_dim = 0 weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight) if bias: self.b = nn.Parameter(torch.zeros(step_dim)) def forward(self, x, mask=None): feature_dim = self.feature_dim step_dim = self.step_dim eij = torch.mm( x.contiguous().view(-1, feature_dim), self.weight ).view(-1, step_dim) if self.bias: eij = eij + self.b eij = torch.tanh(eij) a = torch.exp(eij) if mask is not None: a = a * mask a = a / torch.sum(a, 1, keepdim=True)+ 1e-10 weighted_input = x * torch.unsqueeze(a, -1) return torch.sum(weighted_input, 1) class SpatialDropout(nn.Dropout2d): def forward(self, x): x = x.unsqueeze(2) x = x.permute(0, 3, 2, 1) x = super(SpatialDropout, self ).forward(x) x = x.permute(0, 3, 2, 1) x = x.squeeze(2) return x<choose_model_class>
train_data.isna().sum()
Titanic - Machine Learning from Disaster
11,400,975
class NeuralNet(nn.Module): def __init__(self): super(NeuralNet, self ).__init__() hidden_size = 60 self.embedding = nn.Embedding(max_features, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = SpatialDropout(0.1) self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True) self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm_attention = Attention(hidden_size * 2, maxlen) self.gru_attention = Attention(hidden_size * 2, maxlen) self.linear = nn.Linear(480, 16) self.relu = nn.ReLU() self.dropout = nn.Dropout(0.1) self.out = nn.Linear(16, 1) def forward(self, x): h_embedding = self.embedding(x) h_embedding = self.embedding_dropout(h_embedding) h_lstm, _ = self.lstm(h_embedding) h_gru, _ = self.gru(h_lstm) h_lstm_atten = self.lstm_attention(h_lstm) h_gru_atten = self.gru_attention(h_gru) avg_pool = torch.mean(h_gru, 1) max_pool, _ = torch.max(h_gru, 1) conc = torch.cat(( h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1) conc = self.relu(self.linear(conc)) conc = self.dropout(conc) out = self.out(conc) return out<define_variables>
train_data.nunique()
Titanic - Machine Learning from Disaster
11,400,975
batch_size = 512 n_epochs = 5<choose_model_class>
train_data['Cabin'] = train_data['Cabin'].apply(lambda i: i[0] if pd.notnull(i)else 'Z') test_data['Cabin'] = test_data['Cabin'].apply(lambda i: i[0] if pd.notnull(i)else 'Z' )
Titanic - Machine Learning from Disaster
11,400,975
class CyclicLR(object): def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3, step_size=2000, factor=0.6, min_lr=1e-4, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', last_batch_iteration=-1): if not isinstance(optimizer, torch.optim.Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer ).__name__)) self.optimizer = optimizer if isinstance(base_lr, list)or isinstance(base_lr, tuple): if len(base_lr)!= len(optimizer.param_groups): raise ValueError("expected {} base_lr, got {}".format( len(optimizer.param_groups), len(base_lr))) self.base_lrs = list(base_lr) else: self.base_lrs = [base_lr] * len(optimizer.param_groups) if isinstance(max_lr, list)or isinstance(max_lr, tuple): if len(max_lr)!= len(optimizer.param_groups): raise ValueError("expected {} max_lr, got {}".format( len(optimizer.param_groups), len(max_lr))) self.max_lrs = list(max_lr) else: self.max_lrs = [max_lr] * len(optimizer.param_groups) self.step_size = step_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.batch_step(last_batch_iteration + 1) self.last_batch_iteration = last_batch_iteration self.last_loss = np.inf self.min_lr = min_lr self.factor = factor def batch_step(self, batch_iteration=None): if batch_iteration is None: batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr def step(self, loss): if loss > 
self.last_loss: self.base_lrs = [max(lr * self.factor, self.min_lr)for lr in self.base_lrs] self.max_lrs = [max(lr * self.factor, self.min_lr)for lr in self.max_lrs] def _triangular_scale_fn(self, x): return 1. def _triangular2_scale_fn(self, x): return 1 /(2.**(x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): step_size = float(self.step_size) cycle = np.floor(1 + self.last_batch_iteration /(2 * step_size)) x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1) lrs = [] param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs) for param_group, base_lr, max_lr in param_lrs: base_height =(max_lr - base_lr)* np.maximum(0,(1 - x)) if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration) lrs.append(lr) return lrs<train_model>
train_data[train_data['Cabin']=='T'].index.values
Titanic - Machine Learning from Disaster
11,400,975
def train_model(model, x_train, y_train, x_val, y_val, validate=True): optimizer = torch.optim.Adam(model.parameters()) step_size = 300 scheduler = CyclicLR(optimizer, base_lr=0.001, max_lr=0.003, step_size=step_size, mode='exp_range', gamma=0.99994) train = torch.utils.data.TensorDataset(x_train, y_train) valid = torch.utils.data.TensorDataset(x_val, y_val) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) loss_fn = torch.nn.BCEWithLogitsLoss(reduction='mean' ).cuda() best_score = -np.inf for epoch in range(n_epochs): start_time = time.time() model.train() avg_loss = 0. for x_batch, y_batch in tqdm(train_loader, disable=True): y_pred = model(x_batch) scheduler.batch_step() loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds = np.zeros(( x_val_fold.size(0))) if validate: avg_val_loss = 0. for i,(x_batch, y_batch)in enumerate(valid_loader): y_pred = model(x_batch ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] search_result = threshold_search(y_val.cpu().numpy() , valid_preds) val_f1, val_threshold = search_result['f1'], search_result['threshold'] elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t val_f1={:.4f} best_t={:.2f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, avg_val_loss, val_f1, val_threshold, elapsed_time)) else: elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, elapsed_time)) valid_preds = np.zeros(( x_val_fold.size(0))) avg_val_loss = 0. 
for i,(x_batch, y_batch)in enumerate(valid_loader): y_pred = model(x_batch ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] print('Validation loss: ', avg_val_loss) test_preds = np.zeros(( len(test_loader.dataset))) for i,(x_batch,)in enumerate(test_loader): y_pred = model(x_batch ).detach() test_preds[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] test_preds_local = np.zeros(( len(test_local_loader.dataset))) for i,(x_batch,)in enumerate(test_local_loader): y_pred = model(x_batch ).detach() test_preds_local[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] return valid_preds, test_preds, test_preds_local<define_variables>
test_data[test_data['Cabin']=='T'].index.values
Titanic - Machine Learning from Disaster
11,400,975
seed = 6017<load_pretrained>
train_data.iloc[339]
Titanic - Machine Learning from Disaster
11,400,975
x_test_cuda = torch.tensor(x_test, dtype=torch.long ).cuda() test = torch.utils.data.TensorDataset(x_test_cuda) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) x_test_local_cuda = torch.tensor(x_test_local, dtype=torch.long ).cuda() test_local = torch.utils.data.TensorDataset(x_test_local_cuda) test_local_loader = torch.utils.data.DataLoader(test_local, batch_size=batch_size, shuffle=False )<prepare_x_and_y>
index = train_data[train_data['Cabin'] == 'T'].index train_data.loc[index, 'Cabin'] = 'A'
Titanic - Machine Learning from Disaster
11,400,975
train_preds = np.zeros(len(train_df)) test_preds = np.zeros(( len(test_df), len(splits))) test_preds_local = np.zeros(( n_test, len(splits))) for i,(train_idx, valid_idx)in enumerate(splits): x_train_fold = torch.tensor(x_train[train_idx], dtype=torch.long ).cuda() y_train_fold = torch.tensor(y_train[train_idx, np.newaxis], dtype=torch.float32 ).cuda() x_val_fold = torch.tensor(x_train[valid_idx], dtype=torch.long ).cuda() y_val_fold = torch.tensor(y_train[valid_idx, np.newaxis], dtype=torch.float32 ).cuda() train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold) valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') seed_everything(seed + i) model = NeuralNet() model.cuda() valid_preds_fold, test_preds_fold, test_preds_local_fold = train_model(model, x_train_fold, y_train_fold, x_val_fold, y_val_fold, validate=True) train_preds[valid_idx] = valid_preds_fold test_preds[:, i] = test_preds_fold test_preds_local[:, i] = test_preds_local_fold<compute_test_metric>
train_data['Cabin'] = train_data['Cabin'].replace(['A', 'B', 'C'], 'ABC') train_data['Cabin'] = train_data['Cabin'].replace(['D', 'E'], 'DE') train_data['Cabin'] = train_data['Cabin'].replace(['F', 'G'], 'FG') test_data['Cabin'] = test_data['Cabin'].replace(['A', 'B', 'C'], 'ABC') test_data['Cabin'] = test_data['Cabin'].replace(['D', 'E'], 'DE') test_data['Cabin'] = test_data['Cabin'].replace(['F', 'G'], 'FG' )
Titanic - Machine Learning from Disaster
11,400,975
search_result = threshold_search(y_train, train_preds) search_result<compute_test_metric>
train_data.drop(["Ticket", "Name", "PassengerId"], axis=1, inplace=True) test_data.drop(["Ticket", "Name", "PassengerId"], axis=1, inplace=True) train_data["Age"].fillna(train_data["Age"].median(skipna=True), inplace=True) test_data["Age"].fillna(test_data["Age"].median(skipna=True), inplace=True) test_data["Fare"].fillna(test_data["Fare"].median(skipna=True), inplace=True) train_data["Embarked"].fillna('S', inplace=True) test_data["Embarked"].fillna('S', inplace=True )
Titanic - Machine Learning from Disaster
11,400,975
f1_score(y_test, test_preds_local.mean(axis=1)> search_result['threshold'] )<save_to_csv>
gender = {'male': 0, 'female': 1} train_data.Sex = [gender[item] for item in train_data.Sex] test_data.Sex = [gender[item] for item in test_data.Sex] embarked = {'S': 0, 'C': 1, 'Q':2} train_data.Embarked = [embarked[item] for item in train_data.Embarked] test_data.Embarked = [embarked[item] for item in test_data.Embarked] train_data['Cabin'] = LabelEncoder().fit_transform(train_data['Cabin']) test_data['Cabin'] = LabelEncoder().fit_transform(test_data['Cabin'] )
Titanic - Machine Learning from Disaster
11,400,975
submission = test_df[['qid']].copy() submission['prediction'] = test_preds.mean(axis=1)> search_result['threshold'] submission.to_csv('submission.csv', index=False )<define_variables>
td = pd.read_csv("/kaggle/input/titanic/train.csv") td["Cabin"]=td.Cabin.str[0]
Titanic - Machine Learning from Disaster
11,400,975
test_scores = [0.6894145809793863, 0.6904706309470233, 0.6905915253597362, 0.6908101789878276, 0.6910334464526553, 0.6916507797390641, 0.6903868185698696, 0.6908830283890897] train_scores = [0.669555770620476, 0.6708382008438574, 0.6700974173065081, 0.6701065866112219, 0.6704778141088164, 0.6708436318389969, 0.6705310002773053, 0.6710429366071224] seeds = [42853, 73399, 21152, 58237, 25688, 6017, 29547, 65803]<prepare_output>
expected_values = train_data["Survived"] train_data.drop("Survived", axis=1, inplace=True )
Titanic - Machine Learning from Disaster
11,400,975
eval_df = pd.DataFrame() eval_df['cv_score'] = train_scores eval_df['local_test_score'] = test_scores eval_df['seed'] = seeds eval_df.head()<filter>
train_data.drop("Cabin", axis=1, inplace=True) test_data.drop("Cabin", axis=1, inplace=True )
Titanic - Machine Learning from Disaster
11,400,975
eval_df.loc[[eval_df['local_test_score'].idxmax() ]]<import_modules>
X = train_data.values y = expected_values.values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1, stratify=y )
Titanic - Machine Learning from Disaster
11,400,975
tqdm.pandas(desc='Progress') <define_variables>
model = RandomForestClassifier(criterion='gini', n_estimators=1750, max_depth=7, min_samples_split=6, min_samples_leaf=6, max_features='auto', oob_score=True, random_state=42, n_jobs=-1, verbose=1 )
Titanic - Machine Learning from Disaster
11,400,975
embed_size = 300 max_features = 120000 maxlen = 70 batch_size = 512 n_epochs = 5 n_splits = 5 SEED = 10 debug =0<choose_model_class>
model.fit(X_train, y_train) y_pred_train = model.predict(X_train) y_pred_test = model.predict(X_test )
Titanic - Machine Learning from Disaster
11,400,975
loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum' )<set_options>
print("Training accuracy: ", accuracy_score(y_train, y_pred_train)) print("Testing accuracy: ", accuracy_score(y_test, y_pred_test))
Titanic - Machine Learning from Disaster
11,400,975
def seed_everything(seed=10): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything()<features_selection>
column_values = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] X_train_df = pd.DataFrame(data = X_train, columns = column_values) X_test_df = pd.DataFrame(data = X_test, columns = column_values )
Titanic - Machine Learning from Disaster
11,400,975
def load_glove(word_index): EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300] embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.005838499,0.48782197 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector else: embedding_vector = embeddings_index.get(word.capitalize()) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_fasttext(word_index): EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = -0.0053247833,0.49346462 embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix<feature_engineering>
feature_importance(model )
Titanic - Machine Learning from Disaster
11,400,975
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab def known_contractions(embed): known = [] for contract in contraction_mapping: if contract in embed: known.append(contract) return known def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x def unknown_punct(embed, punct): unknown = '' for p in punct: if p not in embed: unknown += p unknown += ' ' return unknown def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text def add_lower(embedding, vocab): count = 0 for word in vocab: if word in embedding and word.lower() not in embedding: embedding[word.lower() ] = embedding[word] count += 1 print(f"Added {count} words to embedding") <define_variables>
model.fit(train_data, expected_values) print("%.4f" % model.oob_score_ )
Titanic - Machine Learning from Disaster
11,400,975
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: if punct in x: x = x.replace(punct, f' {punct} ') return x def clean_numbers(x): if bool(re.search(r'\d', x)) : x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": 
"ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 
'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'} def _get_mispell(mispell_dict): mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys())) return mispell_dict, mispell_re mispellings, mispellings_re = _get_mispell(mispell_dict) def replace_typical_misspell(text): def replace(match): return mispellings[match.group(0)] return mispellings_re.sub(replace, text) <concatenate>
passenger_IDs = pd.read_csv("/kaggle/input/titanic/test.csv")[["PassengerId"]].values preds = model.predict(test_data.values) preds
Titanic - Machine Learning from Disaster
11,400,975
def parallelize_apply(df,func,colname,num_process,newcolnames): pool =Pool(processes=num_process) arraydata = pool.map(func,tqdm(df[colname].values)) pool.close() newdf = pd.DataFrame(arraydata,columns = newcolnames) df = pd.concat([df,newdf],axis=1) return df def parallelize_dataframe(df, func): df_split = np.array_split(df, 4) pool = Pool(4) df = pd.concat(pool.map(func, df_split)) pool.close() pool.join() return df def count_regexp_occ(regexp="", text=None): return len(re.findall(regexp, text)) def add_features(df): df['question_text'] = df['question_text'].progress_apply(lambda x:str(x)) df["lower_question_text"] = df["question_text"].apply(lambda x: x.lower()) df['total_length'] = df['question_text'].progress_apply(len) df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper())) df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df['num_words'] = df.question_text.str.count('\S+') df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split()))) df['words_vs_unique'] = df['num_unique_words'] / df['num_words'] return df def load_and_prec() : if debug: train_df = pd.read_csv(".. /input/train.csv")[:80000] test_df = pd.read_csv(".. /input/test.csv")[:20000] else: train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. 
/input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train = parallelize_dataframe(train_df, add_features) test = parallelize_dataframe(test_df, add_features) train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower()) test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower()) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ features = train[['num_unique_words','words_vs_unique']].fillna(0) test_features = test[['num_unique_words','words_vs_unique']].fillna(0) ss = StandardScaler() pc = PCA(n_components=5) ss.fit(np.vstack(( features, test_features))) features = ss.transform(features) test_features = ss.transform(test_features) tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(SEED) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] features = features[trn_idx] return train_X, test_X, train_y, features, test_features, tokenizer.word_index <categorify>
df = {'PassengerId': passenger_IDs.ravel() , 'Survived': preds} df_predictions = pd.DataFrame(df ).set_index(['PassengerId']) df_predictions.head(10 )
Titanic - Machine Learning from Disaster
11,400,975
<normalization><EOS>
df_predictions.to_csv('/kaggle/working/Predictions.csv' )
Titanic - Machine Learning from Disaster
5,515,762
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class>
df_train = pd.read_csv('/kaggle/input/titanic/train.csv') df_test = pd.read_csv('/kaggle/input/titanic/test.csv') df_train.head()
Titanic - Machine Learning from Disaster
5,515,762
class CyclicLR(object): def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3, step_size=2000, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', last_batch_iteration=-1): if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer ).__name__)) self.optimizer = optimizer if isinstance(base_lr, list)or isinstance(base_lr, tuple): if len(base_lr)!= len(optimizer.param_groups): raise ValueError("expected {} base_lr, got {}".format( len(optimizer.param_groups), len(base_lr))) self.base_lrs = list(base_lr) else: self.base_lrs = [base_lr] * len(optimizer.param_groups) if isinstance(max_lr, list)or isinstance(max_lr, tuple): if len(max_lr)!= len(optimizer.param_groups): raise ValueError("expected {} max_lr, got {}".format( len(optimizer.param_groups), len(max_lr))) self.max_lrs = list(max_lr) else: self.max_lrs = [max_lr] * len(optimizer.param_groups) self.step_size = step_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.batch_step(last_batch_iteration + 1) self.last_batch_iteration = last_batch_iteration def batch_step(self, batch_iteration=None): if batch_iteration is None: batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr def _triangular_scale_fn(self, x): return 1. 
def _triangular2_scale_fn(self, x): return 1 /(2.**(x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): step_size = float(self.step_size) cycle = np.floor(1 + self.last_batch_iteration /(2 * step_size)) x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1) lrs = [] param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs) for param_group, base_lr, max_lr in param_lrs: base_height =(max_lr - base_lr)* np.maximum(0,(1 - x)) if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration) lrs.append(lr) return lrs <categorify>
df_missing = pd.DataFrame(df_train.isna().sum() + df_test.isna().sum() , columns=['Missing']) df_missing = df_missing.drop('Survived') df_missing = df_missing.sort_values(by='Missing', ascending=False) df_missing = df_missing[df_missing.Missing > 0] df_missing
Titanic - Machine Learning from Disaster
5,515,762
class MyDataset(Dataset): def __init__(self,dataset): self.dataset = dataset def __getitem__(self,index): data,target = self.dataset[index] return data,target,index def __len__(self): return len(self.dataset )<compute_train_metric>
df_train, df_test = [x.drop('Cabin', axis=1)for x in [df_train, df_test]]
Titanic - Machine Learning from Disaster
5,515,762
def pytorch_model_run_cv(x_train,y_train,features,x_test, model_obj, feats = False,clip = True): seed_everything() avg_losses_f = [] avg_val_losses_f = [] train_preds = np.zeros(( len(x_train))) test_preds = np.zeros(( len(x_test))) splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED ).split(x_train, y_train)) for i,(train_idx, valid_idx)in enumerate(splits): seed_everything(i*1000+i) x_train = np.array(x_train) y_train = np.array(y_train) if feats: features = np.array(features) x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long ).cuda() y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() if feats: kfold_X_features = features[train_idx.astype(int)] kfold_X_valid_features = features[valid_idx.astype(int)] x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long ).cuda() y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() model = copy.deepcopy(model_obj) model.cuda() loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum') step_size = 300 base_lr, max_lr = 0.001, 0.003 optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr) scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size=step_size, mode='exp_range', gamma=0.99994) train = MyDataset(torch.utils.data.TensorDataset(x_train_fold, y_train_fold)) valid = MyDataset(torch.utils.data.TensorDataset(x_val_fold, y_val_fold)) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') for epoch in range(n_epochs): start_time = time.time() model.train() avg_loss = 0. 
for i,(x_batch, y_batch, index)in enumerate(train_loader): if feats: f = kfold_X_features[index] y_pred = model([x_batch,f]) else: y_pred = model(x_batch) if scheduler: scheduler.batch_step() loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() if clip: nn.utils.clip_grad_norm_(model.parameters() ,1) optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds_fold = np.zeros(( x_val_fold.size(0))) test_preds_fold = np.zeros(( len(x_test))) avg_val_loss = 0. for i,(x_batch, y_batch,index)in enumerate(valid_loader): if feats: f = kfold_X_valid_features[index] y_pred = model([x_batch,f] ).detach() else: y_pred = model(x_batch ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds_fold[index] = sigmoid(y_pred.cpu().numpy())[:, 0] elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time)) avg_losses_f.append(avg_loss) avg_val_losses_f.append(avg_val_loss) for i,(x_batch,)in enumerate(test_loader): if feats: f = test_features[i * batch_size:(i+1)* batch_size] y_pred = model([x_batch,f] ).detach() else: y_pred = model(x_batch ).detach() test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] train_preds[valid_idx] = valid_preds_fold test_preds += test_preds_fold / len(splits) print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f),np.average(avg_val_losses_f))) return train_preds, test_preds <choose_model_class>
lb_encoder = LabelBinarizer(sparse_output=False) age_bins = [0, 14, 25, 75, 120] age_labels = ['Child', 'Teen', 'Adult', 'Elder'] for ds in [df_train, df_test]: ds['Age'].fillna(ds['Age'].median() , inplace=True) ds['AgeBin'] = pd.cut(ds['Age'], bins=age_bins, labels=age_labels, include_lowest=True) g = sns.FacetGrid(df_train, col="AgeBin" ).map(plt.hist, "Survived") lb_encoder.fit(df_train['AgeBin']) df_train, df_test = [x.join(pd.DataFrame(lb_encoder.transform(x['AgeBin']), columns=age_labels)) for x in [df_train, df_test]] df_train, df_test = [x.drop(['Age', 'AgeBin'], axis=1)for x in [df_train, df_test]]
Titanic - Machine Learning from Disaster
5,515,762
class Alex_NeuralNet_Meta(nn.Module):
    """Bi-LSTM -> Bi-GRU text classifier that concatenates pooled RNN states with
    a vector of hand-crafted statistical features before the dense head.

    forward() expects x = [token_id_batch, feature_batch] and returns raw logits
    of shape (batch, 1) — no sigmoid, so pair with BCEWithLogitsLoss.
    """

    def __init__(self, hidden_size, lin_size, embedding_matrix=embedding_matrix):
        super(Alex_NeuralNet_Meta, self).__init__()
        self.hidden_size = hidden_size
        drp = 0.1  # dropout probability for the dense head

        # Frozen pretrained embedding table (matrix supplied by the surrounding script).
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        # Dropout2d drops whole embedding channels ("spatial" dropout over words).
        self.embedding_dropout = nn.Dropout2d(0.1)

        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
        # Explicit init: zero biases, Kaiming for input weights, orthogonal for recurrent.
        for name, param in self.lstm.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight_ih' in name:
                nn.init.kaiming_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)

        self.gru = nn.GRU(hidden_size*2, hidden_size, bidirectional=True, batch_first=True)
        for name, param in self.gru.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight_ih' in name:
                nn.init.kaiming_normal_(param)
            elif 'weight_hh' in name:
                nn.init.orthogonal_(param)

        # 6*hidden = last GRU state (2h) + avg pool (2h) + max pool (2h), plus meta features.
        self.linear = nn.Linear(hidden_size*6 + features.shape[1], lin_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(drp)
        self.out = nn.Linear(lin_size, 1)

    def forward(self, x):
        # x[0]: LongTensor of token ids (batch, seq); x[1]: per-row meta features.
        h_embedding = self.embedding(x[0])
        # Route (batch, seq, embed) through Dropout2d so dropout acts per embedding channel.
        embeddings = h_embedding.unsqueeze(2)
        embeddings = embeddings.permute(0, 3, 2, 1)
        embeddings = self.embedding_dropout(embeddings)
        embeddings = embeddings.permute(0, 3, 2, 1)
        h_embedding = embeddings.squeeze(2)

        h_lstm, _ = self.lstm(h_embedding)
        h_gru, hh_gru = self.gru(h_lstm)
        # NOTE(review): reshapes hh_gru (num_directions, batch, h) -> (-1, 2h) without a
        # transpose — verify the direction/batch layout is the intended one.
        hh_gru = hh_gru.view(-1, 2*self.hidden_size)

        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)

        f = torch.tensor(x[1], dtype=torch.float).cuda()
        conc = torch.cat((hh_gru, avg_pool, max_pool, f), 1)
        conc = self.relu(self.linear(conc))
        conc = self.dropout(conc)
        out = self.out(conc)
        return out
# Impute missing values with 'S' (mode of Embarked) and one-hot encode the port.
df_train = df_train.fillna('S')
df_test = df_test.fillna('S')

lb_encoder.fit(df_train['Embarked'])  # fit on train; classes_ supplies column names

def _binarize_embarked(frame):
    onehot = pd.DataFrame(lb_encoder.transform(frame['Embarked']), columns=lb_encoder.classes_)
    return frame.join(onehot).drop('Embarked', axis=1)

df_train = _binarize_embarked(df_train)
df_test = _binarize_embarked(df_test)
Titanic - Machine Learning from Disaster
5,515,762
def sigmoid(x):
    """Logistic function; maps raw model logits to probabilities."""
    return 1 / (1 + np.exp(-x))

seed_everything()  # re-seed all RNGs before building the inference pipeline

# Move the padded test sequences to the GPU once and wrap them in a DataLoader.
x_test_cuda = torch.tensor(x_test, dtype=torch.long).cuda()
test = torch.utils.data.TensorDataset(x_test_cuda)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)
# Extract the honorific ("Title") from Name, collapse rare variants into 'Rare',
# one-hot encode it, then drop Name/Title.

def _extract_title(name):
    # "Braund, Mr. Owen Harris" -> "Mr"
    return name.split(',')[1].split('.')[0].strip()

_common_titles = ['Mr', 'Mrs', 'Miss', 'Master']
for frame in (df_train, df_test):
    frame['Title'] = [_extract_title(n) for n in frame['Name']]
    frame['Title'] = frame['Title'].replace(to_replace=['Mlle', 'Ms'], value='Miss')
    frame['Title'] = frame['Title'].replace(to_replace='Mme', value='Mrs')
    frame['Title'] = frame['Title'].apply(lambda t: t if t in _common_titles else 'Rare')

# Sanity checks: combined title counts and per-title survival rate.
print(df_train['Title'].value_counts() + df_test['Title'].value_counts())
print(df_train[['Title', 'Survived']].groupby('Title', as_index=True).mean().sort_values('Survived', ascending=False))

lb_encoder.fit(df_train['Title'])

def _binarize_title(frame):
    onehot = pd.DataFrame(lb_encoder.transform(frame['Title']), columns=lb_encoder.classes_)
    return frame.join(onehot).drop(['Name', 'Title'], axis=1)

df_train = _binarize_title(df_train)
df_test = _binarize_title(df_test)
Titanic - Machine Learning from Disaster
5,515,762
# Run k-fold CV training of the meta-feature model; collect out-of-fold and
# fold-averaged test predictions.
train_preds, test_preds = pytorch_model_run_cv(
    x_train, y_train, features, x_test,
    Alex_NeuralNet_Meta(70, 16, embedding_matrix=embedding_matrix),
    feats=True,
)
# One-hot encode Pclass (1/2/3) into named indicator columns, then drop the original.
pclass_cols = ['UpperClass', 'MiddleClass', 'LowerClass']
lb_encoder.fit(df_train['Pclass'])

def _binarize_pclass(frame):
    onehot = pd.DataFrame(lb_encoder.transform(frame['Pclass']), columns=pclass_cols)
    return frame.join(onehot).drop('Pclass', axis=1)

df_test = _binarize_pclass(df_test)
df_train = _binarize_pclass(df_train)
Titanic - Machine Learning from Disaster
5,515,762
def bestThresshold(y_train, train_preds):
    """Grid-search a decision threshold over [0.1, 0.5) in steps of 0.01.

    Returns (best_threshold, best_f1) and prints the winning pair.
    """
    best_thr = 0
    best_f1 = 0
    preds = np.array(train_preds)  # result is identical; conversion hoisted out of the loop
    for thr in tqdm(np.arange(0.1, 0.501, 0.01)):
        score = f1_score(y_train, preds > thr)
        if score > best_f1:
            best_thr = thr
            best_f1 = score
    print('best threshold is {:.4f} with F1 score: {:.4f}'.format(best_thr, best_f1))
    return best_thr, best_f1

delta, _ = bestThresshold(y_train, train_preds)
# Derive family-size indicator features from Parch + SibSp (+1 for the passenger
# themselves), then drop the raw columns.
for frame in (df_train, df_test):
    family_size = (frame['Parch'] + frame['SibSp'] + 1).astype(int)
    frame.drop(['Parch', 'SibSp'], axis=1, inplace=True)
    frame['IsAlone'] = (family_size == 1).astype(int)
    frame['SmallFamily'] = family_size.between(2, 4).astype(int)
    frame['LargeFamily'] = (family_size >= 5).astype(int)
Titanic - Machine Learning from Disaster
5,515,762
# Reload the raw test set (truncated in debug mode), threshold the fold-averaged
# predictions at `delta`, and write the submission file.
df_test = pd.read_csv(".. /input/test.csv")
if debug:
    df_test = df_test[:20000]
submission = df_test[['qid']].copy()
submission['prediction'] = (test_preds > delta).astype(int)
submission.to_csv('submission.csv', index=False)
# Replace the Sex column with a binary IsFemale indicator; plot survival by sex.
for frame in (df_train, df_test):
    frame['IsFemale'] = frame['Sex'].eq('female').astype(int)
    frame.drop('Sex', axis=1, inplace=True)
g = sns.FacetGrid(df_train, col="IsFemale").map(plt.hist, "Survived")
Titanic - Machine Learning from Disaster
5,515,762
def seed_torch(seed=1029): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True embed_size = 300 max_features = 95000 maxlen = 72 batch_size = 1536 train_epochs = 8 SEED = 1029 puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: x = x.replace(punct, f' {punct} ') return x def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x mispell_dict = {"aren't" : "are not", "can't" : "cannot", "couldn't" : "could not", "didn't" : "did not", "doesn't" : "does not", "don't" : "do not", "hadn't" : "had not", "hasn't" : "has not", "haven't" : "have not", "he'd" : "he would", "he'll" : "he will", "he's" : "he is", "i'd" : "I would", "i'd" : "I had", "i'll" : "I will", "i'm" : "I am", "isn't" : "is not", "it's" : "it is", "it'll":"it will", "i've" : "I have", "let's" : "let us", "mightn't" : "might not", "mustn't" : "must not", "shan't" : "shall not", "she'd" : "she would", "she'll" : "she will", "she's" : "she is", "shouldn't" : "should not", "that's" : "that is", "there's" : "there is", "they'd" : "they would", "they'll" : "they will", "they're" : "they are", "they've" : "they have", "we'd" : "we would", "we're" : "we are", "weren't" : "were not", "we've" : "we have", 
"what'll" : "what will", "what're" : "what are", "what's" : "what is", "what've" : "what have", "where's" : "where is", "who'd" : "who would", "who'll" : "who will", "who're" : "who are", "who's" : "who is", "who've" : "who have", "won't" : "will not", "wouldn't" : "would not", "you'd" : "you would", "you'll" : "you will", "you're" : "you are", "you've" : "you have", "'re": " are", "wasn't": "was not", "we'll":" will", "didn't": "did not", "tryin'":"trying"} def _get_mispell(mispell_dict): mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys())) return mispell_dict, mispell_re mispellings, mispellings_re = _get_mispell(mispell_dict) def replace_typical_misspell(text): def replace(match): return mispellings[match.group(0)] return mispellings_re.sub(replace, text) def load_and_prec() : train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: x.lower()) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: x.lower()) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ tokenizer = Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = 
tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(SEED) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] return train_X, test_X, train_y, tokenizer.word_index def load_glove(word_index): EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix tqdm.pandas() start_time = time.time() train_X, test_X, train_y, word_index = load_and_prec() embedding_matrix_1 = load_glove(word_index) embedding_matrix_2 = load_para(word_index) total_time =(time.time() - start_time)/ 60 print("Took {:.2f} minutes".format(total_time)) embedding_matrix = np.mean([embedding_matrix_1, embedding_matrix_2], axis=0) print(np.shape(embedding_matrix)) del embedding_matrix_1, embedding_matrix_2 gc.collect()<normalization>
# Keep the test PassengerIds for the submission file, then drop identifier
# columns (useless as features) from both frames.
test_passenger_ids = np.array(df_test['PassengerId'])
for frame in (df_test, df_train):
    frame.drop(['PassengerId', 'Ticket'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
5,515,762
class Attention(nn.Module): def __init__(self, feature_dim, step_dim, bias=True, **kwargs): super(Attention, self ).__init__(**kwargs) self.supports_masking = True self.bias = bias self.feature_dim = feature_dim self.step_dim = step_dim self.features_dim = 0 weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight) if bias: self.b = nn.Parameter(torch.zeros(step_dim)) def forward(self, x, mask=None): feature_dim = self.feature_dim step_dim = self.step_dim eij = torch.mm( x.contiguous().view(-1, feature_dim), self.weight ).view(-1, step_dim) if self.bias: eij = eij + self.b eij = torch.tanh(eij) a = torch.exp(eij) if mask is not None: a = a * mask a = a / torch.sum(a, 1, keepdim=True)+ 1e-10 weighted_input = x * torch.unsqueeze(a, -1) return torch.sum(weighted_input, 1) class NeuralNet(nn.Module): def __init__(self): super(NeuralNet, self ).__init__() hidden_size = 60 self.embedding = nn.Embedding(max_features, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = nn.Dropout2d(0.1) self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True) self.gru = nn.GRU(hidden_size*2, hidden_size, bidirectional=True, batch_first=True) self.lstm_attention = Attention(hidden_size*2, maxlen) self.gru_attention = Attention(hidden_size*2, maxlen) self.linear = nn.Linear(480, 16) self.relu = nn.ReLU() self.dropout = nn.Dropout(0.1) self.out = nn.Linear(16, 1) def forward(self, x): h_embedding = self.embedding(x) h_embedding = torch.squeeze(self.embedding_dropout(torch.unsqueeze(h_embedding, 0))) h_lstm, _ = self.lstm(h_embedding) h_gru, _ = self.gru(h_lstm) h_lstm_atten = self.lstm_attention(h_lstm) h_gru_atten = self.gru_attention(h_gru) avg_pool = torch.mean(h_gru, 1) max_pool, _ = torch.max(h_gru, 1) conc = torch.cat(( h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1) conc = 
self.relu(self.linear(conc)) conc = self.dropout(conc) out = self.out(conc) return out splits = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED ).split(train_X, train_y)) def sigmoid(x): return 1 /(1 + np.exp(-x)) def threshold_search(y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in tqdm([i * 0.01 for i in range(100)]): score = f1_score(y_true=y_true, y_pred=y_proba > threshold) if score > best_score: best_threshold = threshold best_score = score search_result = {'threshold': best_threshold, 'f1': best_score} return search_result train_preds = np.zeros(( len(train_X))) test_preds = np.zeros(( len(test_X))) seed_torch(SEED) x_test_cuda = torch.tensor(test_X, dtype=torch.long ).cuda() test = torch.utils.data.TensorDataset(x_test_cuda) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) for i,(train_idx, valid_idx)in enumerate(splits): x_train_fold = torch.tensor(train_X[train_idx], dtype=torch.long ).cuda() y_train_fold = torch.tensor(train_y[train_idx, np.newaxis], dtype=torch.float32 ).cuda() x_val_fold = torch.tensor(train_X[valid_idx], dtype=torch.long ).cuda() y_val_fold = torch.tensor(train_y[valid_idx, np.newaxis], dtype=torch.float32 ).cuda() model = NeuralNet() model.cuda() loss_fn = torch.nn.BCEWithLogitsLoss(reduction="sum") optimizer = torch.optim.Adam(model.parameters()) train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold) valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') for epoch in range(train_epochs): start_time = time.time() model.train() avg_loss = 0. 
for x_batch, y_batch in tqdm(train_loader, disable=True): y_pred = model(x_batch) loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds_fold = np.zeros(( x_val_fold.size(0))) test_preds_fold = np.zeros(len(test_X)) avg_val_loss = 0. for i,(x_batch, y_batch)in enumerate(valid_loader): y_pred = model(x_batch ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format( epoch + 1, train_epochs, avg_loss, avg_val_loss, elapsed_time)) for i,(x_batch,)in enumerate(test_loader): y_pred = model(x_batch ).detach() test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] train_preds[valid_idx] = valid_preds_fold test_preds += test_preds_fold / len(splits )<compute_test_metric>
# Split the engineered frames into model inputs.
X_train = df_train.loc[:, df_train.columns != 'Survived']  # every feature column
X_test = np.array(df_test)
y_train = np.array(df_train['Survived'])
# BUG FIX: the original print paired each shape with the wrong label
# (X_test was reported as "Train labels" and y_train as "Testing features").
print(f"Train features: {X_train.shape} Test features: {X_test.shape} Train labels: {y_train.shape}")
Titanic - Machine Learning from Disaster
5,515,762
search_result = threshold_search(train_y, train_preds )<save_to_csv>
# Tune a random forest over n_estimators with 4-fold CV, report CV scores,
# then refit on the full training set and write the submission.
param_grid = {'n_estimators': [200, 300, 500, 600, 700, 800], 'random_state': [42]}
grid_search = GridSearchCV(estimator=RandomForestClassifier(),
                           param_grid=param_grid, cv=4, n_jobs=-1, verbose=2)
grid_search.fit(X_train, y_train)

cv_scores = grid_search.cv_results_['mean_test_score']
print(f"Best score: {round(grid_search.best_score_, 4)} "
      f"Mean score: {round(cv_scores.mean(), 4)} "
      f"Top 5 scores:{sorted(cv_scores, reverse=True)[:5]} "
      f"Best params: {grid_search.best_params_}")

best_estimator = grid_search.best_estimator_
best_estimator.fit(X_train, y_train)  # explicit refit on all training rows
y_test = best_estimator.predict(X_test)

df_submission = pd.DataFrame({'PassengerId': test_passenger_ids, 'Survived': y_test})
df_submission.to_csv(r'submission.csv', index=False)
Titanic - Machine Learning from Disaster
2,302,192
sub = pd.read_csv('.. /input/sample_submission.csv') sub.prediction = test_preds > search_result['threshold'] sub.to_csv("submission.csv", index=False )<import_modules>
pd.set_option("display.max_rows",200) pd.set_option("display.max_columns",200) %matplotlib inline sns.set_style('whitegrid')
Titanic - Machine Learning from Disaster
2,302,192
tqdm.pandas() <load_from_csv>
df_train = pd.read_csv('.. /input/train.csv') df_test = pd.read_csv('.. /input/test.csv') print(len(df_train),len(df_test))
Titanic - Machine Learning from Disaster
2,302,192
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") print(f"Train shape: {train.shape}") print(f"Test shape: {test.shape}") train.sample()<split>
df_test.head()
Titanic - Machine Learning from Disaster
2,302,192
EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))<feature_engineering>
len(df_train[df_train['Name'].str.contains('Dr.')] )
Titanic - Machine Learning from Disaster
2,302,192
def check_coverage(vocab, embeddings_index):
    """Report what fraction of `vocab` (word -> count) has a pretrained embedding.

    Prints coverage both by unique word and by total token occurrences, and
    returns the out-of-vocabulary words as (word, count) pairs sorted by count,
    descending.
    """
    covered, oov = {}, {}
    covered_tokens = missing_tokens = 0
    for word in vocab:
        try:
            covered[word] = embeddings_index[word]
            covered_tokens += vocab[word]
        except KeyError:  # BUG FIX: narrowed from a bare `except:` that hid real errors
            oov[word] = vocab[word]
            missing_tokens += vocab[word]
    print(f'Found embeddings for {(len(covered) / len(vocab)):.2%} of vocab')
    print(f'Found embeddings for {(covered_tokens / (covered_tokens + missing_tokens)):.2%} of all text')
    return sorted(oov.items(), key=lambda kv: kv[1], reverse=True)


def get_vocab(question_series):
    """Build a word -> frequency dict from a pandas Series of whitespace-separated text."""
    sentences = question_series.str.split().values
    return dict(Counter(word for sentence in sentences for word in sentence))
len(df_train[df_train['Name'].str.contains('Sir.')] )
Titanic - Machine Learning from Disaster
2,302,192
vocab = get_vocab(train["question_text"]) out_of_vocab = check_coverage(vocab, embeddings_index) out_of_vocab[:10]<string_transform>
len(df_train[df_train['Cabin'].isnull() ])
Titanic - Machine Learning from Disaster
2,302,192
punct = set('?!.," embed_punct = punct & set(embeddings_index.keys()) def clean_punctuation(txt): for p in "/-": txt = txt.replace(p, ' ') for p in "'`‘": txt = txt.replace(p, '') for p in punct: txt = txt.replace(p, f' {p} ' if p in embed_punct else ' _punct_ ') return txt<feature_engineering>
df_train.drop(['Name','PassengerId','Ticket','Cabin'],axis=1,inplace=True) df_train.head()
Titanic - Machine Learning from Disaster
2,302,192
train["question_text"] = train["question_text"].map(lambda x: clean_punctuation(x)).str.replace('\d+', ' test["question_text"] = test["question_text"].map(lambda x: clean_punctuation(x)).str.replace('\d+', ' vocab = get_vocab(train["question_text"]) out_of_vocab = check_coverage(vocab, embeddings_index )<string_transform>
df_train[df_train['Embarked'].isnull() ]
Titanic - Machine Learning from Disaster
2,302,192
train, validation = train_test_split(train, test_size=0.08, random_state=20181224) embed_size = 300 vocab_size = 95000 maxlen = 100 train_X = train["question_text"].fillna("_ val_X = validation["question_text"].fillna("_ test_X = test["question_text"].fillna("_ tokenizer = Tokenizer(num_words=vocab_size, filters='', lower=False) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) val_X = tokenizer.texts_to_sequences(val_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) val_X = pad_sequences(val_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train['target'].values val_y = validation['target'].values<define_variables>
df_train['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
2,302,192
all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1]<feature_engineering>
df_train['Embarked'].fillna('S',inplace=True) df_train[df_train['Embarked'].isnull() ]
Titanic - Machine Learning from Disaster
2,302,192
word_index = tokenizer.word_index nb_words = min(vocab_size, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) num_missed = 0 for word, i in word_index.items() : if i >= vocab_size: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector else: num_missed += 1 print(num_missed )<prepare_x_and_y>
df_train = pd.concat([df_train,pd.get_dummies(df_train['Embarked'],prefix='Embarked')],axis=1) df_train.drop('Embarked',axis=1,inplace=True) df_train.head()
Titanic - Machine Learning from Disaster
2,302,192
hidden_layer_size = 100 BATCH_SIZE = 64 tf.reset_default_graph() X = tf.placeholder(tf.int32, [None, maxlen], name='X') Y = tf.placeholder(tf.float32, [None], name='Y') batch_size = tf.placeholder(tf.int64 )<define_variables>
df_train[df_train['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
2,302,192
dataset = tf.data.Dataset.from_tensor_slices(( X, Y)).shuffle(buffer_size=1000 ).batch(batch_size ).repeat() test_dataset = tf.data.Dataset.from_tensor_slices(( X, Y)).batch(batch_size) iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes) train_init_op = iterator.make_initializer(dataset) test_init_op = iterator.make_initializer(test_dataset) questions, labels = iterator.get_next()<categorify>
avg_fare = pd.DataFrame([fare_notsurv.mean() ,fare_surv.mean() ]) std_fare = pd.DataFrame([fare_notsurv.std() ,fare_surv.std() ]) print("Mean fare for not survived is {} and survived is {}".format(fare_notsurv.mean() ,\ fare_surv.mean()))
Titanic - Machine Learning from Disaster
2,302,192
embeddings = tf.get_variable(name="embeddings", shape=embedding_matrix.shape, initializer=tf.constant_initializer(np.array(embedding_matrix)) , trainable=False) embed = tf.nn.embedding_lookup(embeddings, questions )<choose_model_class>
df_train[df_train['Age'].isnull() ]
Titanic - Machine Learning from Disaster
2,302,192
lstm_cell= tf.nn.rnn_cell.LSTMCell(hidden_layer_size) _, final_state = tf.nn.dynamic_rnn(lstm_cell, embed, dtype=tf.float32) last_layer = tf.layers.dense(final_state.h, 1) prediction = tf.nn.sigmoid(last_layer) prediction = tf.squeeze(prediction, [1] )<choose_model_class>
avg_age = pd.DataFrame([age_notsurv.mean() ,age_surv.mean() ]) std_age = pd.DataFrame([age_notsurv.std() ,age_surv.std() ]) print("Mean age for not survived is {} and survived is {}".format(age_notsurv.mean() ,\ age_surv.mean()))
Titanic - Machine Learning from Disaster
2,302,192
learning_rate=0.001 loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=tf.squeeze(last_layer), labels=labels) loss = tf.reduce_mean(loss) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate ).minimize(loss )<compute_train_metric>
df_train['Family'] = df_train['Parch'] + df_train['SibSp'] df_train.drop(['Parch','SibSp'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
2,302,192
with tf.name_scope('metrics'): F1, f1_update = tf.contrib.metrics.f1_score(labels=labels, predictions=prediction, name='my_metric') running_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope="my_metric") reset_op = tf.variables_initializer(var_list=running_vars )<init_hyperparams>
# Collapse Family (Parch + SibSp) into a binary "travels with family" flag.
# BUG FIX: the original used chained indexing (df['Family'].loc[mask] = v), which
# raises SettingWithCopyWarning and is not guaranteed to write back to df_train;
# .loc on the frame itself is the supported form. (The original second statement,
# assigning 0 where the value was already 0, was a no-op and is dropped.)
df_train.loc[df_train['Family'] > 1, 'Family'] = 1
df_train['Family'].value_counts()
Titanic - Machine Learning from Disaster
2,302,192
num_epochs = 10 seed = 3 sess = tf.Session() sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) costs, f1 = [], []<define_variables>
def get_person(passenger):
    """Classify an (Age, Sex) row: passengers under 12 are 'child', else the sex string."""
    age, sex = passenger
    if age < 12:
        return 'child'
    return sex

df_train['Person'] = df_train[['Age', 'Sex']].apply(get_person, axis=1)
df_train.drop('Sex', axis=1, inplace=True)
Titanic - Machine Learning from Disaster
2,302,192
start = time.time() end = 0 max_time = 6600 sess.run(train_init_op, feed_dict={X:train_X, Y:train_y, batch_size:BATCH_SIZE}) num_iter = 1000 num_batches = int(train_X.shape[0] / BATCH_SIZE) for epoch in range(1, num_epochs+1): seed += seed tf.set_random_seed(seed) iter_cost = 0. prev_iter = 0. for i in range(num_batches): _ , batch_loss, _ = sess.run([optimizer, loss, f1_update]) iter_cost += batch_loss end = time.time() if(end-start > max_time): break if(i % num_iter == 0 and i > 0): iter_cost /=(i-prev_iter) prev_iter = i cur_f1 = sess.run(F1) sess.run(reset_op) f1.append(cur_f1) costs.append(iter_cost) print(f"Epoch {epoch} Iteration {i:5} cost: {iter_cost:.10f} f1: {cur_f1:.10f} time: {end-start:4.4f}") batch_cost = 0 .<train_model>
df_train = pd.concat([df_train,pd.get_dummies(df_train['Person'],prefix='Person')],axis=1) df_train.drop(['Person'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
2,302,192
sz = 90 tf.set_random_seed(2018) sess.run(test_init_op, feed_dict={X: val_X, Y: val_y, batch_size: sz}) val_pred = np.concatenate([sess.run(prediction)for _ in range(int(val_X.shape[0]/sz)) ] )<compute_test_metric>
# Apply the same feature engineering to the test set as was applied to the train set.
df_test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)

# Impute missing values (Embarked -> mode 'S'; Fare -> mean; Age -> median) and
# one-hot encode the port of embarkation.
df_test['Embarked'].fillna('S', inplace=True)
df_test = pd.concat([df_test, pd.get_dummies(df_test['Embarked'], prefix='Embarked')], axis=1)
df_test.drop('Embarked', axis=1, inplace=True)
df_test['Fare'].fillna(df_test['Fare'].mean(), inplace=True)
df_test['Age'].fillna(df_test['Age'].dropna().median(), inplace=True)

# Binary "travels with family" flag.
# BUG FIX: the original used chained indexing (df['Family'].loc[mask] = v), which
# raises SettingWithCopyWarning and is not guaranteed to write back; the supported
# form is .loc on the frame itself. (Setting 0 where the value was already 0 was a
# no-op and is dropped.)
df_test['Family'] = df_test['Parch'] + df_test['SibSp']
df_test.drop(['Parch', 'SibSp'], axis=1, inplace=True)
df_test.loc[df_test['Family'] > 1, 'Family'] = 1

def get_person(passenger):
    """Classify an (Age, Sex) row: passengers under 12 are 'child', else the sex string."""
    age, sex = passenger
    return 'child' if age < 12 else sex

df_test['Person'] = df_test[['Age', 'Sex']].apply(get_person, axis=1)
df_test.drop('Sex', axis=1, inplace=True)
df_test = pd.concat([df_test, pd.get_dummies(df_test['Person'], prefix='Person')], axis=1)
df_test.drop(['Person'], axis=1, inplace=True)
df_test.head()
Titanic - Machine Learning from Disaster
2,302,192
thresh = thresholds[np.argmax(scores)] print(f"Best Validation F1 Score is {max(scores):.4f} at threshold {thresh}" )<split>
features_train = df_train.drop(['Survived'],axis=1) target_train= df_train['Survived'] features_test = df_test.drop(['PassengerId'],axis=1 )
Titanic - Machine Learning from Disaster
2,302,192
sz=30 temp_y = val_y[:test_X.shape[0]] sub = test[['qid']] sess.run(test_init_op, feed_dict={X: test_X, Y: temp_y, batch_size:sz}) sub['prediction'] = np.concatenate([sess.run(prediction)for _ in range(int(test_X.shape[0]/sz)) ] )<save_to_csv>
train_x,test_x,train_y,test_y = train_test_split(features_train,target_train,test_size=0.2,random_state=42)
Titanic - Machine Learning from Disaster
2,302,192
sub['prediction'] =(sub['prediction'] > thresh ).astype(np.int16) sub.to_csv("submission.csv", index=False) sub.sample()<define_variables>
clf_nb = GaussianNB() clf_nb.fit(features_train,target_train) target_test_nb = clf_nb.predict(features_test )
Titanic - Machine Learning from Disaster
2,302,192
MAX_SEQUENCE_LENGTH = 60 MAX_WORDS = 45000 EMBEDDINGS_LOADED_DIMENSIONS = 300<load_from_csv>
df_test['Survived'] = target_test_nb df_test[['PassengerId','Survived']].to_csv('gaussnb-kaggle.csv',index=False )
Titanic - Machine Learning from Disaster
2,302,192
df_train = pd.read_csv(".. /input/train.csv") df_test = pd.read_csv(".. /input/test.csv" )<define_variables>
clf_nb.fit(train_x,train_y) pred_gnb_y = clf_nb.predict(test_x) print('Accuracy score of Gaussian NB is {}'.format(metrics.accuracy_score(pred_gnb_y,test_y)) )
Titanic - Machine Learning from Disaster
2,302,192
BATCH_SIZE = 512 Q_FRACTION = 1 questions = df_train.sample(frac=Q_FRACTION) question_texts = questions["question_text"].values question_targets = questions["target"].values test_texts = df_test["question_text"].fillna("_na_" ).values print(f"Working on {len(questions)} questions" )<load_pretrained>
svc = SVC(kernel='rbf',class_weight='balanced') param_grid_svm = {'C': [1, 5, 10, 50,100], 'gamma': [0.0001, 0.0005, 0.001, 0.005,0.01]} grid_svm = GridSearchCV(estimator=svc, param_grid=param_grid_svm) grid_svm.fit(train_x,train_y) grid_svm.best_params_
Titanic - Machine Learning from Disaster
2,302,192
def load_embeddings(file): embeddings = {} with open(file)as f: def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings = dict(get_coefs(*line.split(" ")) for line in f) print('Found %s word vectors.' % len(embeddings)) return embeddings %time pretrained_embeddings = load_embeddings(".. /input/embeddings/glove.840B.300d/glove.840B.300d.txt" )<string_transform>
clf_svm = grid_svm.best_estimator_ clf_svm.fit(features_train,target_train) target_test_svm = clf_svm.predict(features_test )
Titanic - Machine Learning from Disaster
2,302,192
tokenizer = Tokenizer(num_words=MAX_WORDS) %time tokenizer.fit_on_texts(list(df_train["question_text"].values))<categorify>
df_test['Survived'] = target_test_svm df_test[['PassengerId','Survived']].to_csv('svm-kaggle.csv',index=False )
Titanic - Machine Learning from Disaster
2,302,192
def create_embedding_weights(tokenizer, embeddings, dimensions): not_embedded = defaultdict(int) word_index = tokenizer.word_index words_count = min(len(word_index), MAX_WORDS) embeddings_matrix = np.zeros(( words_count, dimensions)) for word, i in word_index.items() : if i >= MAX_WORDS: continue embedding_vector = embeddings.get(word) if embedding_vector is not None: embeddings_matrix[i] = embedding_vector return embeddings_matrix pretrained_emb_weights = create_embedding_weights(tokenizer, pretrained_embeddings, EMBEDDINGS_LOADED_DIMENSIONS )<compute_train_metric>
clf_svm.fit(train_x,train_y) pred_svm_y = clf_svm.predict(test_x) print('Accuracy score of SVM is {}'.format(metrics.accuracy_score(pred_svm_y,test_y)) )
Titanic - Machine Learning from Disaster
2,302,192
THRESHOLD = 0.35 class EpochMetricsCallback(keras.callbacks.Callback): def on_train_begin(self, logs={}): self.f1s = [] self.precisions = [] self.recalls = [] def on_epoch_end(self, epoch, logs={}): predictions = self.model.predict(self.validation_data[0]) predictions =(predictions > THRESHOLD ).astype(int) predictions = np.asarray(predictions) targets = self.validation_data[1] f1 = metrics.f1_score(targets, predictions) precision = metrics.precision_score(targets, predictions) recall = metrics.recall_score(targets, predictions) print(" - F1 score: {0:.4f}, Precision: {1:.4f}, Recall: {2:.4f}" .format(f1, precision, recall)) self.f1s.append(f1) self.precisions.append(precision) self.recalls.append(recall) return def display_model_history(history): data = pd.DataFrame(data={'Train': history.history['loss'], 'Test': history.history['val_loss']}) ax = sns.lineplot(data=data, palette="pastel", linewidth=2.5, dashes=False) ax.set(xlabel='Epoch', ylabel='Loss', title='Loss') sns.despine() plt.show() def display_model_epoch_metrics(epoch_callback): data = pd.DataFrame(data = { 'F1': epoch_callback.f1s, 'Precision': epoch_callback.precisions, 'Recall': epoch_callback.recalls}) sns.lineplot(data=data, palette='muted', linewidth=2.5, dashes=False) sns.despine() plt.show() <prepare_x_and_y>
rf = RandomForestClassifier(criterion='entropy') param_grid_rf = {'n_estimators':[10,100,250,500,1000], 'max_features':['sqrt','log2'],'min_samples_split':[2,5,10,50,100]} grid_rf = GridSearchCV(estimator=rf,param_grid=param_grid_rf) grid_rf.fit(train_x,train_y) grid_rf.best_params_
Titanic - Machine Learning from Disaster
2,302,192
%time X = pad_sequences(tokenizer.texts_to_sequences(question_texts), maxlen=MAX_SEQUENCE_LENGTH) %time Y = question_targets %time test_word_tokens = pad_sequences(tokenizer.texts_to_sequences(test_texts), maxlen=MAX_SEQUENCE_LENGTH )<choose_model_class>
clf_rf = grid_rf.best_estimator_ clf_rf.fit(features_train,target_train) target_test_rf = clf_rf.predict(features_test )
Titanic - Machine Learning from Disaster
2,302,192
def make_model() : tokenized_input = Input(shape=(MAX_SEQUENCE_LENGTH,), name="tokenized_input") embedding = Embedding(MAX_WORDS, EMBEDDINGS_LOADED_DIMENSIONS, weights=[pretrained_emb_weights], trainable=False )(tokenized_input) d0 = SpatialDropout1D(0.1 )(embedding) lstm = Bidirectional(LSTM(128, return_sequences=True))(d0) lstm = Bidirectional(LSTM(64, return_sequences=False))(lstm) d1 = Dropout(0.15 )(lstm) d1 = Dense(64 )(d1) b = BatchNormalization()(d1) out = Dense(1, activation='sigmoid' )(b) model = Model(inputs=[tokenized_input], outputs=out) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() return model<train_model>
df_test['Survived'] = target_test_rf df_test[['PassengerId','Survived']].to_csv('rf-kaggle.csv',index=False )
Titanic - Machine Learning from Disaster
2,302,192
train_X, test_X, train_Y, test_Y = train_test_split(X, Y, test_size=0.01) epoch_callback = EpochMetricsCallback() model = make_model() history = model.fit(x=train_X, y=train_Y, validation_split=0.015, batch_size=BATCH_SIZE, epochs=4, verbose=2, callbacks=[epoch_callback] )<save_to_csv>
clf_rf.fit(train_x,train_y) pred_rf_y = clf_rf.predict(test_x) print('Accuracy score of RF is {}'.format(metrics.accuracy_score(pred_rf_y,test_y)) )
Titanic - Machine Learning from Disaster
2,302,192
test_word_tokens = pad_sequences(tokenizer.texts_to_sequences(test_texts), maxlen=MAX_SEQUENCE_LENGTH) kaggle_predictions =(model.predict([test_word_tokens], batch_size=1024, verbose=2)) df_out = pd.DataFrame({"qid":df_test["qid"].values}) df_out['prediction'] =(kaggle_predictions > THRESHOLD ).astype(int) df_out.to_csv("submission.csv", index=False )<categorify>
target_avg = 0.2 * target_test_nb + 0.3 * target_test_svm + 0.5 * target_test_rf df_test['Survived'] = target_test_rf df_test[['PassengerId','Survived']].to_csv('avg-kaggle.csv',index=False )
Titanic - Machine Learning from Disaster
2,213,168
dim = 300 num_words = 50000 max_len = 100 print('Fiting tokenizer') tokenizer = Tokenizer(num_words=num_words) tokenizer.fit_on_texts(df['question_text']) print('text to sequence') x_train = tokenizer.texts_to_sequences(df['question_text']) print('pad sequence') x_train = pad_sequences(x_train,maxlen=max_len) y_train = df['target'].values print(x_train.shape) print(y_train.shape )<compute_train_metric>
train_dataset = pd.read_csv('.. /input/train.csv') test_dataset = pd.read_csv('.. /input/test.csv') train_dataset.describe()
Titanic - Machine Learning from Disaster
2,213,168
print('Glove...') def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open('.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt')) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() print(len(all_embs)) word_index = tokenizer.word_index embedding_matrix_glov = np.random.normal(emb_mean, emb_std,(num_words, dim)) count=0 for word, i in word_index.items() : if i >= num_words: break embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix_glov[i] = embedding_vector else: count += 1 print('embedding matrix size:',embedding_matrix_glov.shape) print('Number of words not in vocab:',count) del embeddings_index,all_embs gc.collect()<statistical_test>
y_train = train_dataset.iloc[:, 1].values X_train = train_dataset.iloc[:, [0, 2, 4, 5, 6, 7, 11]].values X_test = test_dataset.iloc[:, [0, 1, 3, 4, 5, 6, 10]].values m = X_train.shape[0] family_size_column = np.zeros(( m, 1)) X_train = np.append(X_train, family_size_column, axis=1) X_train[:, 7] = 1 + X_train[:, 4] + X_train[:, 5] X_train[:, 1] = X_train[:, 1] * X_train[:, 7] X_train = np.delete(X_train, [4, 5, 7], 1) m = X_test.shape[0] family_size_column = np.zeros(( m, 1)) X_test = np.append(X_test, family_size_column, axis=1) X_test[:, 7] = 1 + X_test[:, 4] + X_test[:, 5] X_test[:, 1] = X_test[:, 1] * X_test[:, 7] X_test = np.delete(X_test, [4, 5, 7], 1) m = X_test.shape[0] pred_column = np.zeros(( m, 1)) result = test_dataset.iloc[:, [0]].values result = np.append(result, pred_column, axis=1) result = result.astype(int) result
Titanic - Machine Learning from Disaster
2,213,168
print('Para...') EMBEDDING_FILE = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() print(len(all_embs)) word_index = tokenizer.word_index embedding_matrix_para = np.random.normal(emb_mean, emb_std,(num_words, dim)) count=0 for word, i in word_index.items() : if i >= num_words: break embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix_para[i] = embedding_vector else: count += 1 print('embedding matrix size:',embedding_matrix_glov.shape) print('Number of words not in vocab:',count) del embeddings_index,all_embs gc.collect()<feature_engineering>
nan_age_train = train_dataset[train_dataset['Age'].isnull() ] nan_age_train.shape[0]
Titanic - Machine Learning from Disaster
2,213,168
matrixes = [embedding_matrix_glov,embedding_matrix_para] matrix = np.mean(matrixes,axis=0) del embedding_matrix_glov,embedding_matrix_para gc.collect()<load_from_csv>
nan_age_test = test_dataset[test_dataset['Age'].isnull() ] nan_age_test.shape[0]
Titanic - Machine Learning from Disaster
2,213,168
print('Loading test data...') df_final = pd.read_csv('.. /input/test.csv') df_final["question_text"].fillna("_ x_test=tokenizer.texts_to_sequences(df_final['question_text']) x_test = pad_sequences(x_test,maxlen=max_len) print('Test data loaded:',x_test.shape )<import_modules>
nan_embarked_train = train_dataset[train_dataset['Embarked'].isnull() ] nan_embarked_train.shape[0]
Titanic - Machine Learning from Disaster
2,213,168
class CyclicLR(Callback): def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'): super(CyclicLR, self ).__init__() self.base_lr = base_lr self.max_lr = max_lr self.step_size = step_size self.mode = mode self.gamma = gamma if scale_fn == None: if self.mode == 'triangular': self.scale_fn = lambda x: 1. self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = lambda x: 1/(2.**(x-1)) self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = lambda x: gamma**(x) self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.clr_iterations = 0. self.trn_iterations = 0. self.history = {} self._reset() def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None): if new_base_lr != None: self.base_lr = new_base_lr if new_max_lr != None: self.max_lr = new_max_lr if new_step_size != None: self.step_size = new_step_size self.clr_iterations = 0. def clr(self): cycle = np.floor(1+self.clr_iterations/(2*self.step_size)) x = np.abs(self.clr_iterations/self.step_size - 2*cycle + 1) if self.scale_mode == 'cycle': return self.base_lr +(self.max_lr-self.base_lr)*np.maximum(0,(1-x)) *self.scale_fn(cycle) else: return self.base_lr +(self.max_lr-self.base_lr)*np.maximum(0,(1-x)) *self.scale_fn(self.clr_iterations) def on_train_begin(self, logs={}): logs = logs or {} if self.clr_iterations == 0: K.set_value(self.model.optimizer.lr, self.base_lr) else: K.set_value(self.model.optimizer.lr, self.clr()) def on_batch_end(self, epoch, logs=None): logs = logs or {} self.trn_iterations += 1 self.clr_iterations += 1 self.history.setdefault('lr', [] ).append(K.get_value(self.model.optimizer.lr)) self.history.setdefault('iterations', [] ).append(self.trn_iterations) for k, v in logs.items() : self.history.setdefault(k, [] ).append(v) K.set_value(self.model.optimizer.lr, self.clr()) class Metrics(Callback): def on_train_begin(self, logs={}): 
self.val_f1s = [] def _threshold_search(self,y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in [i * 0.01 for i in range(100)]: score = f1_score(y_true=y_true, y_pred=y_proba > threshold) if score > best_score: best_threshold = threshold best_score = score search_result = {'threshold': best_threshold, 'f1': best_score} return search_result def on_epoch_end(self, epoch, logs={}): y_val = self.validation_data[1] y_pred = self.model.predict(self.validation_data[0]) with warnings.catch_warnings() : warnings.simplefilter("ignore") search_result = self._threshold_search(y_val, y_pred) _val_f1 = search_result['f1'] self.val_f1s.append(_val_f1) print('— val_f1_on_epoch_end: %f '%(_val_f1)) return def get_model(adam, trainable=False): inp1 = Input(shape=(max_len,)) emb = Embedding(num_words, dim, weights=[matrix],trainable = trainable, )(inp1) lstm = Bidirectional(CuDNNLSTM(int(max_len/2),return_sequences=True))(emb) avg_pool = GlobalAveragePooling1D()(lstm) max_pool = GlobalMaxPool1D()(lstm) flat = Flatten()(lstm) x = concatenate([avg_pool,max_pool,flat]) x = Dense(max_len, activation="relu" )(x) x = Dropout(0.2 )(x) x = Dense(1, activation='sigmoid',kernel_regularizer=regularizers.l2(0.0001))(x) model = Model(inputs=inp1, outputs=x) model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy']) print(model.summary()) return model def get_opt() : adam = optimizers.Adam() print('LR:',K.eval(adam.lr)) return adam def get_batch_size(opt): batch_size = int(x_train.shape[0]*K.eval(opt.lr)) print('Batch size = ',batch_size) return batch_size patience = 2 best_model=None all_results = {} train_meta = np.zeros(y_train.shape) test_meta = np.zeros(x_test.shape[0]) splits = list(StratifiedKFold(n_splits=5, shuffle=True, random_state=14 ).split(x_train, y_train)) for idx,(train_idx, valid_idx)in enumerate(splits): print('----'+str(idx)+'-----') X_train1 = x_train[train_idx] y_train1 = y_train[train_idx] X_val = x_train[valid_idx] y_val = 
y_train[valid_idx] model_file = 'model_'+str(idx)+'.h5' modelcheck = ModelCheckpoint(model_file,save_best_only=True) stop = EarlyStopping(patience=patience) reduce = CyclicLR(base_lr=0.001, max_lr=0.004, step_size=300., mode='exp_range', gamma=0.99994) metrics = Metrics() adam = get_opt() batch_size = get_batch_size(adam) model = get_model(adam) history = model.fit(X_train1,y_train1, batch_size=batch_size, validation_data=(X_val,y_val), epochs=4, callbacks=[modelcheck,stop,reduce,metrics], verbose=2) print('Pretraining finished, unfreezing embeddings layer...') model = get_model(adam,True) model.load_weights(model_file) modelcheck = ModelCheckpoint(model_file,save_best_only=True) stop = EarlyStopping(patience=patience) reduce = CyclicLR(base_lr=0.001, max_lr=0.004, step_size=300., mode='exp_range', gamma=0.99994) metrics = Metrics() history = model.fit(X_train1,y_train1, batch_size=batch_size, validation_data=(X_val,y_val), epochs=1, callbacks=[modelcheck,stop,reduce,metrics], verbose=2) print('training finished...') model.load_weights(model_file) y_pred = model.predict(X_val,batch_size=batch_size, verbose=1) train_meta[valid_idx] = y_pred.reshape(-1) search_result = threshold_search(y_val, y_pred) print(search_result) y_pred = y_pred>search_result['threshold'] y_pred = y_pred.astype(int) print('RESULTS ON VALIDATION SET: ',classification_report(y_val,y_pred)) all_results[model_file] = search_result['f1'] y_pred = model.predict(x_test,batch_size=batch_size, verbose=1) test_meta += y_pred.reshape(-1)/ len(splits) if best_model is None or best_model['f1'] < search_result['f1']: best_model={'model':model_file,'f1':search_result['f1']} print('-'*80) print(all_results) print('-'*80) print(best_model) print('-'*80 )<save_to_csv>
nan_embarked_test = test_dataset[test_dataset['Embarked'].isnull() ] nan_embarked_test.shape[0]
Titanic - Machine Learning from Disaster
2,213,168
search_result = threshold_search(y_train, train_meta) print(search_result) df_subm = pd.DataFrame() df_subm['qid'] = df_final.qid df_subm['prediction'] = test_meta > search_result['threshold'] print(df_subm.head()) df_subm.to_csv('submission.csv', index=False )<import_modules>
imputer = SimpleImputer(missing_values = np.nan, strategy= 'mean') imputer = imputer.fit(X_train[:, 3:4]) X_train[:, 3:4] = imputer.transform(X_train[:, 3:4]) imputer = imputer.fit(X_test[:, 3:4]) X_test[:, 3:4] = imputer.transform(X_test[:, 3:4]) X_train = np.delete(X_train, [4], 1) X_test = np.delete(X_test, [4], 1 )
Titanic - Machine Learning from Disaster
2,213,168
import re import time import gc import random import os import numpy as np import pandas as pd from tqdm import tqdm from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.model_selection import GridSearchCV, StratifiedKFold from sklearn.metrics import f1_score, roc_auc_score from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences import torch import torch.nn as nn import torch.utils.data<set_options>
ct = ColumnTransformer( [('oh_enc', OneHotEncoder(sparse=False), [2]),], remainder='passthrough' ) X_train = ct.fit_transform(X_train) X_test = ct.fit_transform(X_test) labelencoder_y = LabelEncoder() y_train = labelencoder_y.fit_transform(y_train )
Titanic - Machine Learning from Disaster
2,213,168
def seed_torch(seed=1011): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True<define_variables>
X_train = np.delete(X_train, [1], 1) print(X_train[0:5, :]) X_train.shape
Titanic - Machine Learning from Disaster