kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
13,563,857
import pandas as pd import numpy as np import re from sklearn.model_selection import train_test_split from sklearn.metrics import f1_score import torch from torch import nn from torch import optim import torch.nn.functional as F from torch.utils.data import TensorDataset, DataLoader import spacy from gensim.models import KeyedVectors from nltk.stem import PorterStemmer, SnowballStemmer from nltk.stem.lancaster import LancasterStemmer import time import gc from tqdm import tqdm<load_from_csv>
df = pd.read_csv("/kaggle/input/titanic/train.csv", index_col="PassengerId") rows, cols = df.shape print(f"Original DataFrame has {rows} rows and {cols} columns") df.head()
Titanic - Machine Learning from Disaster
13,563,857
train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") labels = np.array(train_df.target, dtype=int )<string_transform>
label_encoder = LabelEncoder() df["Sex"] = label_encoder.fit_transform(df["Sex"]) df["Family_Size"] = df["SibSp"] + df["Parch"] + 1 df["Fare_Per_Person"] = df["Fare"] / df["Family_Size"] df.tail()
Titanic - Machine Learning from Disaster
13,563,857
def n_upper(sentence): return len(re.findall(r'[A-Z]',sentence)) def n_unique_words(sentence): return len(set(sentence.split())) def n_question_mark(sentence): return len(re.findall(r'[?]',sentence)) def n_exclamation_mark(sentence): return len(re.findall(r'[!]',sentence)) def n_asterisk(sentence): return len(re.findall(r'[*]',sentence)) def n_parentheses(sentence): return len(re.findall(r'[() ]',sentence)) def n_brackets(sentence): return len(re.findall(r'[\[\]]',sentence)) def n_braces(sentence): return len(re.findall(r'[{}]',sentence)) def n_quotes(sentence): return len(re.findall(r'["]',sentence)) def n_ampersand(sentence): return len(re.findall(r'[&]',sentence)) def n_dash(sentence): return len(re.findall(r'[-]',sentence)) n_stats = 11 def get_stat(questions_list): stat_feat = np.zeros(( len(questions_list), n_stats), dtype=int) for i,question in tqdm(enumerate(questions_list)) : stat_feat[i,0] = n_upper(question) stat_feat[i,1] = n_unique_words(question) stat_feat[i,2] = n_question_mark(question) stat_feat[i,3] = n_exclamation_mark(question) stat_feat[i,4] = n_asterisk(question) stat_feat[i,5] = n_parentheses(question) stat_feat[i,6] = n_brackets(question) stat_feat[i,7] = n_braces(question) stat_feat[i,8] = n_quotes(question) stat_feat[i,9] = n_ampersand(question) stat_feat[i,10] = n_dash(question) return stat_feat<split>
set1 = df.copy() set1 = set1[["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]] set1.dropna(axis=0, inplace=True) rows, cols = set1.shape print(f"set1 DataFrame has {rows} rows and {cols} columns") set1.tail()
Titanic - Machine Learning from Disaster
13,563,857
train_stat = get_stat(train_df.question_text) test_stat = get_stat(test_df.question_text )<string_transform>
y = set1["Age"] X = set1[["Survived", "Pclass", "Sex", "SibSp", "Parch", "Fare"]]
Titanic - Machine Learning from Disaster
13,563,857
train_list = list(train_df.question_text.apply(lambda s: s.lower())) test_list = list(test_df.question_text.apply(lambda s: s.lower())) train_text = ' '.join(train_list) test_text = ' '.join(test_list )<load_pretrained>
train_X, val_X, train_y, val_y = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) print(f"Training features shape: {train_X.shape}") print(f"Training labels shape: {train_y.shape}") print(f"Testing features shape: {val_X.shape}") print(f"Testing labels shape: {val_y.shape}" )
Titanic - Machine Learning from Disaster
13,563,857
nlp = spacy.load("en", disable=['tagger','parser','ner','textcat'] )<feature_engineering>
def get_mae1(max_leaf_nodes, train_X, val_X, train_y, val_y): age_model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0) age_model.fit(train_X, train_y) val_predictions = age_model.predict(val_X) return mean_absolute_error(val_y, val_predictions) for max_leaf_nodes in [5, 10, 15, 20, 50, 100]: mae = get_mae1(max_leaf_nodes, train_X, val_X, train_y, val_y) print(f"Max leaf nodes: {max_leaf_nodes} \t\t Mean Absolute Error: {mae}")
Titanic - Machine Learning from Disaster
13,563,857
vocab = {} lemma_vocab = {} word_idx = 1 train_tokens = [] for doc in tqdm(nlp.pipe(train_list)) : curr_tokens = [] for token in doc: if token.text not in vocab: vocab[token.text] = word_idx lemma_vocab[token.text] = token.lemma_ word_idx += 1 curr_tokens.append(vocab[token.text]) train_tokens.append(np.array(curr_tokens, dtype=int)) test_tokens = [] for doc in tqdm(nlp.pipe(test_list)) : curr_tokens = [] for token in doc: if token.text not in vocab: vocab[token.text] = word_idx lemma_vocab[token.text] = token.lemma_ word_idx += 1 curr_tokens.append(vocab[token.text]) test_tokens.append(np.array(curr_tokens, dtype=int))<categorify>
age_model = DecisionTreeRegressor(max_leaf_nodes=10, random_state=0) age_model.fit(X, y )
Titanic - Machine Learning from Disaster
13,563,857
def pad(questions, seq_length): features = np.zeros(( len(questions), seq_length+1), dtype=int) for i, sentence in enumerate(questions): if len(sentence)==0: continue features[i, 0] = len(sentence) features[i, -len(sentence):] = sentence return features<define_variables>
set2 = df.copy() columns = ["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"] set2 = set2.loc[set2["Age"].isnull() , columns] rows, cols = set2.shape print(f"set2 DataFrame has {rows} rows and {cols} columns") set2.tail()
Titanic - Machine Learning from Disaster
13,563,857
seq_length = max(max(map(len, train_tokens)) , max(map(len, test_tokens)) )<categorify>
X = set2[["Survived", "Pclass", "Sex", "SibSp", "Parch", "Fare"]] set2["Age"] = age_model.predict(X) set2.head()
Titanic - Machine Learning from Disaster
13,563,857
train_tokens = pad(train_tokens, seq_length) test_tokens = pad(test_tokens, seq_length )<categorify>
set1 = set1.append(set2) rows, cols = set1.shape print(f"set1 DataFrame has {rows} rows and {cols} columns") set1.head()
Titanic - Machine Learning from Disaster
13,563,857
def get_embeddings(file): embeddings = {} with open(file, encoding="utf8", errors='ignore')as f: for line in tqdm(f): line_list = line.split(" ") if len(line_list)> 100: embeddings[line_list[0]] = np.array(line_list[1:], dtype='float32') return embeddings def get_embeddings_matrix(vocab, lemma_vocab, embeddings, keyedVector=False): ps = PorterStemmer() lc = LancasterStemmer() sb = SnowballStemmer("english") n_words = len(vocab) if keyedVector: emb_size = embeddings.vector_size else: emb_size = next(iter(embeddings.values())).shape[0] if keyedVector: emb_dict = {} for word in vocab: try: emb_dict[word] = embeddings.get_vector(word) except: continue embeddings = emb_dict embedding_matrix = np.zeros(( n_words+1, emb_size), dtype=np.float32) unknown_vec = np.zeros(( emb_size,), dtype=np.float32)- 1 unknown_words = 0 for word in tqdm(vocab): emb_vec = embeddings.get(word) if emb_vec is not None: embedding_matrix[vocab[word]] = emb_vec continue emb_vec = embeddings.get(lemma_vocab[word]) if emb_vec is not None: embedding_matrix[vocab[word]] = emb_vec continue emb_vec = embeddings.get(ps.stem(word)) if emb_vec is not None: embedding_matrix[vocab[word]] = emb_vec continue emb_vec = embeddings.get(lc.stem(word)) if emb_vec is not None: embedding_matrix[vocab[word]] = emb_vec continue emb_vec = embeddings.get(sb.stem(word)) if emb_vec is not None: embedding_matrix[vocab[word]] = emb_vec continue embedding_matrix[vocab[word]] = unknown_vec unknown_words += 1 print('% known words: {:.2%}'.format(1 - unknown_words/n_words)) return embedding_matrix<load_pretrained>
y = set1["Survived"] X = set1[["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]]
Titanic - Machine Learning from Disaster
13,563,857
glove_file = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' glove_emb = get_embeddings(glove_file) glove_emb_matrix = get_embeddings_matrix(vocab, lemma_vocab, glove_emb) del glove_emb gc.collect()<load_pretrained>
train_X, val_X, train_y, val_y = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) print(f"Training features shape: {train_X.shape}") print(f"Training labels shape: {train_y.shape}") print(f"Testing features shape: {val_X.shape}") print(f"Testing labels shape: {val_y.shape}" )
Titanic - Machine Learning from Disaster
13,563,857
fasttext_file = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' fasttext_emb = get_embeddings(fasttext_file) fasttext_emb_matrix = get_embeddings_matrix(vocab, lemma_vocab, fasttext_emb) del fasttext_emb gc.collect() <load_pretrained>
def get_mae2(max_leaf_nodes, train_X, val_X, train_y, val_y): survival_model = RandomForestClassifier(max_leaf_nodes=max_leaf_nodes, random_state=0) survival_model.fit(train_X, train_y) val_predictions = survival_model.predict(val_X) return mean_absolute_error(val_y, val_predictions) for max_leaf_nodes in [5, 8, 10, 15, 20, 50, 100]: mae = get_mae2(max_leaf_nodes, train_X, val_X, train_y, val_y) print(f"Max leaf nodes: {max_leaf_nodes} \t\t Mean Absolute Error: {mae}")
Titanic - Machine Learning from Disaster
13,563,857
word2vec_file = '.. /input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin' word2vec_emb = KeyedVectors.load_word2vec_format(word2vec_file, binary=True) word2vec_emb_matrix = get_embeddings_matrix(vocab, lemma_vocab, word2vec_emb, keyedVector=True) del word2vec_emb gc.collect()<load_pretrained>
survival_model = RandomForestClassifier(max_leaf_nodes=15, random_state=0) accuracy = survival_model.fit(X, y ).score(X, y) print(f"Accuracy value: {accuracy}" )
Titanic - Machine Learning from Disaster
13,563,857
paragram_file = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' paragram_emb = get_embeddings(paragram_file) paragram_emb_matrix = get_embeddings_matrix(vocab, lemma_vocab, paragram_emb) del paragram_emb gc.collect()<concatenate>
tdf = pd.read_csv("/kaggle/input/titanic/test.csv", index_col="PassengerId") rows, cols = tdf.shape print(f"Original Test DataFrame has {rows} rows and {cols} columns") tdf.head()
Titanic - Machine Learning from Disaster
13,563,857
emb_matrix = np.concatenate(( glove_emb_matrix, paragram_emb_matrix), axis=1) del glove_emb_matrix, fasttext_emb_matrix, word2vec_emb_matrix, paragram_emb_matrix gc.collect()<concatenate>
label_encoder = LabelEncoder() tdf["Sex"] = label_encoder.fit_transform(tdf["Sex"]) tdf["Family_Size"] = tdf["SibSp"] + tdf["Parch"] + 1 tdf["Fare_Per_Person"] = tdf["Fare"] / tdf["Family_Size"] tdf.tail()
Titanic - Machine Learning from Disaster
13,563,857
train_feat = np.concatenate(( train_stat, train_tokens), axis=1) test_feat = np.concatenate(( test_stat, test_tokens), axis=1 )<split>
tdf[tdf["Fare"].isnull() ]
Titanic - Machine Learning from Disaster
13,563,857
x_train, x_val, label_train, label_val = train_test_split(train_feat, labels, test_size=0.1, random_state=0) train_data = TensorDataset(torch.from_numpy(x_train), torch.from_numpy(label_train)) valid_data = TensorDataset(torch.from_numpy(x_val), torch.from_numpy(label_val)) test_data = TensorDataset(torch.from_numpy(test_feat)) batch_size = 64 train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size) valid_loader = DataLoader(valid_data, shuffle=True, batch_size=batch_size) test_loader = DataLoader(test_data, shuffle=False, batch_size=batch_size )<train_model>
test_set1 = tdf.copy() filtr =(~test_set1["Fare"].isnull())&(~test_set1["Age"].isnull())&(test_set1["Pclass"] == 3) test_set1 = test_set1[filtr] rows, cols = test_set1.shape print(f"test_set1 DataFrame has {rows} rows and {cols} columns") test_set1.head()
Titanic - Machine Learning from Disaster
13,563,857
train_on_gpu=torch.cuda.is_available() if train_on_gpu: print('Training on GPU.') else: print('No GPU available, training on CPU.' )<choose_model_class>
y = test_set1["Fare"] X = test_set1[["Pclass", "Sex", "Age", "SibSp", "Parch"]] fare_model = DecisionTreeRegressor(random_state=1) r_squared = fare_model.fit(X, y ).score(X, y) print(f"R-Squared value: {r_squared}" )
Titanic - Machine Learning from Disaster
13,563,857
def init_emb_layer(self, embedding_matrix): embedding_matrix = torch.tensor(embedding_matrix, dtype=torch.float32) num_emb, emb_size = embedding_matrix.size() emb_layer = nn.Embedding.from_pretrained(embedding_matrix) return emb_layer class SelfAttention(nn.Module): def __init__(self, attention_size, batch_first=False, non_linearity="tanh"): super(SelfAttention, self ).__init__() self.batch_first = batch_first self.attention_weights = nn.Parameter(torch.FloatTensor(attention_size)) self.softmax = nn.Softmax(dim=-1) if non_linearity == "relu": self.non_linearity = nn.ReLU() else: self.non_linearity = nn.Tanh() nn.init.uniform(self.attention_weights.data, -0.005, 0.005) def get_mask(self, attentions, lengths): max_len = max(lengths.data) mask = torch.autograd.Variable(torch.ones(attentions.size())).detach() if attentions.data.is_cuda: mask = mask.cuda() for i, l in enumerate(lengths.data): if l < max_len: mask[i, :-l] = 0 return mask def forward(self, inputs, lengths): scores = self.non_linearity(inputs.matmul(self.attention_weights)) scores = self.softmax(scores) mask = self.get_mask(scores, lengths) masked_scores = scores * mask _sums = masked_scores.sum(-1, keepdim=True) scores = masked_scores.div(_sums) weighted = torch.mul(inputs, scores.unsqueeze(-1 ).expand_as(inputs)) representations = weighted.sum(1 ).squeeze() return representations class Quora_model(nn.Module): def __init__(self, hidden_layer_dim, embedding_matrix, hidden_dim, gru_layers, stat_layers, drop_prob=0.5): super(Quora_model, self ).__init__() self.hidden_layer_dim = hidden_layer_dim self.gru_layers = gru_layers self.emb_dim = embedding_matrix.shape[1] self.hidden_dim = hidden_dim self.stat_layers = stat_layers stat_in_dim = n_stats + 1 modules = [] for out_dim in self.stat_layers: modules.append(nn.Linear(stat_in_dim, out_dim)) modules.append(nn.ReLU()) stat_in_dim = out_dim self.stat_dense = nn.Sequential(*modules) self.embedding = init_emb_layer(self, embedding_matrix) self.gru = 
nn.GRU(self.emb_dim, self.hidden_dim, self.gru_layers, batch_first=True, bidirectional=True, dropout = drop_prob) self.attention = SelfAttention(self.hidden_dim*2, batch_first=True) self.final_dense = nn.Sequential( nn.Dropout(p=drop_prob), nn.Linear(self.hidden_dim*2 + out_dim, self.hidden_layer_dim), nn.ReLU() , nn.Dropout(p=drop_prob), nn.Linear(self.hidden_layer_dim, 1), nn.Sigmoid() ) def forward(self, x, hidden): batch_size, _ = x.size() if hidden.size(1)!= batch_size: hidden = hidden[:, :batch_size, :].contiguous() lengths = x[:,n_stats].cpu().numpy().astype(int) seq_len = max(lengths) x_text = x[:, -seq_len:] x_stat = x[:, :n_stats+1].type(torch.FloatTensor) if train_on_gpu: x_stat = x_stat.cuda() x_text = self.embedding(x_text) out_gru, _ = self.gru(x_text, hidden) out_att = self.attention(out_gru, lengths) out_stat = self.stat_dense(x_stat) out = torch.cat(( out_att, out_stat), dim=1) out = self.final_dense(out) return out def init_hidden(self, batch_size): weight = next(self.parameters() ).data if train_on_gpu: hidden = weight.new(self.gru_layers*2, batch_size, self.hidden_dim ).zero_().cuda() else: hidden = weight.new(self.gru_layers*2, batch_size, self.hidden_dim ).zero_() return hidden<choose_model_class>
tdf.loc[1044, ["Fare"]] = fare_model.predict([[3, 1, 60.5, 0, 0]]) tdf.loc[1044, ["Fare_Per_Person"]] = fare_model.predict([[3, 1, 60.5, 0, 0]]) tdf.loc[1044]
Titanic - Machine Learning from Disaster
13,563,857
hidden_dim = 256 gru_layers = 1 dropout = 0.1 stat_layers_dim = [16, 8] hidden_layer_dim = 64 model = Quora_model(hidden_layer_dim, emb_matrix, hidden_dim, gru_layers, stat_layers_dim, dropout) model<choose_model_class>
test_set1 = tdf.copy() test_set1 = test_set1.loc[~tdf["Age"].isnull() ] rows, cols = test_set1.shape print(f"test_set1 DataFrame has {rows} rows and {cols} columns") test_set1.head()
Titanic - Machine Learning from Disaster
13,563,857
epochs = 4 print_every = 1000 early_stop = 20 clip = 5 lr=0.001 criterion = nn.BCELoss() optimizer = torch.optim.Adam(model.parameters() , lr=lr )<train_model>
y = test_set1["Age"] X = test_set1[["Pclass", "Sex", "SibSp", "Parch", "Fare"]]
Titanic - Machine Learning from Disaster
13,563,857
def train_model(model, train_loader, valid_loader, batch_size, epochs, optimizer, criterion, clip, print_every, early_stop): if(train_on_gpu): model.cuda() counter = 0 model.train() breaker = False for e in range(epochs): for inputs, labels in train_loader: counter += 1 if(train_on_gpu): inputs, labels = inputs.cuda() , labels.cuda() h = model.init_hidden(batch_size) model.zero_grad() output = model(inputs, h) loss = criterion(output.squeeze() , labels.float()) loss.backward() nn.utils.clip_grad_norm_(model.parameters() , clip) optimizer.step() if counter % print_every == 0: with torch.no_grad() : val_losses = [] all_val_labels = [] all_val_preds = [] all_val_probs = [] model.eval() for inputs, labels in valid_loader: all_val_labels += list(labels) if(train_on_gpu): inputs, labels = inputs.cuda() , labels.cuda() val_h = model.init_hidden(batch_size) output = model(inputs, val_h) val_loss = criterion(output.squeeze() , labels.float()) val_losses.append(val_loss.item()) preds = torch.round(output.squeeze()) preds = np.squeeze(preds.cpu().numpy()) all_val_preds += list(preds) output = np.squeeze(output.cpu().detach().numpy()) all_val_probs += list(output) current_loss = np.mean(val_losses) print("Epoch: {}/{}...".format(e+1, epochs), "Step: {}...".format(counter), "Loss: {:.6f}...".format(loss.item()), "Val Loss: {:.6f}...".format(current_loss), "F1-score(threshold=0.5): {:.3%}".format(f1_score(all_val_labels, all_val_preds))) if counter == print_every or current_loss < best_loss: best_loss = current_loss best_val_labels = all_val_labels best_probs = all_val_probs torch.save(model.state_dict() , 'checkpoint.pth') counter_eval = 0 counter_eval += 1 if counter_eval == early_stop: breaker = True break model.train() if breaker: break state_dict = torch.load('checkpoint.pth') model.load_state_dict(state_dict) return best_probs, best_val_labels<train_model>
train_X, val_X, train_y, val_y = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0) print(f"Training features shape: {train_X.shape}") print(f"Training labels shape: {train_y.shape}") print(f"Testing features shape: {val_X.shape}") print(f"Testing labels shape: {val_y.shape}" )
Titanic - Machine Learning from Disaster
13,563,857
t0 = time.time() all_val_probs, all_val_labels = train_model(model, train_loader, valid_loader, batch_size, epochs, optimizer, criterion, clip, print_every, early_stop) tf = time.time() print(" Execution time: {:.2f}min".format(( tf-t0)/60))<find_best_params>
def get_mae3(max_leaf_nodes, train_X, val_X, train_y, val_y): age_model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0) age_model.fit(train_X, train_y) val_predictions = age_model.predict(val_X) return mean_absolute_error(val_y, val_predictions) for max_leaf_nodes in [5, 10, 15, 20, 50, 100]: mae = get_mae3(max_leaf_nodes, train_X, val_X, train_y, val_y) print(f"Max leaf nodes: {max_leaf_nodes} \t\t Mean Absolute Error: {mae}")
Titanic - Machine Learning from Disaster
13,563,857
best_score = 0 for thr in np.arange(0.0, 0.5, 0.005): pred = np.array(all_val_probs > thr, dtype=int) score = f1_score(all_val_labels, pred) print("Threshold: {:.3f}...F1-score {:.3%}".format(thr, score)) if score > best_score: best_score = score best_thr = thr print(" Best threshold: {:.3f}...F1-score {:.3%}".format(best_thr, best_score))<train_model>
age_model = DecisionTreeRegressor(random_state=1) age_model.fit(X, y )
Titanic - Machine Learning from Disaster
13,563,857
model.eval() with torch.no_grad() : all_test_preds = [] for inputs in test_loader: inputs = inputs[0] if(train_on_gpu): inputs = inputs.cuda() test_h = model.init_hidden(batch_size) output = model(inputs, test_h) preds =(output.squeeze() > best_thr ).type(torch.IntTensor) preds = np.squeeze(preds.cpu().numpy()) all_test_preds += list(preds.astype(int))<save_to_csv>
test_set2 = tdf.copy() test_set2 = test_set2.loc[tdf["Age"].isnull() ] rows, cols = test_set2.shape print(f"test_set2 DataFrame has {rows} rows and {cols} columns") test_set2.head()
Titanic - Machine Learning from Disaster
13,563,857
sub = pd.DataFrame({ 'qid': test_df.qid, 'prediction': all_test_preds }) sub = sub[['qid', 'prediction']] sub.to_csv('submission.csv', index=False, sep=',' )<import_modules>
X = test_set2[["Pclass", "Sex", "SibSp", "Parch", "Fare"]] test_set2["Age"] = age_model.predict(X) test_set2.head()
Titanic - Machine Learning from Disaster
13,563,857
tqdm.pandas() gc.collect()<define_variables>
test_set1 = test_set1.append(test_set2) rows, cols = test_set1.shape print(f"test_set1 DataFrame has {rows} rows and {cols} columns") test_set1.head()
Titanic - Machine Learning from Disaster
13,563,857
max_features= 200000 max_senten_len = 40 max_senten_num = 3 embed_size = 300 VALIDATION_SPLIT = 0<import_modules>
X = test_set1[["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]] predictions = survival_model.predict(X) output = pd.DataFrame({"PassengerId": test_set1.index, "Survived": predictions}) output.to_csv("my_submission.csv", index=False) print("my_submission.csv is ready to submit!" )
Titanic - Machine Learning from Disaster
14,160,111
from sklearn.utils import shuffle<load_from_csv>
data=pd.read_csv('.. /input/titanic/train.csv') data
Titanic - Machine Learning from Disaster
14,160,111
df = pd.read_csv('.. /input/train.csv' )<load_from_csv>
data['Ticket_type']=data['Ticket'].apply(lambda x: x[0:3]) data['Ticket_type']=data['Ticket_type'].astype('category' ).cat.codes
Titanic - Machine Learning from Disaster
14,160,111
test_df = pd.read_csv(".. /input/test.csv" )<count_unique_values>
data['Words_counts']=data['Name'].apply(lambda x: len(x.split()))
Titanic - Machine Learning from Disaster
14,160,111
len(df.target.unique() )<rename_columns>
data['cabin_or_not']=data["Cabin"].apply(lambda x: 0 if type(x)== float else 1) data.head(3 )
Titanic - Machine Learning from Disaster
14,160,111
df.columns = ['qid', 'text', 'category'] test_df.columns = ['qid', 'text']<drop_column>
data['Family_size']=data['SibSp'] + data['Parch'] + 1
Titanic - Machine Learning from Disaster
14,160,111
df = df[['text', 'category']]<feature_engineering>
data['IsAlone'] = 0 data.loc[data['Family_size'] == 1, 'IsAlone'] = 1 data.head(3 )
Titanic - Machine Learning from Disaster
14,160,111
df['text'] = df['text'].str.lower() test_df['text'] = test_df['text'].str.lower()<define_variables>
data['Embarked'] = data['Embarked'].fillna('S') data['Age'].fillna(data['Age'].mean() ,inplace=True )
Titanic - Machine Learning from Disaster
14,160,111
contraction_mapping = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", 
"we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'u.s':'america', 'e.g':'for example'}<string_transform>
data['fare_cat']=pd.qcut(data['Fare'], 4) data['fare_cat']=data['fare_cat'].astype('category' ).cat.codes.astype('int' )
Titanic - Machine Learning from Disaster
14,160,111
def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text<feature_engineering>
data['cat_age']=pd.cut(data['Age'],5) print(data['cat_age'].value_counts()) data['cat_age']=data['cat_age'].astype('category' ).cat.codes.astype('int' )
Titanic - Machine Learning from Disaster
14,160,111
df['text'] = df['text'].progress_apply(lambda x: clean_contractions(x, contraction_mapping)) test_df['text'] = test_df['text'].progress_apply(lambda x: clean_contractions(x, contraction_mapping))<define_variables>
title=title.replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'],'Rare') title=title.replace('Mlle','Miss') title=title.replace('Ms','Miss') title=title.replace('Mme','Mrs') title.value_counts()
Titanic - Machine Learning from Disaster
14,160,111
punct = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]<define_variables>
data['Title']=title dic1= {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} data['Title'] = data['Title'].map(dic1) data['Title'] =data['Title'].fillna(0 )
Titanic - Machine Learning from Disaster
14,160,111
punct_mapping = {"‘": "'", "₹": "e", "´": "'", "°": "", "€": "e", "™": "tm", "√": " sqrt ", "×": "x", "²": "2", "—": "-", "–": "-", "’": "'", "_": "-", "`": "'", '“': '"', '”': '"', '“': '"', "£": "e", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', '!':' '}<categorify>
data['Sex'] = data['Sex'].map({'female': 0, 'male': 1} ).astype(int) data.head(3 )
Titanic - Machine Learning from Disaster
14,160,111
def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text<feature_engineering>
data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int )
Titanic - Machine Learning from Disaster
14,160,111
df['text'] = df['text'].progress_apply(lambda x: clean_special_chars(x, punct, punct_mapping)) test_df['text'] = test_df['text'].progress_apply(lambda x: clean_special_chars(x, punct, punct_mapping))<define_variables>
data.drop(columns=['PassengerId','Name','Ticket','Cabin'],inplace=True )
Titanic - Machine Learning from Disaster
14,160,111
mispell_dict = {'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'}<categorify>
train=pd.read_csv('.. /input/titanic/train.csv') test=pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
14,160,111
def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x<feature_engineering>
test['Fare'].fillna(test['Fare'].mean() ,inplace=True )
Titanic - Machine Learning from Disaster
14,160,111
df['text'] = df['text'].progress_apply(lambda x: correct_spelling(x, mispell_dict)) test_df['text'] = test_df['text'].progress_apply(lambda x: correct_spelling(x, mispell_dict))<define_variables>
datas=[train,test] for data in datas: data['Ticket_type']=data['Ticket'].apply(lambda x: x[0:3]) data['Ticket_type']=data['Ticket_type'].astype('category' ).cat.codes data['Words_counts']=data['Name'].apply(lambda x: len(x.split())) data['cabin_or_not']=data["Cabin"].apply(lambda x: 0 if type(x)== float else 1) data['Family_size']=data['SibSp'] + data['Parch'] + 1 data['IsAlone'] = 0 data.loc[data['Family_size'] == 1, 'IsAlone'] = 1 data['Embarked'] = data['Embarked'].fillna('S') data['fare_cat']=pd.qcut(data['Fare'], 4) data['Age'].fillna(data['Age'].mean() ,inplace=True) data['cat_age']=pd.cut(data['Age'],5) title=[] for i in range(0,len(data)) : title.append(re.search('([A-Za-z]+)\.', data['Name'][i] ).group(1)) title=pd.Series(title) title=title.replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'],'Rare') title=title.replace('Mlle','Miss') title=title.replace('Ms','Miss') title=title.replace('Mme','Mrs') data['Title']=title data['Sex'] = data['Sex'].map({'female': 0, 'male': 1} ).astype(int) dic1= {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} data['Title'] = data['Title'].map(dic1) data['Title'] =data['Title'].fillna(0) data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype('int') data['fare_cat']=data['fare_cat'].astype('category' ).cat.codes.astype('int') data['cat_age']=data['cat_age'].astype('category' ).cat.codes.astype('int') data.drop(columns=['PassengerId','Name','Ticket','Cabin'],inplace=True)
Titanic - Machine Learning from Disaster
14,160,111
labels = df['category'] text = df['text']<prepare_x_and_y>
x=train.drop(columns=['Survived']) y=train['Survived']
Titanic - Machine Learning from Disaster
14,160,111
train_text = text.reset_index().drop('index', axis=1) y_train = labels.reset_index().drop('index', axis=1) val_text = None y_val = None<drop_column>
log_model = LogisticRegression(C=22 ).fit(x, y )
Titanic - Machine Learning from Disaster
14,160,111
test = test_df['text']<groupby>
submitssion_dis={'PassengerId':[a for a in range(892,1310)] ,'Survived':log_model.predict(test)}
Titanic - Machine Learning from Disaster
14,160,111
cates = df.groupby('category') print("total categories:", cates.ngroups) print(cates.size() )<define_variables>
submission=pd.DataFrame(submitssion_dis) submission.to_csv('Submission_out.csv',index=False )
Titanic - Machine Learning from Disaster
14,123,238
paras = [] labels = [] texts = []<string_transform>
train=pd.read_csv('/kaggle/input/titanic/train.csv') test=pd.read_csv('/kaggle/input/titanic/test.csv') train.head()
Titanic - Machine Learning from Disaster
14,123,238
sent_lens = [] sent_nums = [] for idx in tqdm(range(train_text.shape[0])) : text = train_text.text[idx] texts.append(text) sentences = tokenize.sent_tokenize(text) sent_nums.append(len(sentences)) for sent in sentences: sent_lens.append(len(text_to_word_sequence(sent))) paras.append(sentences )<define_variables>
df = train.copy()
Titanic - Machine Learning from Disaster
14,123,238
val_paras = [] val_labels = []<define_variables>
testo =test.copy()
Titanic - Machine Learning from Disaster
14,123,238
test_paras = [] test_labels = []<string_transform>
df.drop(columns=['Name', 'Ticket', 'Cabin'], axis=1, inplace=True) test.drop(columns=['Name', 'Ticket', 'Cabin'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
14,123,238
for idx in range(test.shape[0]): text = test[idx] sentences = tokenize.sent_tokenize(text) test_paras.append(sentences )<feature_engineering>
df.isnull().sum()
Titanic - Machine Learning from Disaster
14,123,238
tokenizer = Tokenizer(num_words=max_features, oov_token=True) tokenizer.fit_on_texts(texts )<feature_engineering>
df['Age'].fillna(df['Age'].median() , inplace=True) df['Embarked'].fillna(df['Embarked'].mode() [0], inplace=True)
Titanic - Machine Learning from Disaster
14,123,238
x_train = np.zeros(( len(texts), max_senten_num, max_senten_len), dtype='int32') for i, sentences in tqdm(enumerate(paras)) : tokenized_sent = tokenizer.texts_to_sequences(sentences) padded_seq = pad_sequences(tokenized_sent, maxlen=max_senten_len, padding='post', truncating='post') for j, seq in enumerate(padded_seq): if(j < max_senten_num): x_train[i,j,:] = seq else: break<categorify>
df.isnull().sum()
Titanic - Machine Learning from Disaster
14,123,238
<categorify>
df[df.Fare.isnull() ]
Titanic - Machine Learning from Disaster
14,123,238
test_data = np.zeros(( test.shape[0], max_senten_num, max_senten_len), dtype='int32') for i, sentences in enumerate(test_paras): tokenized_sent = tokenizer.texts_to_sequences(sentences) padded_seq = pad_sequences(tokenized_sent, maxlen=max_senten_len, padding='post', truncating='post') for j, seq in enumerate(padded_seq): if(j < max_senten_num): test_data[i,j,:] = seq else: break<count_unique_values>
df.isnull().sum()
Titanic - Machine Learning from Disaster
14,123,238
word_index = tokenizer.word_index print('Total %s unique tokens.' % len(word_index))<import_modules>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,123,238
import os<statistical_test>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,123,238
gc.collect() word_index = tokenizer.word_index max_features = len(word_index)+1 def load_glove(word_index): EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' def get_coefs(word,*arr): return word.lower() , np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if o.split(" ")[0] in word_index) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] embedding_matrix = np.random.normal(emb_mean, emb_std,(max_features, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_fasttext(word_index): EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100 and o.split(" ")[0] in word_index) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] embedding_matrix = np.random.normal(emb_mean, emb_std,(max_features, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix def load_para(word_index): EMBEDDING_FILE = '.. 
/input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' def get_coefs(word,*arr): return word.lower() , np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100 and o.split(" ")[0] in word_index) all_embs = np.stack(embeddings_index.values()) emb_mean,emb_std = all_embs.mean() , all_embs.std() embed_size = all_embs.shape[1] embedding_matrix = np.random.normal(emb_mean, emb_std,(max_features, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return embedding_matrix<normalization>
print('Duplicated data =',df.duplicated().sum() )
Titanic - Machine Learning from Disaster
14,123,238
embedding_matrix_1 = load_glove(word_index) embedding_matrix_3 = load_para(word_index) embedding_matrix = np.mean(( embedding_matrix_1, embedding_matrix_3), axis=0) del embedding_matrix_1, embedding_matrix_3 gc.collect() np.shape(embedding_matrix )<train_model>
df[['Pclass','Survived']].groupby(['Pclass'],as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
14,123,238
class CyclicLR(Callback): def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'): super(CyclicLR, self ).__init__() self.base_lr = base_lr self.max_lr = max_lr self.step_size = step_size self.mode = mode self.gamma = gamma if scale_fn == None: if self.mode == 'triangular': self.scale_fn = lambda x: 1. self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = lambda x: 1/(2.**(x-1)) self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = lambda x: gamma**(x) self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.clr_iterations = 0. self.trn_iterations = 0. self.history = {} self._reset() def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None): if new_base_lr != None: self.base_lr = new_base_lr if new_max_lr != None: self.max_lr = new_max_lr if new_step_size != None: self.step_size = new_step_size self.clr_iterations = 0. def clr(self): cycle = np.floor(1+self.clr_iterations/(2*self.step_size)) x = np.abs(self.clr_iterations/self.step_size - 2*cycle + 1) if self.scale_mode == 'cycle': return self.base_lr +(self.max_lr-self.base_lr)*np.maximum(0,(1-x)) *self.scale_fn(cycle) else: return self.base_lr +(self.max_lr-self.base_lr)*np.maximum(0,(1-x)) *self.scale_fn(self.clr_iterations) def on_train_begin(self, logs={}): logs = logs or {} if self.clr_iterations == 0: K.set_value(self.model.optimizer.lr, self.base_lr) else: K.set_value(self.model.optimizer.lr, self.clr()) def on_batch_end(self, epoch, logs=None): logs = logs or {} self.trn_iterations += 1 self.clr_iterations += 1 self.history.setdefault('lr', [] ).append(K.get_value(self.model.optimizer.lr)) self.history.setdefault('iterations', [] ).append(self.trn_iterations) for k, v in logs.items() : self.history.setdefault(k, [] ).append(v) K.set_value(self.model.optimizer.lr, self.clr()) def f1(y_true, y_pred): def recall(y_true, y_pred): true_positives = 
K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives /(possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives /(predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*(( precision*recall)/(precision+recall+K.epsilon()))<categorify>
df[['Sex','Survived']].groupby(['Sex'],as_index = False ).mean()
Titanic - Machine Learning from Disaster
14,123,238
def han_model(embedding_matrix): nb_words = embedding_matrix.shape[0] embedding_layer = Embedding(nb_words, embed_size, weights=[embedding_matrix]) word_input = Input(shape=(max_senten_len,), dtype='float32') word_sequences = embedding_layer(word_input) word_lstm = Bidirectional(CuDNNLSTM(64, return_sequences=True))(word_sequences) word_att = AttentionWithContext()(word_lstm) word_att = ReLU()(word_att) wordEncoder = Model(word_input, word_att) sent_input = Input(shape=(max_senten_num, max_senten_len), dtype='float32') sent_encoder = TimeDistributed(wordEncoder )(sent_input) sent_enc_avg = Lambda(lambda x: K.sum(x, axis=1))(sent_encoder) sent_enc_avg = ReLU()(sent_enc_avg) pred1 = Dense(1, activation='sigmoid' )(sent_enc_avg) sent_lstm = Bidirectional(CuDNNLSTM(128, return_sequences=True))(sent_encoder) sent_att = AttentionWithContext()(sent_lstm) sent_att = ReLU()(sent_att) pred2 = Dense(1, activation='sigmoid' )(sent_att) preds = Average()([pred1, pred2]) model = Model(sent_input, preds) model.compile(loss='binary_crossentropy',optimizer=Adam() ,metrics=[f1]) return model<predict_on_test>
df[['SibSp','Survived']].groupby(['SibSp'] ).mean()
Titanic - Machine Learning from Disaster
14,123,238
def train_pred(model, train_X, train_y, val_X, val_y, epochs=2, callback=None, batch_size=512): print(train_X.dtype, train_y.dtype) h = model.fit(train_X, train_y, batch_size=batch_size, epochs=epochs, validation_data=(val_X, val_y), callbacks = callback, verbose=1) model.load_weights(filepath) pred_val_y = model.predict([val_X], batch_size=1024, verbose=0) pred_test_y = model.predict([test_data], batch_size=1024, verbose=0) print('=' * 60) return pred_val_y, pred_test_y<import_modules>
df[['Parch','Survived']].groupby(['Parch'] ).mean()
Titanic - Machine Learning from Disaster
14,123,238
from sklearn.model_selection import GridSearchCV, StratifiedKFold<compute_test_metric>
LabelEncoder = preprocessing.LabelEncoder() df['Embarked'] = LabelEncoder.fit_transform(df['Embarked']) test['Embarked'] = LabelEncoder.transform(test['Embarked']) df['Sex'] = LabelEncoder.fit_transform(df['Sex']) test['Sex'] = LabelEncoder.fit_transform(test['Sex']) y = df['Survived'] X = df.drop(['Survived'],axis=1 )
Titanic - Machine Learning from Disaster
14,123,238
search_result = threshold_search(y_train, train_meta )<save_to_csv>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25 )
Titanic - Machine Learning from Disaster
14,123,238
pred_test_y =(test_meta>search_result['threshold'] ).astype(int) out_df = pd.DataFrame({"qid":test_df["qid"].values}) out_df['prediction'] = pred_test_y out_df.to_csv("submission.csv", index=False )<set_options>
scaler = MinMaxScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) test = scaler.fit_transform(test )
Titanic - Machine Learning from Disaster
14,123,238
gc.collect()<import_modules>
def perform_model(model, X_train, y_train, X_test, y_test, class_labels, cm_normalize=True, \ print_cm=True, cm_cmap=plt.cm.Greens): results = dict() train_start_time = datetime.now() print('training the model.. ') model.fit(X_train, y_train) print('Done ') train_end_time = datetime.now() results['training_time'] = train_end_time - train_start_time print('training_time(HH:MM:SS.ms)- {} '.format(results['training_time'])) print('Predicting test data') test_start_time = datetime.now() y_pred = model.predict(X_test) test_end_time = datetime.now() print('Done ') results['testing_time'] = test_end_time - test_start_time print('testing time(HH:MM:SS:ms)- {} '.format(results['testing_time'])) results['predicted'] = y_pred accuracy = accuracy_score(y_true=y_test, y_pred=y_pred) results['accuracy'] = accuracy print('---------------------') print('| Accuracy |') print('---------------------') print(' {} '.format(accuracy)) cm = confusion_matrix(y_test, y_pred) results['confusion_matrix'] = cm if print_cm: print('--------------------') print('| Confusion Matrix |') print('--------------------') print(' {}'.format(cm)) plt.figure(figsize=(4,4)) plt.grid(b=False) plot_confusion_matrix(cm, classes=class_labels, normalize=True, title='Normalized confusion matrix', cmap = cm_cmap) plt.show() print('-------------------------') print('| Classifiction Report |') print('-------------------------') classification_Report = classification_report(y_test, y_pred) results['classification_Report'] = classification_Report print(classification_Report) results['model'] = model return results
Titanic - Machine Learning from Disaster
14,123,238
import os import random import re import time from collections import Counter from itertools import chain import numpy as np import pandas as pd import torch import torch.nn as nn from sklearn.metrics import f1_score, roc_auc_score from sklearn.model_selection import StratifiedKFold, KFold from sklearn.utils import shuffle from torch import optim from torch.utils.data import Dataset, Sampler, DataLoader from tqdm import tqdm<define_variables>
def print_grid_search_attributes(model): print('--------------------------') print('| Best Estimator |') print('--------------------------') print(' \t{} '.format(model.best_estimator_)) print('--------------------------') print('| Best parameters |') print('--------------------------') print('\tParameters of best estimator : \t{} '.format(model.best_params_)) print('---------------------------------') print('| No of CrossValidation sets |') print('--------------------------------') print(' \tTotal numbre of cross validation sets: {} '.format(model.n_splits_)) print('--------------------------') print('| Best Score |') print('--------------------------') print(' \tAverage Cross Validate scores of best estimator : \t{} '.format(model.best_score_))
Titanic - Machine Learning from Disaster
14,123,238
embedding_glove = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' embedding_fasttext = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec' embedding_para = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt' embedding_w2v = '.. /input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin' train_path = '.. /input/train.csv' test_path = '.. /input/test.csv' mispell_dict = {"ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "I will have", "i'm": "i am", "i've": "I have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so as", "this's": "this is", "that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is", "they'd": "they would", 
"they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'qoura': 'quora', 'sallary': 'salary', 'whta': 'what', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doi': 'do I', 'thebest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'etherium': 'ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air 
hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'} puncts = ''!" ' punct_mapping = {"‘": "'", "₹": "e", "´": "'", "°": "", "€": "e", "™": "tm", "√": " sqrt ", "×": "x", "²": "2", "—": "-", "–": "-", "’": "'", "_": "-", "`": "'", '”': '"', '“': '"', "£": "e", '∞': 'infinity', 'θ': 'theta', '÷': '/', 'α': 'alpha', '•': '.', 'à': 'a', '−': '-', 'β': 'beta', '∅': '', '³': '3', 'π': 'pi', '\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for p in puncts: punct_mapping[p] = ' %s ' % p p = re.compile('(\[ math \] ).+(\[ / math \])') p_space = re.compile(r'[^\x20-\x7e]' )<set_options>
labels = ['0','1']
Titanic - Machine Learning from Disaster
14,123,238
def set_seed(seed): np.random.seed(seed) torch.manual_seed(seed + 1) if torch.cuda.is_available() : torch.cuda.manual_seed_all(seed + 2) random.seed(seed + 4) <string_transform>
parameters = {"C":np.logspace(-3,3,7), "penalty":["l1","l2"]} logreg=LogisticRegression() lr_grid = GridSearchCV(logreg,param_grid=parameters, n_jobs=-1) lr_grid_results = perform_model(lr_grid, X_train, y_train, X_test, y_test, class_labels=labels) print_grid_search_attributes(lr_grid_results['model'] )
Titanic - Machine Learning from Disaster
14,123,238
def clean_text(text): text = p.sub(' [ math ] ', text) text = p_space.sub(r'', text) for punct in punct_mapping: if punct in text: text = text.replace(punct, punct_mapping[punct]) tokens = [] for token in text.split() : token = mispell_dict.get(token.lower() , token) tokens.append(token) text = ' '.join(tokens) return text def load_data(train_path=train_path, test_path=test_path, debug=False): train_df = pd.read_csv(train_path) test_df = pd.read_csv(test_path) if debug: train_df = train_df[:10000] test_df = test_df[:10000] s = time.time() train_df['question_text'] = train_df['question_text'].apply(clean_text) test_df['question_text'] = test_df['question_text'].apply(clean_text) print('preprocssing {}s'.format(time.time() - s)) return train_df, test_df<count_duplicates>
parameters = {'max_depth':np.arange(3,10,2)} dt = DecisionTreeClassifier() dt_grid = GridSearchCV(dt,param_grid=parameters, n_jobs=-1) dt_grid_results = perform_model(dt_grid, X_train, y_train, X_test, y_test, class_labels=labels) print_grid_search_attributes(dt_grid_results['model'] )
Titanic - Machine Learning from Disaster
14,123,238
def build_counter(sents, splited=False): counter = Counter() for sent in tqdm(sents, ascii=True, desc='building conuter'): if splited: counter.update(sent) else: counter.update(sent.split()) return counter def build_vocab(counter, max_vocab_size): vocab = {'token2id': {'<PAD>': 0, '<UNK>': max_vocab_size + 1}} vocab['token2id'].update( {token: _id + 1 for _id,(token, count)in tqdm(enumerate(counter.most_common(max_vocab_size)) , desc='building vocab')}) vocab['id2token'] = {v: k for k, v in vocab['token2id'].items() } return vocab def tokens2ids(tokens, token2id): seq = [] for token in tokens: token_id = token2id.get(token, len(token2id)- 1) seq.append(token_id) return seq class TextDataset(Dataset): def __init__(self, df, vocab=None, num_max=None, max_seq_len=100, max_vocab_size=95000): if num_max is not None: df = df[:num_max] self.src_sents = df['question_text'].tolist() self.qids = df['qid'].values if vocab is None: src_counter = build_counter(self.src_sents) vocab = build_vocab(src_counter, max_vocab_size) self.vocab = vocab if 'src_seqs' not in df.columns: self.src_seqs = [] for sent in tqdm(self.src_sents, desc='tokenize'): seq = tokens2ids(sent.split() [:max_seq_len], vocab['token2id']) self.src_seqs.append(seq) else: self.src_seqs = df['src_seqs'].tolist() if 'target' in df.columns: self.targets = df['target'].values else: self.targets = np.random.randint(2, size=(len(self.src_sents),)) self.max_seq_len = max_seq_len def __len__(self): return len(self.src_sents) def get_keys(self): lens = np.fromiter( tqdm(((min(self.max_seq_len, len(c.split())))for c in self.src_sents), desc='generate lens'), dtype=np.int32) return lens def __getitem__(self, index): return self.qids[index], self.src_sents[index], self.src_seqs[index], self.targets[index]<categorify>
n_estimators = [10, 100, 500, 1000, 2000] max_depth = [5, 10, 20] parameters = dict(n_estimators=n_estimators, max_depth=max_depth) rf = RandomForestClassifier(random_state=42) rf_grid = GridSearchCV(rf,param_grid=parameters, n_jobs=-1) rf_grid_results = perform_model(rf_grid, X_train, y_train, X_test, y_test, class_labels=labels) print_grid_search_attributes(rf_grid_results['model'] )
Titanic - Machine Learning from Disaster
14,123,238
def _pad_sequences(seqs): lens = [len(seq)for seq in seqs] max_len = max(lens) padded_seqs = torch.zeros(len(seqs), max_len ).long() for i, seq in enumerate(seqs): end = lens[i] padded_seqs[i, :end] = torch.LongTensor(seq) return padded_seqs, lens def collate_fn(data): qids, src_sents, src_seqs, targets, = zip(*data) src_seqs, src_lens = _pad_sequences(src_seqs) return qids, src_sents, src_seqs, src_lens, torch.FloatTensor(targets) def divide_chunks(l, n): if n == len(l): yield np.arange(len(l), dtype=np.int32), l else: for i in range(0, len(l), n): data = l[i:i + n] yield np.arange(i, i + len(data), dtype=np.int32), data def prepare_buckets(lens, bucket_size, batch_size, shuffle_data=True, indices=None): lens = -lens assert bucket_size % batch_size == 0 or bucket_size == len(lens) if indices is None: if shuffle_data: indices = shuffle(np.arange(len(lens), dtype=np.int32)) lens = lens[indices] else: indices = np.arange(len(lens), dtype=np.int32) new_indices = [] extra_batch = None for chunk_index, chunk in(divide_chunks(lens, bucket_size)) : indices_sorted = chunk_index[np.argsort(chunk, axis=-1)] batches = [] for _, batch in divide_chunks(indices_sorted, batch_size): if len(batch)== batch_size: batches.append(batch.tolist()) else: assert extra_batch is None assert batch is not None extra_batch = batch if shuffle_data: batches = shuffle(batches) for batch in batches: new_indices.extend(batch) if extra_batch is not None: new_indices.extend(extra_batch) return indices[new_indices] class BucketSampler(Sampler): def __init__(self, data_source, sort_keys, bucket_size=None, batch_size=1536, shuffle_data=True): super().__init__(data_source) self.shuffle = shuffle_data self.batch_size = batch_size self.sort_keys = sort_keys self.bucket_size = bucket_size if bucket_size is not None else len(sort_keys) if not shuffle_data: self.index = prepare_buckets(self.sort_keys, bucket_size=self.bucket_size, batch_size=self.batch_size, shuffle_data=self.shuffle) else: self.index = None 
self.weights = None def set_weights(self, w): assert w >= 0 total = np.sum(w) if total != 1: w = w / total self.weights = w def __iter__(self): indices = None if self.weights is not None: total = len(self.sort_keys) indices = np.random.choice(total,(total,), p=self.weights) if self.shuffle: self.index = prepare_buckets(self.sort_keys, bucket_size=self.bucket_size, batch_size=self.batch_size, shuffle_data=self.shuffle, indices=indices) return iter(self.index) def get_reverse_indexes(self): indexes = np.zeros(( len(self.index),), dtype=np.int32) for i, j in enumerate(self.index): indexes[j] = i return indexes def __len__(self): return len(self.sort_keys) <load_pretrained>
model_rf_final = RandomForestClassifier(max_depth= 5, n_estimators= 500) model_rf_final.fit(X_train, y_train )
Titanic - Machine Learning from Disaster
14,123,238
def read_embedding(embedding_file): if os.path.basename(embedding_file)!= 'wiki-news-300d-1M.vec': skip_head = None else: skip_head = 0 if os.path.basename(embedding_file)== 'paragram_300_sl999.txt': encoding = 'latin' else: encoding = 'utf-8' embeddings_index = {} t_chunks = pd.read_csv(embedding_file, index_col=0, skiprows=skip_head, encoding=encoding, sep=' ', header=None, quoting=3, doublequote=False, quotechar=None, engine='c', na_filter=False, low_memory=True, chunksize=10000) for t in t_chunks: for k, v in zip(t.index.values, t.values): embeddings_index[k] = v.astype(np.float32) return embeddings_index def get_emb(embedding_index, word, word_raw): if word == word_raw: return None else: return embedding_index.get(word, None) def embedding2numpy(embedding_path, word_index, num_words, embed_size, emb_mean=0., emb_std=0.5, report_stats=False): embedding_index = read_embedding(embedding_path) num_words = min(num_words + 2, len(word_index)) if report_stats: all_coefs = [] for v in embedding_index.values() : all_coefs.append(v.reshape([-1, 1])) all_coefs = np.concatenate(all_coefs) print(all_coefs.mean() , all_coefs.std() , np.linalg.norm(all_coefs, axis=-1 ).mean()) embedding_matrix = np.zeros(( num_words, embed_size), dtype=np.float32) oov = 0 oov_cap = 0 oov_upper = 0 oov_lower = 0 for word, i in word_index.items() : if i == 0: continue if i >= num_words: continue embedding_vector = embedding_index.get(word, None) if embedding_vector is None: embedding_vector = get_emb(embedding_index, word.lower() , word) if embedding_vector is None: embedding_vector = get_emb(embedding_index, word.upper() , word) if embedding_vector is None: embedding_vector = get_emb(embedding_index, word.capitalize() , word) if embedding_vector is None: oov += 1 embedding_vector = np.random.normal(emb_mean, emb_std, size=(1, embed_size)) else: oov_lower += 1 else: oov_upper += 1 else: oov_cap += 1 embedding_matrix[i] = embedding_vector print('oov %d/%d/%d/%d/%d' %(oov, oov_cap, oov_upper, 
oov_lower, len(word_index))) return embedding_matrix def load_embedding(vocab, max_vocab_size, embed_size): embedding_matrix1 = embedding2numpy(embedding_glove, vocab['token2id'], max_vocab_size, embed_size, emb_mean=-0.005838499, emb_std=0.48782197, report_stats=False) embedding_matrix2 = embedding2numpy(embedding_para, vocab['token2id'], max_vocab_size, embed_size, emb_mean=-0.0053247833, emb_std=0.49346462, report_stats=False) return [embedding_matrix1, embedding_matrix2] <set_options>
test_pred = pd.Series(model_rf_final.predict(test), name = "Survived") test_pred_final = pd.DataFrame(test_pred )
Titanic - Machine Learning from Disaster
14,123,238
def set_lr(optimizer, lr): for g in optimizer.param_groups: g['lr'] = lr class CyclicLR: def __init__(self, optimizer, base_lr=0.001, max_lr=0.002, step_size=300., mode='triangular', gamma=0.99994, scale_fn=None, scale_mode='cycle'): super(CyclicLR, self ).__init__() self.optimizer = optimizer self.base_lr = base_lr self.max_lr = max_lr self.step_size = step_size self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = lambda x: 1. self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = lambda x: 1 /(2.**(x - 1)) self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = lambda x: gamma ** x self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.clr_iterations = 0. self.trn_iterations = 0. self.history = {} self._reset() def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None): if new_base_lr is not None: self.base_lr = new_base_lr if new_max_lr is not None: self.max_lr = new_max_lr if new_step_size is not None: self.step_size = new_step_size self.clr_iterations = 0. def clr(self): cycle = np.floor(1 + self.clr_iterations /(2 * self.step_size)) x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1) if self.scale_mode == 'cycle': return self.base_lr +(self.max_lr - self.base_lr)* np.maximum(0,(1 - x)) * self.scale_fn(cycle) else: return self.base_lr +(self.max_lr - self.base_lr)* np.maximum(0,(1 - x)) * self.scale_fn( self.clr_iterations) def on_train_begin(self): if self.clr_iterations == 0: set_lr(self.optimizer, self.base_lr) else: set_lr(self.optimizer, self.clr()) def on_batch_end(self): self.trn_iterations += 1 self.clr_iterations += 1 set_lr(self.optimizer, self.clr()) <normalization>
df_gender_submission = pd.read_csv('.. /input/titanic/gender_submission.csv')
Titanic - Machine Learning from Disaster
14,123,238
class Capsule(nn.Module):
    """Capsule layer with dynamic routing.

    Input:  (batch, input_num_capsule, input_dim_capsule)
    Output: (batch, num_capsule, dim_capsule)
    """

    def __init__(self, input_dim_capsule=1024, num_capsule=5, dim_capsule=5, routings=4):
        super(Capsule, self).__init__()
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.activation = self.squash
        # Shared transform mapping each input capsule to all output capsules.
        self.W = nn.Parameter(
            nn.init.xavier_normal_(torch.empty(1, input_dim_capsule, self.num_capsule * self.dim_capsule)))

    def forward(self, x):
        u_hat_vecs = torch.matmul(x, self.W)
        batch_size = x.size(0)
        input_num_capsule = x.size(1)
        u_hat_vecs = u_hat_vecs.view((batch_size, input_num_capsule, self.num_capsule, self.dim_capsule))
        # -> (batch, num_capsule, input_num_capsule, dim_capsule)
        u_hat_vecs = u_hat_vecs.permute(0, 2, 1, 3).contiguous()
        # Routing logits start at zero and are kept out of autograd.
        with torch.no_grad():
            b = torch.zeros_like(u_hat_vecs[:, :, :, 0])
        for i in range(self.routings):
            c = torch.nn.functional.softmax(b, dim=1)
            outputs = self.activation(torch.sum(c.unsqueeze(-1) * u_hat_vecs, dim=2))
            if i < self.routings - 1:
                # Agreement between output and prediction updates the logits.
                b = (torch.sum(outputs.unsqueeze(2) * u_hat_vecs, dim=-1))
        return outputs

    def squash(self, x, axis=-1):
        # NOTE(review): despite the name this is plain L2 normalization — the
        # ||s||^2 / (1 + ||s||^2) squashing factor is absent; presumably
        # intentional in this kernel, confirm before "fixing".
        s_squared_norm = (x ** 2).sum(axis, keepdim=True)
        scale = torch.sqrt(s_squared_norm + 1e-7)
        return x / scale


class Attention(nn.Module):
    """Additive self-attention pooling: (batch, seq, feat) -> (batch, feat)."""

    def __init__(self, feature_dim, max_seq_len=70):
        super().__init__()
        self.attention_fc = nn.Linear(feature_dim, 1)
        # Learned per-position bias, sliced to the actual sequence length.
        self.bias = nn.Parameter(torch.zeros(1, max_seq_len, 1, requires_grad=True))

    def forward(self, rnn_output):
        attention_weights = self.attention_fc(rnn_output)
        seq_len = rnn_output.size(1)
        attention_weights = self.bias[:, :seq_len, :] + attention_weights
        attention_weights = torch.tanh(attention_weights)
        attention_weights = torch.exp(attention_weights)
        # Normalize to a distribution over positions (softmax over tanh scores).
        attention_weights_sum = torch.sum(attention_weights, dim=1, keepdim=True) + 1e-7
        attention_weights = attention_weights / attention_weights_sum
        attended = torch.sum(attention_weights * rnn_output, dim=1)
        return attended


class InsincereModel(nn.Module):
    """BiLSTM -> BiGRU text classifier with capsule + attention pooling heads.

    Two pretrained embedding tables can be combined per-batch ('mixup' draws a
    Beta(alpha, beta) mixing weight), and a small learnable delta embedding
    can be switched on for fine-tuning. Output is one logit (or probability)
    per example.
    """

    def __init__(self, device, hidden_dim, hidden_dim_fc, embedding_matrixs, vocab_size=None,
                 embedding_dim=None, dropout=0.1, num_capsule=5, dim_capsule=5,
                 capsule_out_dim=1, alpha=0.8, beta=0.8, finetuning_vocab_size=120002,
                 embedding_mode='mixup', max_seq_len=70):
        super(InsincereModel, self).__init__()
        self.beta = beta
        self.embedding_mode = embedding_mode
        self.finetuning_vocab_size = finetuning_vocab_size
        self.alpha = alpha
        # vocab_size / embedding_dim args are overridden by the matrices' shape.
        vocab_size, embedding_dim = embedding_matrixs[0].shape
        self.raw_embedding_weights = embedding_matrixs
        self.embedding_0 = nn.Embedding(vocab_size, embedding_dim, padding_idx=0).from_pretrained(
            torch.from_numpy(embedding_matrixs[0]))
        self.embedding_1 = nn.Embedding(vocab_size, embedding_dim, padding_idx=0).from_pretrained(
            torch.from_numpy(embedding_matrixs[1]))
        self.embedding_mean = nn.Embedding(vocab_size, embedding_dim, padding_idx=0).from_pretrained(
            torch.from_numpy((embedding_matrixs[0] + embedding_matrixs[1]) / 2))
        # Zero-initialized trainable delta added on top of the frozen tables.
        self.learnable_embedding = nn.Embedding(finetuning_vocab_size, embedding_dim, padding_idx=0)
        nn.init.constant_(self.learnable_embedding.weight, 0)
        self.learn_embedding = False
        self.spatial_dropout = nn.Dropout2d(p=0.2)
        self.device = device
        self.hidden_dim = hidden_dim
        self.rnn0 = nn.LSTM(embedding_dim, int(hidden_dim / 2), num_layers=1, bidirectional=True, batch_first=True)
        self.rnn1 = nn.GRU(hidden_dim, int(hidden_dim / 2), num_layers=1, bidirectional=True, batch_first=True)
        self.capsule = Capsule(input_dim_capsule=self.hidden_dim, num_capsule=num_capsule, dim_capsule=dim_capsule)
        self.dropout2 = nn.Dropout(0.3)
        self.lincaps = nn.Linear(num_capsule * dim_capsule, capsule_out_dim)
        self.attention1 = Attention(self.hidden_dim, max_seq_len=max_seq_len)
        self.attention2 = Attention(self.hidden_dim, max_seq_len=max_seq_len)
        # 4 pooled views of size hidden_dim plus the capsule summary.
        self.fc = nn.Linear(hidden_dim * 4 + capsule_out_dim, hidden_dim_fc)
        self.norm = torch.nn.LayerNorm(hidden_dim * 4 + capsule_out_dim)
        self.dropout1 = nn.Dropout(0.2)
        self.dropout_linear = nn.Dropout(p=dropout)
        self.hidden2out = nn.Linear(hidden_dim_fc, 1)

    def set_embedding_mode(self, embedding_mode):
        """Select 'mixup' | 'emb0' | 'emb1' | 'mean' embedding combination."""
        self.embedding_mode = embedding_mode

    def enable_learning_embedding(self):
        """Turn on the trainable delta embedding (fine-tuning phase)."""
        self.learn_embedding = True

    def init_weights(self):
        """Xavier for input-hidden, orthogonal for hidden-hidden, zero biases."""
        ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
        hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
        b = (param.data for name, param in self.named_parameters() if 'bias' in name)
        for k in ih:
            nn.init.xavier_uniform_(k)
        for k in hh:
            nn.init.orthogonal_(k)
        for k in b:
            nn.init.constant_(k, 0)

    def apply_spatial_dropout(self, emb):
        # Dropout2d over (batch, channels=emb_dim, seq, 1) drops whole
        # embedding channels rather than individual activations.
        emb = emb.permute(0, 2, 1).unsqueeze(-1)
        emb = self.spatial_dropout(emb).squeeze(-1).permute(0, 2, 1)
        return emb

    def forward(self, seqs, lens, return_logits=True):
        if self.embedding_mode == 'mixup':
            emb0 = self.embedding_0(seqs)
            emb1 = self.embedding_1(seqs)
            # Per-example convex mix of the two embedding tables.
            prob = np.random.beta(self.alpha, self.beta, size=(seqs.size(0), 1, 1)).astype(np.float32)
            prob = torch.from_numpy(prob).to(self.device)
            emb = emb0 * prob + emb1 * (1 - prob)
        elif self.embedding_mode == 'emb0':
            emb = self.embedding_0(seqs)
        elif self.embedding_mode == 'emb1':
            emb = self.embedding_1(seqs)
        elif self.embedding_mode == 'mean':
            emb = self.embedding_mean(seqs)
        else:
            assert False
        if self.learn_embedding:
            # Ids beyond the fine-tuning table collapse onto its last row.
            seq_clamped = torch.clamp(seqs, 0, self.finetuning_vocab_size - 1)
            emb_learned = self.learnable_embedding(seq_clamped)
            emb = emb + emb_learned
        emb = self.apply_spatial_dropout(emb)
        lstm_output0, _ = self.rnn0(emb)
        lstm_output1, _ = self.rnn1(lstm_output0)
        content3 = self.capsule(lstm_output1)
        batch_size = content3.size(0)
        content3 = content3.view(batch_size, -1)
        content3 = self.dropout2(content3)
        content3 = torch.relu(self.lincaps(content3))
        feature_att1 = self.attention1(lstm_output0)
        feature_att2 = self.attention2(lstm_output1)
        feature_avg2 = torch.mean(lstm_output1, dim=1)
        feature_max2, _ = torch.max(lstm_output1, dim=1)
        feature = torch.cat((feature_att1, feature_att2, feature_avg2, feature_max2, content3), dim=-1)
        feature = self.norm(feature)
        feature = self.dropout1(feature)
        feature = torch.relu(feature)
        out = self.fc(feature)
        out = self.dropout_linear(out)
        out = self.hidden2out(out)
        if not return_logits:
            out = torch.sigmoid(out)
        return out
submission = pd.DataFrame({ "PassengerId": testo["PassengerId"], "Survived": test_pred_final['Survived'] }) submission.to_csv('Titanic Submission.csv', index = False) print('Done' )
Titanic - Machine Learning from Disaster
14,129,375
def eval_model(model, data_iter, device, order_index=None): model.eval() predictions = [] with torch.no_grad() : for batch_data in data_iter: qid_batch, src_sents, src_seqs, src_lens, tgts = batch_data src_seqs = src_seqs.to(device) out = model(src_seqs, src_lens, return_logits=False) predictions.append(out) predictions = torch.cat(predictions, dim=0) if order_index is not None: predictions = predictions[order_index] predictions = predictions.to('cpu' ).numpy().ravel() return predictions <compute_train_metric>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
14,129,375
def cv(train_df, test_df, device=None, n_folds=10, shared_resources=None, share=True, **kwargs):
    """Stratified K-fold training wrapper around main().

    When `share` is set, vocabulary, tokenized sequences and embedding
    matrices are built once into `shared_resources` and reused by every fold.
    Returns (test predictions, test predictions, per-fold [f1, rocauc] scores,
    globally tuned threshold, mean pairwise correlation of fold predictions).

    NOTE(review): the original one-line export made the nesting of the
    tokenize loops ambiguous; they are placed inside the vocab-building branch
    here (build once) — confirm against the upstream kernel. The first two
    return values are intentionally the same array in the original.
    """
    if device is None:
        device = torch.device("cuda:{}".format(0) if torch.cuda.is_available() else "cpu")
    max_vocab_size = kwargs['max_vocab_size']
    embed_size = kwargs['embed_size']
    threshold = kwargs['threshold']
    max_seq_len = kwargs['max_seq_len']
    if shared_resources is None:
        shared_resources = {}
    if share:
        if 'vocab' not in shared_resources:
            counter = build_counter(chain(train_df['question_text'], test_df['question_text']))
            vocab = build_vocab(counter, max_vocab_size=max_vocab_size)
            shared_resources['vocab'] = vocab
            seqs = []
            for sent in tqdm(train_df['question_text'], desc='tokenize'):
                seq = tokens2ids(sent.split()[:max_seq_len], vocab['token2id'])
                seqs.append(seq)
            train_df['src_seqs'] = seqs
            seqs = []
            for sent in tqdm(test_df['question_text'], desc='tokenize'):
                seq = tokens2ids(sent.split()[:max_seq_len], vocab['token2id'])
                seqs.append(seq)
            test_df['src_seqs'] = seqs
        if 'embedding_matrix' not in shared_resources:
            embedding_matrix = load_embedding(shared_resources['vocab'], max_vocab_size, embed_size)
            shared_resources['embedding_matrix'] = embedding_matrix
    splits = list(
        StratifiedKFold(n_splits=n_folds, shuffle=True).split(train_df['target'], train_df['target']))
    scores = []
    best_threshold = []
    best_threshold_global = None
    best_score = -1
    predictions_train_reduced = []
    targets_train = []
    predictions_tes_reduced = np.zeros((len(test_df), n_folds))
    predictions_te = np.zeros((len(test_df),))
    for idx, (train_idx, valid_idx) in enumerate(splits):
        grow_df = train_df.iloc[train_idx].reset_index(drop=True)
        dev_df = train_df.iloc[valid_idx].reset_index(drop=True)
        predictions_te_i, predictions_va, targets_va, best_threshold_i = main(
            grow_df, dev_df, test_df, device, **kwargs, idx=idx,
            shared_resources=shared_resources, return_reduced=True)
        predictions_tes_reduced[:, idx] = predictions_te_i
        scores.append([f1_score(targets_va, predictions_va > threshold),
                       roc_auc_score(targets_va, predictions_va)])
        best_threshold.append(best_threshold_i)
        predictions_te += predictions_te_i / n_folds
        predictions_train_reduced.append(predictions_va)
        targets_train.append(targets_va)
    # Mean off-diagonal correlation between fold test predictions.
    coeff = (np.corrcoef(predictions_tes_reduced, rowvar=False).sum() - n_folds) / n_folds / (n_folds - 1)
    predictions_train_reduced = np.concatenate(predictions_train_reduced)
    targets_train = np.concatenate(targets_train)
    # Tune a single threshold on the pooled out-of-fold predictions.
    for t in np.arange(0, 1, 0.01):
        score = f1_score(targets_train, predictions_train_reduced > t)
        if score > best_score:
            best_score = score
            best_threshold_global = t
    print('avg of best threshold {} macro-f1 best threshold {} best score {}'.format(
        best_threshold, best_threshold_global, best_score))
    return predictions_te, predictions_te, scores, best_threshold_global, coeff
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head() print(test_data.isnull().sum() )
Titanic - Machine Learning from Disaster
14,129,375
def main(train_df, valid_df, test_df, device=None, epochs=3, fine_tuning_epochs=3, batch_size=512,
         learning_rate=0.001, learning_rate_max_offset=0.001, dropout=0.1, threshold=None,
         max_vocab_size=95000, embed_size=300, max_seq_len=70, print_every_step=500, idx=0,
         shared_resources=None, return_reduced=True):
    """Train one InsincereModel on (train, valid) and predict the test set.

    The last `fine_tuning_epochs` epochs unfreeze the learnable delta
    embedding and average snapshot predictions every two CLR half-cycles
    (fast geometric ensembling). Returns (test predictions, validation
    predictions, validation targets, best F1 threshold on validation).
    """
    if device is None:
        device = torch.device("cuda:{}".format(0) if torch.cuda.is_available() else "cpu")
    if shared_resources is None:
        shared_resources = {}
    batch_time = AverageMeter()
    data_time = AverageMeter()
    mean_len = AverageMeter()
    if 'vocab' not in shared_resources:
        counter = build_counter(chain(train_df['question_text'], test_df['question_text']))
        vocab = build_vocab(counter, max_vocab_size=max_vocab_size)
    else:
        vocab = shared_resources['vocab']
    if 'embedding_matrix' not in shared_resources:
        embedding_matrix = load_embedding(vocab, max_vocab_size, embed_size)
    else:
        embedding_matrix = shared_resources['embedding_matrix']
    test_dataset = TextDataset(test_df, vocab=vocab, max_seq_len=max_seq_len)
    tb = BucketSampler(test_dataset, test_dataset.get_keys(), batch_size=batch_size, shuffle_data=False)
    test_iter = DataLoader(dataset=test_dataset, batch_size=batch_size, sampler=tb,
                           num_workers=0, collate_fn=collate_fn)
    train_dataset = TextDataset(train_df, vocab=vocab, max_seq_len=max_seq_len)
    valid_dataset = TextDataset(valid_df, vocab=vocab, max_seq_len=max_seq_len)
    vb = BucketSampler(valid_dataset, valid_dataset.get_keys(), batch_size=batch_size, shuffle_data=False)
    valid_index_reverse = vb.get_reverse_indexes()
    model = InsincereModel(device, hidden_dim=256, hidden_dim_fc=16, dropout=dropout,
                           embedding_matrixs=embedding_matrix, vocab_size=len(vocab['token2id']),
                           embedding_dim=embed_size, max_seq_len=max_seq_len)
    if idx == 0:
        print(model)
        print('total trainable {}'.format(count_parameters(model)))
    model = model.to(device)
    optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], lr=learning_rate)
    train_iter = DataLoader(dataset=train_dataset, batch_size=batch_size,
                            sampler=BucketSampler(train_dataset, train_dataset.get_keys(),
                                                  bucket_size=batch_size * 20, batch_size=batch_size),
                            num_workers=0, collate_fn=collate_fn)
    valid_iter = DataLoader(dataset=valid_dataset, batch_size=batch_size, sampler=vb, collate_fn=collate_fn)
    loss_list = []
    global_steps = 0
    total_steps = epochs * len(train_iter)
    loss_fn = torch.nn.BCEWithLogitsLoss()
    end = time.time()
    predictions_tes = []
    predictions_vas = []
    n_fge = 0  # number of snapshots averaged so far
    clr = CyclicLR(optimizer, base_lr=learning_rate, max_lr=learning_rate + learning_rate_max_offset,
                   step_size=300, mode='exp_range')
    clr.on_train_begin()
    # Reinterpret the argument as the epoch index where fine-tuning starts.
    fine_tuning_epochs = epochs - fine_tuning_epochs
    predictions_te = None
    for epoch in tqdm(range(epochs)):
        fine_tuning = epoch >= fine_tuning_epochs
        start_fine_tuning = fine_tuning_epochs == epoch
        if start_fine_tuning:
            # Fresh optimizer + tighter CLR cycle once the delta embedding unfreezes.
            model.enable_learning_embedding()
            optimizer = optim.Adam([p for p in model.parameters() if p.requires_grad], lr=learning_rate)
            global_steps = 0
            total_steps = (epochs - fine_tuning_epochs) * len(train_iter)
            clr = CyclicLR(optimizer, base_lr=learning_rate,
                           max_lr=learning_rate + learning_rate_max_offset,
                           step_size=int(len(train_iter) / 8))
            clr.on_train_begin()
            predictions_te = np.zeros((len(test_df),))
            predictions_va = np.zeros((len(valid_dataset.targets),))
        for batch_data in train_iter:
            data_time.update(time.time() - end)
            qids, src_sents, src_seqs, src_lens, tgts = batch_data
            # NOTE(review): this records the batch *total* length, not a mean.
            mean_len.update(sum(src_lens))
            src_seqs = src_seqs.to(device)
            tgts = tgts.to(device)
            model.train()
            optimizer.zero_grad()
            out = model(src_seqs, src_lens, return_logits=True).view(-1)
            loss = loss_fn(out, tgts)
            loss.backward()
            optimizer.step()
            loss_list.append(loss.detach().to('cpu').item())
            global_steps += 1
            batch_time.update(time.time() - end)
            end = time.time()
            if global_steps % print_every_step == 0:
                curr_gpu_memory_usage = get_gpu_memory_usage(device_id=torch.cuda.current_device())
                print('Global step: {}/{} Total loss: {:.4f} Current GPU memory '
                      'usage: {} maxlen {} '.format(global_steps, total_steps, avg(loss_list),
                                                    curr_gpu_memory_usage, mean_len.avg))
                loss_list = []
            if fine_tuning and global_steps % (2 * clr.step_size) == 0:
                # Snapshot at every full CLR cycle; keep a running average.
                predictions_te_tmp2 = eval_model(model, test_iter, device)
                predictions_va_tmp2 = eval_model(model, valid_iter, device, valid_index_reverse)
                report_perf(valid_dataset, predictions_va_tmp2, threshold, idx, epoch, desc='val set mean')
                predictions_te = predictions_te * n_fge + (predictions_te_tmp2)
                predictions_va = predictions_va * n_fge + (predictions_va_tmp2)
                predictions_te /= n_fge + 1
                predictions_va /= n_fge + 1
                report_perf(valid_dataset, predictions_va, threshold, idx, epoch, desc='val set (fge)')
                predictions_tes.append(predictions_te_tmp2.reshape([-1, 1]))
                predictions_vas.append(predictions_va_tmp2.reshape([-1, 1]))
                n_fge += 1
            clr.on_batch_end()
        if not fine_tuning:
            predictions_va = eval_model(model, valid_iter, device, valid_index_reverse)
            report_perf(valid_dataset, predictions_va, threshold, idx, epoch)
    if predictions_te is not None:
        # Undo the bucket sampler's ordering of the snapshot average.
        predictions_te = predictions_te[tb.get_reverse_indexes()]
    else:
        predictions_te = eval_model(model, test_iter, device, tb.get_reverse_indexes())
    best_score = -1
    best_threshold = None
    for t in np.arange(0, 1, 0.01):
        score = f1_score(valid_dataset.targets, predictions_va > t)
        if score > best_score:
            best_score = score
            best_threshold = t
    print('best threshold on validation set: {:.2f} score {:.4f}'.format(best_threshold, best_score))
    if not return_reduced and len(predictions_vas) > 0:
        # Return the raw per-snapshot columns instead of their average.
        predictions_te = np.concatenate(predictions_tes, axis=1)
        predictions_te = predictions_te[tb.get_reverse_indexes(), :]
        predictions_va = np.concatenate(predictions_vas, axis=1)
    return predictions_te, predictions_va, valid_dataset.targets, best_threshold
y = train_data["Survived"] features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Age"] X = pd.get_dummies(train_data[features]) X_unknown = pd.get_dummies(test_data[features]) my_imputer = SimpleImputer() X = my_imputer.fit_transform(X) X_unknown = my_imputer.fit_transform(X_unknown) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=10 )
Titanic - Machine Learning from Disaster
14,129,375
set_seed(233) epochs = 8 batch_size = 512 learning_rate = 0.001 learning_rate_max_offset = 0.002 fine_tuning_epochs = 2 threshold = 0.31 max_vocab_size = 120000 embed_size = 300 print_every_step = 500 max_seq_len = 70 share = True dropout = 0.1 sub = pd.read_csv('.. /input/sample_submission.csv') train_df, test_df = load_data() trn_idx = np.random.permutation(len(train_df)) train_df = train_df.iloc[trn_idx].reset_index(drop=True) n_folds = 5 n_repeats = 1 args = {'epochs': epochs, 'batch_size': batch_size, 'learning_rate': learning_rate, 'threshold': threshold, 'max_vocab_size': max_vocab_size, 'embed_size': embed_size, 'print_every_step': print_every_step, 'dropout': dropout, 'learning_rate_max_offset': learning_rate_max_offset, 'fine_tuning_epochs': fine_tuning_epochs, 'max_seq_len': max_seq_len} predictions_te_all = np.zeros(( len(test_df),)) for _ in range(n_repeats): if n_folds > 1: _, predictions_te, _, threshold, coeffs = cv(train_df, test_df, n_folds=n_folds, share=share, **args) print('coeff between predictions {}'.format(coeffs)) else: predictions_te, _, _, _ = main(train_df, test_df, test_df, **args) predictions_te_all += predictions_te / n_repeats sub.prediction = predictions_te_all > threshold sub.to_csv("submission.csv", index=False )<init_hyperparams>
optimal_alpha = 1 optimal_accuracy = 0 for i in range(20): model = MLPClassifier(hidden_layer_sizes = [50, 50], alpha = 0.1*(i+1), activation='relu', solver='adam', random_state=1 ).fit(X_train, y_train) model_accuracy = model.score(X_test, y_test) if model_accuracy > optimal_accuracy: optimal_accuracy = model_accuracy optimal_alpha = 0.1*(i+1) print('The optimal alpha found was {}'.format(optimal_alpha)) print('The test accuracy was {} '.format(optimal_accuracy))
Titanic - Machine Learning from Disaster
14,129,375
embed_size = 600 max_features = None maxlen = 57<import_modules>
optimal_estimators = 1 optimal_accuracy = 0 for i in range(20): model = RandomForestClassifier(n_estimators=(i+1)*10, max_depth=5, random_state=1 ).fit(X_train, y_train) model_accuracy = model.score(X_test, y_test) if model_accuracy > optimal_accuracy: optimal_accuracy = model_accuracy optimal_estimators =(i+1)*10 print('The optimal number of estimators found was {}'.format(optimal_estimators)) print('The test accuracy was {} '.format(optimal_accuracy))
Titanic - Machine Learning from Disaster
14,129,375
import os import time import numpy as np import pandas as pd from tqdm import tqdm import math from sklearn.model_selection import train_test_split from sklearn import metrics from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, CuDNNLSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D from keras.layers import Bidirectional, GlobalMaxPool1D, GlobalMaxPooling1D, GlobalAveragePooling1D from keras.layers import Input, Embedding, Dense, Conv2D, MaxPool2D, concatenate, Lambda from keras.layers import Reshape, Flatten, Concatenate, Dropout, SpatialDropout1D, BatchNormalization from keras.optimizers import Adam, Nadam from keras.models import Model from keras import backend as K from keras.callbacks import Callback from keras import initializers, regularizers, constraints, optimizers, layers from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.engine.topology import Layer import gc, re from sklearn import metrics from sklearn.model_selection import GridSearchCV, StratifiedKFold from sklearn.metrics import f1_score, roc_auc_score, confusion_matrix, auc, precision_recall_curve<define_variables>
model = RandomForestClassifier(n_estimators=110, max_depth=5, random_state=1 ).fit(X, y) predictions = model.predict(X_unknown) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False )
Titanic - Machine Learning from Disaster
14,110,105
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ] def clean_text(x): x = str(x) for punct in puncts: if punct in x: x = x.replace(punct, f' {punct} ') return x def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x def _get_mispell(mispell_dict): mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys())) return mispell_dict, mispell_re mispell_dict = {"aren't" : "are not", "can't" : "cannot", "couldn't" : "could not", "didn't" : "did not", "doesn't" : "does not", "don't" : "do not", "hadn't" : "had not", "hasn't" : "has not", "haven't" : "have not", "he'd" : "he would", "he'll" : "he will", "he's" : "he is", "i'd" : "I would", "i'd" : "I had", "i'll" : "I will", "i'm" : "I am", "isn't" : "is not", "it's" : "it is", "it'll":"it will", "i've" : "I have", "let's" : "let us", "mightn't" : "might not", "mustn't" : "must not", "shan't" : "shall not", "she'd" : "she would", "she'll" : "she will", "she's" : "she is", "shouldn't" : "should not", "that's" : "that is", "there's" : "there is", "they'd" : "they would", "they'll" : "they will", "they're" : "they are", "they've" : "they have", "we'd" : "we would", "we're" : "we are", "weren't" : "were not", "we've" : "we have", "what'll" : "what will", "what're" : "what are", "what's" : "what is", "what've" : "what have", "where's" : "where is", "who'd" : "who would", "who'll" : "who 
will", "who're" : "who are", "who's" : "who is", "who've" : "who have", "won't" : "will not", "wouldn't" : "would not", "you'd" : "you would", "you'll" : "you will", "you're" : "you are", "you've" : "you have", "'re": " are", "wasn't": "was not", "we'll":" will", "didn't": "did not"} mispellings, mispellings_re = _get_mispell(mispell_dict) def replace_typical_misspell(text): def replace(match): return mispellings[match.group(0)] return mispellings_re.sub(replace, text )<load_from_csv>
warnings.filterwarnings("ignore")
Titanic - Machine Learning from Disaster
14,110,105
def load_and_prec() : train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ splits = list(StratifiedKFold(n_splits=10,random_state=2018 ).split(train_X,train_df['target'].values)) test_X = test_df["question_text"].fillna("_ train_X, val_X = train_X[splits[0][0]],train_X[splits[0][1]] train_y = train_df['target'].values[splits[0][0]] val_y = train_df['target'].values[splits[0][1]] tokenizer = Tokenizer(num_words=max_features, filters='', lower=True) tokenizer.fit_on_texts(list(train_X)+list(test_X)) train_X = tokenizer.texts_to_sequences(train_X) val_X = tokenizer.texts_to_sequences(val_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) val_X = pad_sequences(val_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) np.random.seed(SEED) trn_idx = np.random.permutation(len(train_X)) val_idx = np.random.permutation(len(val_X)) train_X = train_X[trn_idx] val_X = val_X[val_idx] train_y = train_y[trn_idx] val_y = val_y[val_idx] return train_X, val_X, test_X, train_y, val_y, tokenizer.word_index <load_pretrained>
train=pd.read_csv('.. /input/titanic/train.csv') test=pd.read_csv('.. /input/titanic/test.csv') y_test=pd.read_csv('.. /input/titanic/gender_submission.csv' )
Titanic - Machine Learning from Disaster
14,110,105
def load_glove(word_index):
    """Build a GloVe embedding matrix for `word_index`.

    Rows for missing words stay at N(emb_mean, emb_std) noise (the published
    GloVe statistics). Vectors are truncated to 300 dims because a few GloVe
    lines carry extra tokens.
    """
    EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
    emb_mean, emb_std = -0.005838499, 0.48782197
    embed_size = 300
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    with open(EMBEDDING_FILE, 'r', encoding="utf8") as f:
        for line in f:
            word, vec = line.split(' ', 1)
            if word not in word_index:
                continue
            i = word_index[word]
            if i >= nb_words:
                continue
            embedding_vector = np.asarray(vec.split(' '), dtype='float32')[:300]
            if len(embedding_vector) == 300:
                embedding_matrix[i] = embedding_vector
    return embedding_matrix


def load_fasttext(word_index):
    """Build a fastText (wiki-news) embedding matrix for `word_index`.

    Loads the whole file into memory first to compute the empirical mean/std
    used for missing-word rows; lines shorter than 100 chars (the header) are
    skipped.
    """
    EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'

    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')

    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o) > 100)
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            break
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix


def load_para(word_index):
    """Build a paragram embedding matrix for `word_index`.

    Same scheme as load_glove; the paragram file needs latin-ish decoding so
    undecodable bytes are ignored.
    """
    EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
    emb_mean, emb_std = -0.0053247833, 0.49346462
    embed_size = 300
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    with open(EMBEDDING_FILE, 'r', encoding="utf8", errors='ignore') as f:
        for line in f:
            word, vec = line.split(' ', 1)
            if word not in word_index:
                continue
            i = word_index[word]
            if i >= nb_words:
                continue
            embedding_vector = np.asarray(vec.split(' '), dtype='float32')[:300]
            if len(embedding_vector) == 300:
                embedding_matrix[i] = embedding_vector
    return embedding_matrix
p=train.loc[train['Survived']==1 ] print(len(p)) male=p.loc[p['Sex']=='male'] female=p.loc[p['Sex']=='female'] print(male['Pclass'].value_counts()) print(female['Pclass'].value_counts() )
Titanic - Machine Learning from Disaster
14,110,105
def threshold_search(y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in [i * 0.01 for i in range(100)]: score = f1_score(y_true=y_true, y_pred=y_proba > threshold) if score > best_score: best_threshold = threshold best_score = score rocauc = roc_auc_score(y_true, y_proba) p, r, _ = precision_recall_curve(y_true, y_proba) prauc = auc(r, p) search_result = {'threshold': best_threshold, 'f1': best_score, 'rocauc': rocauc, 'prauc': prauc} return search_result<init_hyperparams>
p=train.loc[train['Survived']==0 ] male=p.loc[p['Sex']=='male'] female=p.loc[p['Sex']=='female'] print(male['Pclass'].value_counts()) print(female['Pclass'].value_counts() )
Titanic - Machine Learning from Disaster
14,110,105
class CyclicLR(Callback):
    """Keras callback implementing Cyclical Learning Rates (Smith, 2017).

    The learning rate oscillates between `base_lr` and `max_lr` as a
    triangular wave with half-period `step_size` (measured in batches).
    `mode` selects the amplitude schedule:
      * 'triangular'  - constant amplitude,
      * 'triangular2' - amplitude halved every cycle,
      * 'exp_range'   - amplitude decayed by gamma**iteration.
    A user-supplied `scale_fn` overrides `mode`; `scale_mode` says whether
    it is evaluated per 'cycle' or per batch ('iterations').
    """

    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000., mode='triangular',
                 gamma=1., scale_fn=None, scale_mode='cycle'):
        super(CyclicLR, self).__init__()
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        # No custom scale function: derive one from the named mode.
        if scale_fn == None:
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = lambda x: 1/(2.**(x-1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma**(x)
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        # clr_iterations drives the wave; trn_iterations is a lifetime counter.
        self.clr_iterations = 0.
        self.trn_iterations = 0.
        self.history = {}
        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        # Optionally rebind the bounds, then restart the cycle clock.
        if new_base_lr != None:
            self.base_lr = new_base_lr
        if new_max_lr != None:
            self.max_lr = new_max_lr
        if new_step_size != None:
            self.step_size = new_step_size
        self.clr_iterations = 0.

    def clr(self):
        # Position on the triangular wave for the current iteration,
        # scaled by scale_fn (per cycle or per iteration).
        cycle = np.floor(1+self.clr_iterations/(2*self.step_size))
        x = np.abs(self.clr_iterations/self.step_size - 2*cycle + 1)
        if self.scale_mode == 'cycle':
            return self.base_lr + (self.max_lr-self.base_lr)*np.maximum(0, (1-x))*self.scale_fn(cycle)
        else:
            return self.base_lr + (self.max_lr-self.base_lr)*np.maximum(0, (1-x))*self.scale_fn(self.clr_iterations)

    def on_train_begin(self, logs={}):
        logs = logs or {}
        # Fresh run starts at base_lr; a resumed run continues the wave.
        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        # Record lr / iteration count plus every metric Keras reported
        # for this batch, then advance the schedule.
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        K.set_value(self.model.optimizer.lr, self.clr())
# Hand-crafted ordinal score combining sex, passenger class and age band.
feature_mix = []
for sex, pclass, age in zip(train['Sex'], train['Pclass'], train['Age']):
    marks = 0
    if sex == 'female':
        marks += 10
    if pclass != 1:
        marks += 5
    else:
        marks += 2
    # Ages strictly between 20 and 35 score higher; NaN falls to the else.
    if 20 < age < 35:
        marks += 4
    else:
        marks += 2
    feature_mix.append(marks)
train['feature_mix'] = feature_mix
train['feature_mix'].value_counts()
Titanic - Machine Learning from Disaster
14,110,105
class Attention(Layer):
    """Trainable soft-attention pooling over the time axis.

    Input (batch, step_dim, features) -> output (batch, features): a
    weighted sum of timesteps whose weights come from a learned
    (features,) projection W plus an optional per-step bias b, passed
    through tanh and an exp/normalise softmax.  Masked timesteps get
    zero weight.
    """

    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        # Expect (batch, steps, features).
        assert len(input_shape) == 3
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            # One bias per timestep, not per feature.
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        # The pooled output has no time axis, so no mask is propagated.
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        # One alignment scalar per timestep via the learned projection.
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                        K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        if mask is not None:
            # Zero out padded positions before normalising.
            a *= K.cast(mask, K.floatx())
        # Softmax denominator; epsilon guards fully-masked rows.
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
# Hand-crafted ordinal score for the test set — must mirror the train-set
# construction exactly, or the feature's scale differs between the two.
feature_mix = []
for i in range(0, len(test)):
    marks = 0
    if test['Sex'].iloc[i] == 'female':
        marks = marks + 10
    if test['Pclass'].iloc[i] != 1:
        marks = marks + 5
    else:
        marks = marks + 2
    if test['Age'].iloc[i] < 35 and test['Age'].iloc[i] > 20:
        marks = marks + 4
    else:
        # BUG FIX: the train-set version of this feature adds 2 when the
        # age test fails; this branch was missing here, so train and test
        # encodings of `feature_mix` disagreed for every passenger outside
        # the 20-35 band, skewing any model consuming the feature.
        marks = marks + 2
    feature_mix.append(marks)
test['feature_mix'] = feature_mix
test['feature_mix'].value_counts()
Titanic - Machine Learning from Disaster
14,110,105
def model_gru_conv_3(embedding_matrix):
    """BiLSTM -> BiGRU -> 1x1 conv model with twin global-max-pool heads.

    Frozen pretrained embeddings; binary sigmoid output; Adam optimiser.
    """
    inp = Input(shape=(maxlen,))
    emb = Embedding(max_features, embed_size, weights=[embedding_matrix],
                    trainable=False)(inp)
    emb = SpatialDropout1D(0.2)(emb)
    lstm_seq = Bidirectional(
        CuDNNLSTM(128, kernel_initializer=initializers.glorot_uniform(seed=2018),
                  return_sequences=True))(emb)
    gru_seq = Bidirectional(
        CuDNNGRU(64, kernel_initializer=initializers.glorot_uniform(seed=2018),
                 return_sequences=True))(lstm_seq)
    conv_seq = Conv1D(64, kernel_size=1,
                      kernel_initializer=initializers.he_uniform(seed=2018),
                      activation="tanh")(gru_seq)
    # Pool both the GRU sequence and its convolved mix, then merge.
    merged = concatenate([GlobalMaxPooling1D()(gru_seq),
                          GlobalMaxPooling1D()(conv_seq)])
    outp = Dense(1, kernel_initializer=initializers.he_uniform(seed=2018),
                 activation='sigmoid')(merged)
    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
    return model
# Keep the ids aside for the submission file, then drop them from the features.
y_test.head()
PassengerId = y_test['PassengerId']
y_test = y_test.drop(columns=['PassengerId'])
Titanic - Machine Learning from Disaster
14,110,105
SEED=2018

def model_RCNN(embedding_matrix, hidden_dim_1=128, hidden_dim_2=64, max_features=max_features):
    """Recurrent-CNN over (left-context, document, right-context) inputs.

    Forward LSTM reads the left context, backward LSTM the right context;
    both are concatenated with the word embedding per timestep, mixed by a
    1x1 convolution, then global-max-pooled into a sigmoid head.
    """
    # Append one extra all-zero row intended as the padding vector used by
    # get_train_list's shifted inputs.
    # NOTE(review): the table now has max_features+1 rows while the
    # Embedding layer below declares input_dim=max_features, and
    # get_train_list pads with index max_features+1 — confirm these three
    # values actually agree at runtime.
    embedding_matrix = np.concatenate([embedding_matrix, np.zeros((1, np.shape(embedding_matrix)[1]))])
    print(np.shape(embedding_matrix))
    left_context = Input(shape=(maxlen,))
    document = Input(shape=(maxlen,))
    right_context = Input(shape=(maxlen,))
    # One shared, frozen embedding for all three views of the sequence.
    embedder = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)
    doc_embedding = embedder(document)
    doc_embedding = SpatialDropout1D(0.2)(doc_embedding)
    l_embedding = embedder(left_context)
    l_embedding = SpatialDropout1D(0.2)(l_embedding)
    r_embedding = embedder(right_context)
    r_embedding = SpatialDropout1D(0.2)(r_embedding)
    forward = CuDNNLSTM(hidden_dim_1, return_sequences = True)(l_embedding)
    backward = CuDNNLSTM(hidden_dim_1, return_sequences = True)(r_embedding)
    # Re-align the backward pass with the forward time order.
    backward = Lambda(lambda x: K.reverse(x, axes = 1))(backward)
    together = concatenate([forward, doc_embedding, backward], axis = 2)
    # 1x1 convolution mixes [left, word, right] features per timestep.
    semantic = Conv1D(hidden_dim_2*8, kernel_size = 1, kernel_initializer=initializers.he_uniform(seed=SEED), activation = "tanh")(together)
    pool_rnn = GlobalMaxPool1D()(semantic)
    output = Dense(1, kernel_initializer=initializers.he_uniform(seed=SEED), activation = "sigmoid")(pool_rnn)
    model = Model(inputs = [left_context, document, right_context], outputs = output)
    model.compile(optimizer = Adam(), loss = 'binary_crossentropy', metrics = ["accuracy"])
    return model
# Fill missing values before feature encoding.
# NOTE(review): Age is imputed with the mid-range (max+min)/2 rather than
# the mean or median — confirm this is intentional.
# NOTE(review): Python's builtin max()/min() over a Series containing NaN
# is order-dependent; pandas .max()/.min() would skip NaN — verify that
# train_m/test_m are not NaN at runtime.
train_m=(max(train['Age'])+min(train['Age']))/2
values={'Cabin':'nocabin','Age':train_m,'Embarked':'notknown'}
train=train.fillna(value=values)
test_m=(max(test['Age'])+min(test['Age']))/2
print(test_m)
# NOTE(review): the missing test-set Fare is filled with the *maximum*
# fare — a median would be far more typical for an imputation; confirm.
values={'Cabin':'nocabin','Age':test_m,'Embarked':'notknown',"Fare":max(test['Fare'])}
test=test.fillna(value=values)
Titanic - Machine Learning from Disaster
14,110,105
def model_lstm_atten(embedding_matrix):
    """BiLSTM(128) -> BiGRU(96) -> global max pool -> dropout -> sigmoid.

    (The name is historical — no attention layer is used in this variant.)
    """
    inp = Input(shape=(maxlen,))
    h = Embedding(max_features, embed_size, weights=[embedding_matrix],
                  trainable=False)(inp)
    h = SpatialDropout1D(0.2)(h)
    h = Bidirectional(CuDNNLSTM(128, return_sequences=True))(h)
    h = Bidirectional(CuDNNGRU(96, return_sequences=True))(h)
    h = GlobalMaxPooling1D()(h)
    h = Dropout(0.1)(h)
    out = Dense(1, activation="sigmoid")(h)
    model = Model(inputs=inp, outputs=out)
    model.compile(loss='binary_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
    return model
# Separate the label column from the feature frame.
y = train["Survived"]
train = train.drop(columns=["Survived"])
Titanic - Machine Learning from Disaster
14,110,105
def model_lstm_max(embedding_matrix):
    """BiLSTM -> BiGRU -> 1x1 conv -> Attention pooling -> sigmoid head."""
    inp = Input(shape=(maxlen,))
    h = Embedding(max_features, embed_size, weights=[embedding_matrix],
                  trainable=False)(inp)
    h = SpatialDropout1D(0.2)(h)
    h = Bidirectional(CuDNNLSTM(128, return_sequences=True))(h)
    h = Bidirectional(
        CuDNNGRU(64, kernel_initializer=initializers.glorot_uniform(seed=2018),
                 return_sequences=True))(h)
    h = Conv1D(64, kernel_size=1,
               kernel_initializer=initializers.he_uniform(seed=2018),
               activation="tanh")(h)
    # Attention collapses the time axis into a single feature vector.
    h = Attention(maxlen)(h)
    out = Dense(1, activation="sigmoid")(h)
    model = Model(inputs=inp, outputs=out)
    model.compile(loss='binary_crossentropy', optimizer=Adam(),
                  metrics=['accuracy'])
    return model
# Stratified 80/20 split so the survival rate is preserved in both folds.
X_train, X_cv, y_train, y_cv = train_test_split(
    train, y, stratify=y, test_size=0.2, random_state=42)
Titanic - Machine Learning from Disaster
14,110,105
def get_train_list(train_X):
    # Builds the [left-context, document, right-context] input triple for
    # the RCNN: left/right are the sequence shifted by one position, with
    # the vacated slot filled by the constant max_features+1.
    # NOTE(review): np.ones(...)*max_features+1 parses as
    # (ones*max_features)+1, i.e. the pad token id is max_features+1 —
    # confirm the embedding table actually has a row at that index.
    return [np.concatenate((np.ones((np.shape(train_X)[0],1))*max_features+1,train_X[:,1:]),1),train_X,np.concatenate((np.ones((np.shape(train_X)[0],1))*max_features+1,train_X[:,::-1][:,1:]),1)]

def RCNN_train_pred(model, epochs=2):
    # Trains the 3-input RCNN for `epochs` single-epoch rounds under the
    # cyclic-LR callback, then returns (validation, test) prediction arrays.
    # Relies on module-level train_X/val_X/test_X/train_y/val_y and `clr`.
    train_X_list = get_train_list(train_X)
    test_X_list = get_train_list(test_X)
    val_X_list = get_train_list(val_X)
    for e in range(epochs):
        model.fit(train_X_list, train_y, batch_size=512, epochs=1, validation_data=(val_X_list, val_y), callbacks=[clr])
    pred_val_y = model.predict(val_X_list, batch_size=1024, verbose=0)
    pred_test_y = model.predict(test_X_list, batch_size=1024, verbose=0)
    return pred_val_y, pred_test_y
# One-hot-style encoding of the Embarked port code.
# BUG FIX: Embarked values are single letters ('S', 'C', 'Q'); the default
# CountVectorizer token_pattern (r"(?u)\b\w\w+\b") only matches tokens of
# length >= 2, so every real port code was silently encoded as an all-zero
# row and only the multi-character 'notknown' placeholder survived.
# Accepting 1-character tokens restores the intended feature.
vectorizer = CountVectorizer(token_pattern=r"(?u)\b\w+\b")
X_tr_emb = vectorizer.fit_transform(X_train['Embarked'])
X_cv_emb = vectorizer.transform(X_cv['Embarked'])
X_te_emb = vectorizer.transform(test['Embarked'])
Titanic - Machine Learning from Disaster
14,110,105
def train_pred(model, epochs=2):
    """Fit `model` in `epochs` one-epoch rounds under the CLR schedule.

    Uses the module-level train/val/test arrays and `clr` callback;
    returns (validation predictions, test predictions).
    """
    for _ in range(epochs):
        model.fit(train_X, train_y, batch_size=512, epochs=1,
                  validation_data=(val_X, val_y), verbose=1, callbacks=[clr])
    pred_val_y = model.predict([val_X], batch_size=1024, verbose=0)
    pred_test_y = model.predict([test_X], batch_size=1024, verbose=0)
    return pred_val_y, pred_test_y
# One-hot encode Age; unseen categories in cv/test become all-zero rows.
enc = OneHotEncoder(handle_unknown='ignore')
X_tr_age = enc.fit_transform(X_train['Age'].to_numpy().reshape(-1, 1))
X_cv_age = enc.transform(X_cv['Age'].to_numpy().reshape(-1, 1))
X_te_age = enc.transform(test['Age'].to_numpy().reshape(-1, 1))
Titanic - Machine Learning from Disaster
14,110,105
# Tokenise/split the data, then build a 600-d embedding table by
# concatenating GloVe and fastText vectors side by side.
train_X, val_X, test_X, train_y, val_y, word_index = load_and_prec()
# NOTE(review): max_features is rebound here to the full vocabulary size
# and is read as a global by load_glove/load_fasttext below.
max_features = len(word_index)
print(max_features)
embedding_matrix_1 = load_glove(word_index)
embedding_matrix_2 = load_fasttext(word_index)
embedding_matrix = np.concatenate([embedding_matrix_1, embedding_matrix_2], axis = 1)
np.shape(embedding_matrix)
# One-hot encode Fare.  Note this re-fits the shared `enc` instance, so it
# must run before any later cell that relies on the previous fit.
X_tr_fare = enc.fit_transform(X_train['Fare'].to_numpy().reshape(-1, 1))
X_cv_fare = enc.transform(X_cv['Fare'].to_numpy().reshape(-1, 1))
X_te_fare = enc.transform(test['Fare'].to_numpy().reshape(-1, 1))
Titanic - Machine Learning from Disaster
14,110,105
# Accumulator for the (val-prediction, test-prediction) results of each model.
outputs = list()
# One-hot encode SibSp (re-fits the shared `enc` instance).
X_tr_Sbp = enc.fit_transform(X_train['SibSp'].to_numpy().reshape(-1, 1))
X_cv_Sbp = enc.transform(X_cv['SibSp'].to_numpy().reshape(-1, 1))
X_te_Sbp = enc.transform(test['SibSp'].to_numpy().reshape(-1, 1))
Titanic - Machine Learning from Disaster
14,110,105
# Exponentially-decaying cyclic learning rate: 0.001 <-> 0.003 every
# 300 batches, amplitude shrunk by gamma each iteration.
clr = CyclicLR(mode='exp_range', base_lr=0.001, max_lr=0.003,
               step_size=300.0, gamma=0.99994)
# One-hot encode Parch (re-fits the shared `enc` instance).
X_tr_par = enc.fit_transform(X_train['Parch'].to_numpy().reshape(-1, 1))
X_cv_par = enc.transform(X_cv['Parch'].to_numpy().reshape(-1, 1))
X_te_par = enc.transform(test['Parch'].to_numpy().reshape(-1, 1))
Titanic - Machine Learning from Disaster