kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
943,976
# Blend three pre-trained word-embedding tables into one matrix by
# element-wise averaging, then free the originals.
seed_everything()

glove_embeddings = load_glove(word_index)
paragram_embeddings = load_para(word_index)
fasttext_embeddings = load_fasttext(word_index)

embedding_matrix = np.mean(
    [glove_embeddings, paragram_embeddings, fasttext_embeddings], axis=0
)

# Only the blended matrix is kept; reclaim the three source tables.
del glove_embeddings, paragram_embeddings, fasttext_embeddings
gc.collect()

np.shape(embedding_matrix)
# Train a tuned random forest on the category features and write the
# submission file.
X_train = df_train[CATEGORY_COLUMNS].fillna(-1000)
y_train = df_train["Survived"]
X_test = df_test[CATEGORY_COLUMNS].fillna(-1000)

randomf = RandomForestClassifier(
    criterion='gini',
    n_estimators=700,
    min_samples_split=10,
    min_samples_leaf=1,
    max_features='auto',
    oob_score=True,
    random_state=1,
    n_jobs=-1,
)
randomf.fit(X_train, y_train)

Submission['Survived'] = randomf.predict(X_test)

# BUG FIX: the original scored `accuracy_score(y_pred, y_val)`, but `y_pred`
# is never computed in this cell, so the printed number was stale or raised
# a NameError. Report the out-of-bag estimate instead (oob_score=True above).
acc_gd_cv = round(randomf.oob_score_ * 100, 2)
print('Accuracy')
print(acc_gd_cv)

print(Submission.head(10))
Submission.to_csv('finalrandomforest01.csv', sep=',')
print('Random Forest prediction created')
Titanic - Machine Learning from Disaster
943,976
# Pre-compute the stratified CV fold indices once so every model sees the
# same folds; peek at the first three for sanity.
cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED)
splits = list(cv.split(x_train, y_train))
splits[:3]
# Fit a basket of off-the-shelf classifiers and overlay their ROC curves
# on a single plot for comparison.
MLA = [
    ensemble.ExtraTreesClassifier(),
    ensemble.GradientBoostingClassifier(),
    ensemble.RandomForestClassifier(),
    linear_model.LogisticRegressionCV(),
    neighbors.KNeighborsClassifier(),
    svm.SVC(probability=True),
]

index = 1
for alg in MLA:
    predicted = alg.fit(X_train, y_train).predict(X_test)
    fp, tp, th = roc_curve(y_test, predicted)
    roc_auc_mla = auc(fp, tp)
    MLA_name = alg.__class__.__name__
    plt.plot(fp, tp, lw=2, alpha=0.3,
             label='ROC %s(AUC = %0.2f)' % (MLA_name, roc_auc_mla))
    index += 1

plt.title('ROC Curve comparison')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
Titanic - Machine Learning from Disaster
943,976
class CyclicLR(object):
    """Cyclical learning-rate scheduler (triangular / triangular2 / exp_range).

    Oscillates each param group's lr between `base_lr` and `max_lr` with a
    half-cycle of `step_size` batches, following Smith's "Cyclical Learning
    Rates for Training Neural Networks".
    """

    def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,
                 step_size=2000, mode='triangular', gamma=1.,
                 scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer

        # Per-param-group base lrs (scalar broadcast or explicit list).
        if isinstance(base_lr, list) or isinstance(base_lr, tuple):
            if len(base_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} base_lr, got {}".format(
                    len(optimizer.param_groups), len(base_lr)))
            self.base_lrs = list(base_lr)
        else:
            self.base_lrs = [base_lr] * len(optimizer.param_groups)

        # Per-param-group peak lrs, same broadcast rule.
        if isinstance(max_lr, list) or isinstance(max_lr, tuple):
            if len(max_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} max_lr, got {}".format(
                    len(optimizer.param_groups), len(max_lr)))
            self.max_lrs = list(max_lr)
        else:
            self.max_lrs = [max_lr] * len(optimizer.param_groups)

        self.step_size = step_size

        if mode not in ['triangular', 'triangular2', 'exp_range'] \
                and scale_fn is None:
            raise ValueError('mode is invalid and scale_fn is None')

        self.mode = mode
        self.gamma = gamma

        if scale_fn is None:
            # Choose the amplitude-scaling rule implied by `mode`.
            if self.mode == 'triangular':
                self.scale_fn = self._triangular_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = self._triangular2_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = self._exp_range_scale_fn
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode

        # Prime the optimizer's lrs for the first batch.
        self.batch_step(last_batch_iteration + 1)
        self.last_batch_iteration = last_batch_iteration

    def batch_step(self, batch_iteration=None):
        # Advance one batch (or jump to an explicit iteration) and push the
        # recomputed lrs into every param group.
        if batch_iteration is None:
            batch_iteration = self.last_batch_iteration + 1
        self.last_batch_iteration = batch_iteration
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def _triangular_scale_fn(self, x):
        return 1.

    def _triangular2_scale_fn(self, x):
        return 1 / (2. ** (x - 1))

    def _exp_range_scale_fn(self, x):
        return self.gamma ** (x)

    def get_lr(self):
        step_size = float(self.step_size)
        # Cycle number and relative position within the current cycle.
        cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))
        x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)

        lrs = []
        param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)
        for param_group, base_lr, max_lr in param_lrs:
            base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))
            if self.scale_mode == 'cycle':
                lr = base_lr + base_height * self.scale_fn(cycle)
            else:
                lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)
            lrs.append(lr)
        return lrs
# Feature-column groupings used by the different model experiments.
REVISED_NUMERIC_COLUMNS = [
    'Pclass', 'Age', 'SibSp', 'Parch', 'Family_Survival', 'Alone',
    'Title_Master', 'Title_Miss', 'Title_Mr', 'Title_Mrs',
    'Title_Millitary', 'Embarked',
]
SIMPLE_COLUMNS = [
    'Pclass', 'Age', 'SibSp', 'Parch', 'Family_Survival', 'Alone',
    'Sex_female', 'Sex_male', 'Title_Master', 'Title_Miss', 'Title_Mr',
    'Title_Mrs', 'Title_Millitary', 'Embarked',
]
INTERESTING_COLUMNS = [
    'Survived', 'Pclass', 'Age', 'SibSp', 'Parch', 'Title', 'Alone',
    'Mother', 'Family Size', 'Family_Survival', 'Embarked', 'FareBand',
    'TicketRef',
]
CATEGORY_COLUMNS = [
    'Pclass', 'SibSp', 'Parch', 'Family Size', 'Family_Survival', 'Alone',
    'Mother', 'Sex_female', 'Sex_male', 'AgeBand_Child',
    'AgeBand_Young Adult', 'AgeBand_Adult', 'AgeBand_Older Adult',
    'AgeBand_Senior', 'Title_Master', 'Title_Miss', 'Title_Mr', 'Title_Mrs',
    'Title_Millitary', 'NameBand_1', 'NameBand_2', 'NameBand_3', 'NameBand_4',
    'NameBand_5', 'Embarked', 'TicketRef_A', 'TicketRef_C', 'TicketRef_F',
    'TicketRef_L', 'TicketRef_P', 'TicketRef_S', 'TicketRef_W', 'TicketRef_X',
    'HadCabin', 'Free',
]

# Sentinel-impute missing values and hold out a stratified 30% validation set.
data_to_train = df_train[REVISED_NUMERIC_COLUMNS].fillna(-1000)
data_to_test = df_test[REVISED_NUMERIC_COLUMNS].fillna(-1000)
prediction = df_train["Survived"]
X_train, X_val, y_train, y_val = train_test_split(
    data_to_train, prediction, test_size=0.3, random_state=21,
    stratify=prediction)
print('Data Split')
Titanic - Machine Learning from Disaster
943,976
# Hyper-parameters for the capsule network.
embedding_dim = 300
embedding_path = '.. /save/embedding_matrix.npy'
use_pretrained_embedding = True
hidden_size = 60
gru_len = hidden_size
Routings = 4
Num_capsule = 5
Dim_capsule = 5
dropout_p = 0.25
rate_drop_dense = 0.28
LR = 0.001
T_epsilon = 1e-7
num_classes = 30


class Embed_Layer(nn.Module):
    """Embedding lookup with dropout, optionally seeded from a pre-trained
    matrix (controlled by the module-level `use_pretrained_embedding`)."""

    def __init__(self, embedding_matrix=None, vocab_size=None, embedding_dim=300):
        super(Embed_Layer, self).__init__()
        self.encoder = nn.Embedding(vocab_size + 1, embedding_dim)
        if use_pretrained_embedding:
            self.encoder.weight.data.copy_(t.from_numpy(embedding_matrix))

    def forward(self, x, dropout_p=0.25):
        return nn.Dropout(p=dropout_p)(self.encoder(x))


class GRU_Layer(nn.Module):
    """Single bidirectional GRU over the embedded sequence."""

    def __init__(self):
        super(GRU_Layer, self).__init__()
        self.gru = nn.GRU(input_size=300, hidden_size=gru_len, bidirectional=True)

    def init_weights(self):
        # Xavier for input-hidden weights, orthogonal for hidden-hidden,
        # zeros for all biases.
        ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
        hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
        b = (param.data for name, param in self.named_parameters() if 'bias' in name)
        for k in ih:
            nn.init.xavier_uniform_(k)
        for k in hh:
            nn.init.orthogonal_(k)
        for k in b:
            nn.init.constant_(k, 0)

    def forward(self, x):
        return self.gru(x)


class Caps_Layer(nn.Module):
    """Capsule layer with dynamic routing over the GRU outputs."""

    def __init__(self, input_dim_capsule=gru_len * 2, num_capsule=Num_capsule,
                 dim_capsule=Dim_capsule, routings=Routings,
                 kernel_size=(9, 1), share_weights=True,
                 activation='default', **kwargs):
        super(Caps_Layer, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.kernel_size = kernel_size
        self.share_weights = share_weights
        if activation == 'default':
            self.activation = self.squash
        else:
            self.activation = nn.ReLU(inplace=True)
        if self.share_weights:
            self.W = nn.Parameter(
                nn.init.xavier_normal_(t.empty(
                    1, input_dim_capsule, self.num_capsule * self.dim_capsule)))
        else:
            self.W = nn.Parameter(
                t.randn(BATCH_SIZE, input_dim_capsule,
                        self.num_capsule * self.dim_capsule))

    def forward(self, x):
        if self.share_weights:
            u_hat_vecs = t.matmul(x, self.W)
        else:
            print('add later')
        batch_size = x.size(0)
        input_num_capsule = x.size(1)
        u_hat_vecs = u_hat_vecs.view((batch_size, input_num_capsule,
                                      self.num_capsule, self.dim_capsule))
        u_hat_vecs = u_hat_vecs.permute(0, 2, 1, 3)
        b = t.zeros_like(u_hat_vecs[:, :, :, 0])
        # Dynamic-routing iterations: agreement between output and
        # prediction vectors updates the routing logits b.
        for i in range(self.routings):
            b = b.permute(0, 2, 1)
            c = F.softmax(b, dim=2)
            c = c.permute(0, 2, 1)
            b = b.permute(0, 2, 1)
            outputs = self.activation(t.einsum('bij,bijk->bik', (c, u_hat_vecs)))
            if i < self.routings - 1:
                b = t.einsum('bik,bijk->bij', (outputs, u_hat_vecs))
        return outputs

    def squash(self, x, axis=-1):
        # NOTE(review): this normalises by vector length (plus epsilon) —
        # it is not the classic s^2/(1+s^2) squash; confirm intended.
        s_squared_norm = (x ** 2).sum(axis, keepdim=True)
        scale = t.sqrt(s_squared_norm + T_epsilon)
        return x / scale


class Capsule_Main(nn.Module):
    """Full pipeline: embedding -> BiGRU -> capsules -> dense head."""

    def __init__(self, embedding_matrix=None, vocab_size=None):
        super(Capsule_Main, self).__init__()
        self.embed_layer = Embed_Layer(embedding_matrix, vocab_size)
        self.gru_layer = GRU_Layer()
        self.gru_layer.init_weights()
        self.caps_layer = Caps_Layer()
        self.dense_layer = Dense_Layer()

    def forward(self, content):
        content1 = self.embed_layer(content)
        content2, _ = self.gru_layer(content1)
        content3 = self.caps_layer(content2)
        output = self.dense_layer(content3)
        return output
# First-layer models of the stack: each is fit on the training split,
# scored out-of-fold on the validation split (inputs to the second-layer
# stacker) and used to predict the test set.
logreg = LogisticRegression(C=10, solver='newton-cg')
logreg.fit(X_train, y_train)
y_pred_train_logreg = cross_val_predict(logreg, X_val, y_val)
y_pred_test_logreg = logreg.predict(X_test)
print('logreg first layer predicted')

tree = DecisionTreeClassifier(random_state=8, min_samples_leaf=6,
                              max_features=7, max_depth=4,
                              criterion='gini', splitter='best')
tree.fit(X_train, y_train)
y_pred_train_tree = cross_val_predict(tree, X_val, y_val)
y_pred_test_tree = tree.predict(X_test)
print('decision tree first layer predicted')

randomforest = RandomForestClassifier(random_state=8, n_estimators=15,
                                      min_samples_leaf=4, max_features=6,
                                      max_depth=4, criterion='gini')
randomforest.fit(X_train, y_train)
y_pred_train_randomforest = cross_val_predict(randomforest, X_val, y_val)
y_pred_test_randomforest = randomforest.predict(X_test)
print('random forest first layer predicted')

gbk = GradientBoostingClassifier(min_samples_leaf=3, max_features=3, max_depth=3)
gbk.fit(X_train, y_train)
y_pred_train_gbk = cross_val_predict(gbk, X_val, y_val)
y_pred_test_gbk = gbk.predict(X_test)
print('gbk first layer predicted')

knn = KNeighborsClassifier(algorithm='auto', leaf_size=36, metric='minkowski',
                           metric_params=None, n_jobs=1, n_neighbors=12, p=2,
                           weights='uniform')
knn.fit(X_train, y_train)
y_pred_train_knn = cross_val_predict(knn, X_val, y_val)
# BUG FIX: the original read `y_pred_test_knn = gbk.predict(X_test)`, so the
# "knn" test predictions were actually a copy of the gradient-boosting ones.
y_pred_test_knn = knn.predict(X_test)
print('knn first layer predicted')

clf = SVC(C=3, degree=1, kernel='linear', max_iter=1, shrinking=0)
clf.fit(X_train, y_train)
y_pred_train_clf = cross_val_predict(clf, X_val, y_val)
y_pred_test_clf = clf.predict(X_test)
print('clf first layer predicted')
Titanic - Machine Learning from Disaster
943,976
class Attention(nn.Module):
    """Additive attention pooling over the time dimension of a sequence."""

    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        self.features_dim = 0
        weight = torch.zeros(feature_dim, 1)
        nn.init.xavier_uniform_(weight)
        self.weight = nn.Parameter(weight)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        feature_dim = self.feature_dim
        step_dim = self.step_dim
        # Score every timestep, then normalise manually so an optional mask
        # can zero out padded positions before the weighted sum.
        eij = torch.mm(
            x.contiguous().view(-1, feature_dim),
            self.weight
        ).view(-1, step_dim)
        if self.bias:
            eij = eij + self.b
        eij = torch.tanh(eij)
        a = torch.exp(eij)
        if mask is not None:
            a = a * mask
        a = a / torch.sum(a, 1, keepdim=True) + 1e-10
        weighted_input = x * torch.unsqueeze(a, -1)
        return torch.sum(weighted_input, 1)


class NeuralNet(nn.Module):
    """BiLSTM + BiGRU text model combining attention, capsule and pooled
    features with 3 extra dense inputs."""

    def __init__(self):
        super(NeuralNet, self).__init__()
        fc_layer = 16
        fc_layer1 = 16
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(
            torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False
        self.embedding_dropout = nn.Dropout2d(0.1)
        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        self.lstm_attention = Attention(hidden_size * 2, maxlen)
        self.gru_attention = Attention(hidden_size * 2, maxlen)
        self.bn = nn.BatchNorm1d(16, momentum=0.5)
        self.linear = nn.Linear(hidden_size * 8 + 3, fc_layer1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        self.fc = nn.Linear(fc_layer ** 2, fc_layer)
        self.out = nn.Linear(fc_layer, 1)
        self.lincaps = nn.Linear(Num_capsule * Dim_capsule, 1)
        self.caps_layer = Caps_Layer()

    def forward(self, x):
        # x[0]: token ids; x[1]: 3 extra dense features per sample.
        h_embedding = self.embedding(x[0])
        h_embedding = torch.squeeze(
            self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
        h_lstm, _ = self.lstm(h_embedding)
        h_gru, _ = self.gru(h_lstm)

        # Capsule branch.
        content3 = self.caps_layer(h_gru)
        content3 = self.dropout(content3)
        batch_size = content3.size(0)
        content3 = content3.view(batch_size, -1)
        content3 = self.relu(self.lincaps(content3))

        # Attention and pooling branches.
        h_lstm_atten = self.lstm_attention(h_lstm)
        h_gru_atten = self.gru_attention(h_gru)
        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)

        f = torch.tensor(x[1], dtype=torch.float).cuda()
        conc = torch.cat((h_lstm_atten, h_gru_atten, content3, avg_pool, max_pool, f), 1)
        conc = self.relu(self.linear(conc))
        conc = self.bn(conc)
        conc = self.dropout(conc)
        out = self.out(conc)
        return out
# Soft-voting ensemble over the five tuned first-layer estimators.
voters = [
    ('logreg', logreg_cv.best_estimator_),
    ('gbk', gbk_cv.best_estimator_),
    ('tree', tree_cv.best_estimator_),
    ('randomforest', randomforest_cv.best_estimator_),
    ('knn', knn_cv.best_estimator_),
]
votingC = VotingClassifier(estimators=voters, voting='soft', n_jobs=4)
votingC = votingC.fit(X_train, y_train)

Submission['Survived'] = votingC.predict(X_test)
Submission.to_csv('Votingclassifier02.csv', sep=',')
print('Voting Classifier Ensemble File created')
print(Submission.head())
Titanic - Machine Learning from Disaster
943,976
class MyDataset(Dataset):
    """Dataset wrapper that also returns the sample index with each item."""

    def __init__(self, dataset):
        self.dataset = dataset

    def __getitem__(self, index):
        data, target = self.dataset[index]
        return data, target, index

    def __len__(self):
        return len(self.dataset)
# Stack the first-layer out-of-fold predictions into a second-layer model.
second_layer_train = pd.DataFrame({
    'Logistic Regression': y_pred_train_logreg.ravel(),
    'Gradient Boosting': y_pred_train_gbk.ravel(),
    'Decision Tree': y_pred_train_tree.ravel(),
    'Random Forest': y_pred_train_randomforest.ravel(),
})

X_train_second = np.concatenate((
    y_pred_train_logreg.reshape(-1, 1),
    y_pred_train_gbk.reshape(-1, 1),
    y_pred_train_tree.reshape(-1, 1),
    y_pred_train_randomforest.reshape(-1, 1),
), axis=1)
X_test_second = np.concatenate((
    y_pred_test_logreg.reshape(-1, 1),
    y_pred_test_gbk.reshape(-1, 1),
    y_pred_test_tree.reshape(-1, 1),
    y_pred_test_randomforest.reshape(-1, 1),
), axis=1)

# Second layer: a shallow decision tree over the stacked predictions.
tree = DecisionTreeClassifier(random_state=8, min_samples_leaf=6,
                              max_depth=4,
                              criterion='gini').fit(X_train_second, y_val)
Submission['Survived'] = tree.predict(X_test_second)
print(Submission.head())
print('Tuned Ensemble model prediction complete')
Titanic - Machine Learning from Disaster
943,976
<data_type_conversions><EOS>
# Persist the tuned ensemble predictions for submission.
Submission.to_csv('tunedensemblesubmission04.csv', sep=',')
print('tuned Ensemble File created')
Titanic - Machine Learning from Disaster
5,214,044
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
# Load the Kaggle Titanic train/test splits.
train_data = pd.read_csv('/kaggle/input/train.csv')
test_data = pd.read_csv('/kaggle/input/test.csv')
Titanic - Machine Learning from Disaster
5,214,044
def bestThresshold(y_train, train_preds):
    """Grid-search the binarisation threshold in [0.1, 0.5] that maximises
    F1 on the training predictions; return the best threshold."""
    best_thr = 0
    best_f1 = 0
    for thr in tqdm(np.arange(0.1, 0.501, 0.01)):
        score = f1_score(y_train, np.array(train_preds) > thr)
        if score > best_f1:
            best_thr = thr
            best_f1 = score
    print('best threshold is {:.4f} with F1 score: {:.4f}'.format(best_thr, best_f1))
    return best_thr

delta = bestThresshold(y_train, train_preds)
# Per-dataset feature engineering; idx 0 is train (has Survived), idx 1 test.
full_data = [train_data, test_data]
for idx, dataset in enumerate(full_data):
    # One-hot Pclass, dropping the raw column and the reference level.
    dataset = pd.concat([pd.get_dummies(dataset['Pclass'], prefix='Pclass'), dataset], axis=1)
    dataset = dataset.drop(['Pclass', 'Pclass_3'], axis=1)

    # Normalise titles, then map them to ordinal codes (unknown -> 0).
    dataset['Title'] = dataset['Name'].apply(get_title)
    dataset['Title'] = dataset['Title'].replace(
        ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev',
         'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
    title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
    dataset = dataset.drop(['Name'], axis=1)

    # One-hot sex, keeping only the female indicator.
    dataset = pd.concat([pd.get_dummies(dataset['Sex'], prefix='Sex'), dataset], axis=1)
    dataset = dataset.drop(['Sex', 'Sex_male'], axis=1)

    # Age: median-impute, then bucket into 5 equal-width bands.
    dataset['Age'].fillna(dataset['Age'].median(), inplace=True)
    dataset['AgeBin'] = pd.cut(dataset['Age'].astype(int), 5, labels=[1, 2, 3, 4, 5])
    dataset = dataset.drop(['Age'], axis=1)

    dataset = dataset.drop(['Embarked'], axis=1)

    # Family size and an alone flag derived from it.
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
    dataset['IsAlone'] = 1
    dataset['IsAlone'].loc[dataset['FamilySize'] > 1] = 0
    dataset = dataset.drop(['SibSp', 'Parch'], axis=1)

    # Columns not used by this model.
    dataset = dataset.drop(['Fare'], axis=1)
    dataset = dataset.drop(['Cabin'], axis=1)
    dataset = dataset.drop(['Ticket'], axis=1)

    if idx == 0:
        X_train, y_train = (dataset.loc[:, dataset.columns != 'Survived'],
                            dataset.loc[:, dataset.columns == 'Survived'])
    else:
        submission_id = dataset['PassengerId']
        X_test = dataset
Titanic - Machine Learning from Disaster
5,214,044
# Binarise the raw test scores at the tuned threshold and save.
submission = df_test[['qid']].copy()
submission['prediction'] = (test_preds > delta).astype(int)
submission.to_csv('submission.csv', index=False)
# 5-fold CV accuracy for the tuned random forest.
# BUG FIX: the original read `clf = clf = RandomForestClassifier(...)`
# (doubled assignment), and passed `random_state` to KFold without
# shuffle=True — a combination modern scikit-learn rejects with a
# ValueError because the seed would have no effect.
kf = KFold(n_splits=5, shuffle=True, random_state=0)
clf = RandomForestClassifier(n_estimators=400, max_depth=4,
                             min_samples_split=4, random_state=0)
for train_index, test_index in kf.split(X_train):
    __kf_X_train = X_train.values[train_index]
    __kf_X_test = X_train.values[test_index]
    __kf_y_train = y_train.values[train_index].ravel()
    __kf_y_test = y_train.values[test_index].ravel()
    clf.fit(__kf_X_train, __kf_y_train)
    print(accuracy_score(__kf_y_test, clf.predict(__kf_X_test)))
Titanic - Machine Learning from Disaster
5,214,044
<feature_engineering><EOS>
# Assemble the final submission frame from the fitted classifier.
y_pred = pd.Series(clf.predict(X_test))
submission = pd.concat([submission_id, y_pred], axis=1)
submission = submission.rename(columns={0: 'Survived'})
# BUG FIX: the output filename was misspelled 'submisson.csv'.
submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
6,678,936
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<train_model>
%matplotlib inline warnings.filterwarnings("ignore" )
Titanic - Machine Learning from Disaster
6,678,936
class EarlyStopping:
    """Stop training once validation loss has not improved for `patience`
    consecutive calls; saves the best model to 'checkpoint.pt'."""

    def __init__(self, patience=7, verbose=False):
        self.patience = patience        # calls to wait after last improvement
        self.verbose = verbose
        self.counter = 0                # calls since last improvement
        self.best_score = None
        self.early_stop = False
        # FIX: np.Inf alias was removed in NumPy 2.0; use np.inf.
        self.val_loss_min = np.inf

    def __call__(self, val_loss, model):
        score = -val_loss
        if self.best_score is None:
            # First observation always counts as an improvement.
            # FIX: the original also printed the "Validation loss decreased"
            # message here, duplicating the identical print that
            # save_checkpoint emits — it now prints once.
            self.best_score = score
            self.save_checkpoint(val_loss, model)
        elif score < self.best_score:
            self.counter += 1
            if self.verbose:
                print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            self.best_score = score
            self.save_checkpoint(val_loss, model)
            self.counter = 0

    def save_checkpoint(self, val_loss, model):
        """Persist the model's weights and record the new loss minimum."""
        if self.verbose:
            print(f'Validation loss decreased({self.val_loss_min:.6f} --> {val_loss:.6f} ).Saving model...')
        torch.save(model.state_dict(), 'checkpoint.pt')
        self.val_loss_min = val_loss
# Input directory for the Kaggle Titanic data.
# BUG FIX: the original read ".. /input/titanic/" — a space inside the
# relative path — which resolves to nothing.
path = "../input/titanic/"
Titanic - Machine Learning from Disaster
6,678,936
# ---- CUDA status and batch iterators ---------------------------------
torch.cuda.init()
torch.cuda.empty_cache()
print('CUDA MEM:', torch.cuda.memory_allocated())
print('cuda:', torch.cuda.is_available())
print('cude index:', torch.cuda.current_device())

batch_size = 512
print('batch_size:', batch_size)
print('---')

train_loader = torchtext.data.BucketIterator(dataset=train, batch_size=batch_size, shuffle=True, sort=False)
val_loader = torchtext.data.BucketIterator(dataset=val, batch_size=batch_size, shuffle=False, sort=False)
test_loader = torchtext.data.BucketIterator(dataset=test, batch_size=batch_size, shuffle=False, sort=False)


class Sentiment(nn.Module):
    """Text classifier: frozen embeddings -> 3/4/5-gram CNNs -> BiLSTM -> MLP."""

    def __init__(self, vocab_vectors, padding_idx, batch_size):
        super(Sentiment, self).__init__()
        print('Vocab vectors size:', vocab_vectors.shape)
        self.batch_size = batch_size
        self.hidden_dim = 128
        self.embedding = nn.Embedding.from_pretrained(vocab_vectors)
        self.embedding.weight.requires_grad = False
        self.embedding.padding_idx = padding_idx
        self.cnns = nn.ModuleList([
            nn.Conv1d(in_channels=vocab_vectors.shape[1], out_channels=32, kernel_size=k)
            for k in [3, 4, 5]
        ])
        self.lstm = nn.LSTM(input_size=32, hidden_size=self.hidden_dim,
                            bidirectional=True, batch_first=True)
        self.linear1 = nn.Linear(2 * self.hidden_dim, self.hidden_dim)
        self.linear2 = nn.Linear(self.hidden_dim, 1)
        self.dropout = nn.Dropout(0.2)

    @staticmethod
    def conv_and_max_pool(x, conv):
        return F.max_pool1d(F.elu(conv(x)), 1).permute(0, 2, 1)

    def forward(self, x):
        hidden = (torch.zeros(2, x.shape[0], self.hidden_dim).cuda(),
                  torch.zeros(2, x.shape[0], self.hidden_dim).cuda())
        e = self.embedding(x)
        e = e.permute(0, 2, 1)
        cnn_outs = []
        for conv in self.cnns:
            f = self.conv_and_max_pool(e, conv)
            cnn_outs.append(f)
        out = torch.cat(cnn_outs, dim=1).cuda()
        _, hidden = self.lstm(out, hidden)
        # Concatenate the final forward/backward hidden states.
        out = torch.cat((hidden[0][-2, :, :], hidden[0][-1, :, :]), dim=1).cuda()
        return self.linear2(self.dropout(F.relu(self.linear1(out))))


model = Sentiment(text.vocab.vectors,
                  padding_idx=text.vocab.stoi[text.pad_token],
                  batch_size=batch_size).cuda()
print(model)
print('-' * 80)

early_stopping = EarlyStopping(patience=2, verbose=True)
loss_function = nn.BCEWithLogitsLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=1e-3)


def get_lr(optimizer):
    # Current lr of the first param group.
    for param_group in optimizer.param_groups:
        return param_group['lr']


losses = []
val_losses = []
epoch_acc = []
epoch_val_acc = []
lrs = []

for epoch in range(100):
    epoch_losses = []
    epoch_val_losses = []
    preds = []
    val_preds = []
    targets = []
    acc = []

    # ---- training pass ----
    model.train()
    for batch, train_batch in enumerate(list(iter(train_loader)), 1):
        optimizer.zero_grad()
        y_pred = model(train_batch.text.cuda()).squeeze(1)
        y_numpy_pred = torch.sigmoid(y_pred).cpu().detach().numpy()
        preds += y_numpy_pred.tolist()
        y_true = train_batch.target.float().cuda()
        y_numpy_true = train_batch.target.cpu().detach().numpy()
        targets += y_numpy_true.tolist()
        loss = loss_function(y_pred, y_true)
        epoch_losses.append(loss.item())
        loss.backward()
        optimizer.step()
        lrs.append(get_lr(optimizer))
        acc.append(accuracy_score(y_numpy_true, np.round(y_numpy_pred)))
        if batch % 100 == 0:
            print('\rtraining(batch,loss,acc)| ', batch, ' ===>', loss.item(), ' acc ', np.mean(acc), end='')

    losses.append(np.mean(epoch_losses))
    targets = np.array(targets)
    preds = np.array(preds)
    search_result = threshold_search(targets, preds)
    train_f1 = search_result['f1']
    epoch_acc.append(np.mean(acc))

    # ---- validation pass ----
    targets = []
    val_acc = []
    model.eval()
    with torch.no_grad():
        for batch, val_batch in enumerate(list(val_loader), 1):
            y_pred = model(val_batch.text.cuda()).squeeze(1)
            y_numpy_pred = torch.sigmoid(y_pred).cpu().detach().numpy()
            val_preds += y_numpy_pred.tolist()
            y_true = val_batch.target.float().cuda()
            y_numpy_true = val_batch.target.cpu().detach().numpy()
            targets += y_numpy_true.tolist()
            val_loss = loss_function(y_pred, y_true)
            epoch_val_losses.append(val_loss.item())
            val_acc.append(accuracy_score(y_numpy_true, np.round(y_numpy_pred)))
            if batch % 100 == 0:
                print('\rvalidation(batch,acc)| ', batch, ' ===>', np.mean(val_acc), end='')

    val_losses.append(np.mean(epoch_val_losses))
    epoch_val_acc.append(np.mean(val_acc))
    targets = np.array(targets)
    val_preds = np.array(val_preds)
    search_result = threshold_search(targets, val_preds)
    val_f1 = search_result['f1']
    print(' EPOCH: ', epoch, ' has acc of ', epoch_acc[-1], ' ,has loss of ', losses[-1],
          ' ,f1 of ', train_f1, ' val acc of ', epoch_val_acc[-1],
          ' ,val loss of ', val_losses[-1], ' ,val f1 of ', val_f1)
    print('-' * 80)

    # Early stopping is driven by 1 - val F1 (lower is better).
    if early_stopping.early_stop:
        print("Early stopping at ", epoch, " epoch")
        break
    else:
        early_stopping(1. - val_f1, model)

print('Training finished.... ')
# Cross-validation / train-test split configuration.
cv_n_split = 2
random_state = 0
test_train_split_part = 0.15
Titanic - Machine Learning from Disaster
6,678,936
# Rebuild the model and restore the best checkpoint saved by EarlyStopping.
print(os.listdir())
model = Sentiment(text.vocab.vectors,
                  padding_idx=text.vocab.stoi[text.pad_token],
                  batch_size=batch_size).cuda()
model.load_state_dict(torch.load('checkpoint.pt'))
# Metric registry and the subset evaluated in this notebook.
metrics_all = {1: 'r2_score', 2: 'acc', 3: 'rmse', 4: 're'}
metrics_now = [1, 2, 3, 4]
Titanic - Machine Learning from Disaster
6,678,936
# Score the submission set in batches and binarise at the tuned threshold.
print('Threshold:', search_result['threshold'])
submission_list = list(torchtext.data.BucketIterator(
    dataset=submission_x, batch_size=batch_size, sort=False, train=False))
pred = []
with torch.no_grad():
    for submission_batch in submission_list:
        model.eval()
        x = submission_batch.text.cuda()
        pred += torch.sigmoid(model(x).squeeze(1)).cpu().data.numpy().tolist()
pred = np.array(pred)

df_subm = pd.DataFrame()
# Map numericised qids back to their string form.
df_subm['qid'] = [qid.vocab.itos[j]
                  for i in submission_list
                  for j in i.qid.view(-1).numpy()]
df_subm['prediction'] = (pred > search_result['threshold']).astype(int)
print(df_subm.head())
df_subm.to_csv('submission.csv', index=False)
# Load Titanic data indexed by PassengerId, plus the sample submission.
traindf = pd.read_csv(path + 'train.csv').set_index('PassengerId')
testdf = pd.read_csv(path + 'test.csv').set_index('PassengerId')
submission = pd.read_csv(path + 'gender_submission.csv')
Titanic - Machine Learning from Disaster
6,678,936
def seed_everything(seed=1234): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(6017) print('Seeding done...' )<load_from_csv>
target_name = 'Survived'
Titanic - Machine Learning from Disaster
6,678,936
text = torchtext.data.Field(lower=True, batch_first=True, tokenize=word_tokenize, fix_length=100) qid = torchtext.data.Field() target = torchtext.data.Field(sequential=False, use_vocab=False, is_target=True) train_dataset = torchtext.data.TabularDataset(path='.. /input/train.csv', format='csv', fields={'question_text':('text',text), 'target':('target',target)}) train, test = train_dataset.split(split_ratio=[0.8,0.2],stratified=True,strata_field='target',random_state=random.getstate()) submission_x = torchtext.data.TabularDataset(path='.. /input/test.csv', format='csv', fields={'qid':('qid', qid), 'question_text':('text', text)}) text.build_vocab(train_dataset, submission_x, min_freq=3) qid.build_vocab(submission_x) print('train dataset len:',len(train_dataset)) print('train len:',len(train)) print('test len:',len(test))<feature_engineering>
df = pd.concat([traindf, testdf], axis=0, sort=False) df['Title'] = df.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip() df['Title'] = df.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip() df['IsWomanOrBoy'] =(( df.Title == 'Master')|(df.Sex == 'female')) df['LastName'] = df.Name.str.split(',' ).str[0] family = df.groupby(df.LastName ).Survived df['WomanOrBoyCount'] = family.transform(lambda s: s[df.IsWomanOrBoy].fillna(0 ).count()) df['WomanOrBoyCount'] = df.mask(df.IsWomanOrBoy, df.WomanOrBoyCount - 1, axis=0) df['FamilySurvivedCount'] = family.transform(lambda s: s[df.IsWomanOrBoy].fillna(0 ).sum()) df['FamilySurvivedCount'] = df.mask(df.IsWomanOrBoy, df.FamilySurvivedCount - \ df.Survived.fillna(0), axis=0) df['WomanOrBoySurvived'] = df.FamilySurvivedCount / df.WomanOrBoyCount.replace(0, np.nan) df.WomanOrBoyCount = df.WomanOrBoyCount.replace(np.nan, 0) df['Alone'] =(df.WomanOrBoyCount == 0 )
Titanic - Machine Learning from Disaster
6,678,936
glove = torchtext.vocab.Vectors('.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt') text.vocab.set_vectors(glove.stoi, glove.vectors, dim=300 )<define_variables>
df['Title'] = df['Title'].replace('Ms','Miss') df['Title'] = df['Title'].replace('Mlle','Miss') df['Title'] = df['Title'].replace('Mme','Mrs') df['Embarked'] = df['Embarked'].fillna('S') med_fare = df.groupby(['Pclass', 'Parch', 'SibSp'] ).Fare.median() [3][0][0] df['Fare'] = df['Fare'].fillna(med_fare) df['famous_cabin'] = df["Cabin"].apply(lambda x: 0 if type(x)== float else 1) df['Deck'] = df['Cabin'].apply(lambda s: s[0] if pd.notnull(s)else 'M') df.loc[(df['Deck'] == 'T'), 'Deck'] = 'A' df['Family_Size'] = df['SibSp'] + df['Parch'] + 1 df['Name_length'] = df['Name'].apply(len) df.WomanOrBoySurvived = df.WomanOrBoySurvived.fillna(0) df.WomanOrBoyCount = df.WomanOrBoyCount.fillna(0) df.FamilySurvivedCount = df.FamilySurvivedCount.fillna(0) df.Alone = df.Alone.fillna(0 )
Titanic - Machine Learning from Disaster
6,678,936
batch_size = 512 print('batch_size:',batch_size) print('---') train_loader = torchtext.data.BucketIterator(dataset=train, batch_size=batch_size, shuffle=True, sort=False) test_loader = torchtext.data.BucketIterator(dataset=test, batch_size=batch_size, shuffle=False, sort=False) <set_options>
cols_to_drop = ['Name','Ticket','Cabin'] df = df.drop(cols_to_drop, axis=1 )
Titanic - Machine Learning from Disaster
6,678,936
# CUDA warm-up / diagnostics.
torch.cuda.init()
torch.cuda.empty_cache()
print('CUDA MEM:',torch.cuda.memory_allocated())
print('cuda:', torch.cuda.is_available())
print('cude index:',torch.cuda.current_device())

class SentimentLSTM(nn.Module):
    # Bidirectional single-layer LSTM over frozen pretrained embeddings;
    # the final hidden states of both directions are concatenated and fed
    # through a 2-layer MLP head with dropout.  Emits raw logits.
    def __init__(self,vocab_vectors,padding_idx,batch_size):
        super(SentimentLSTM,self ).__init__()
        print('Vocab vectors size:',vocab_vectors.shape)
        self.batch_size = batch_size
        self.hidden_dim = 128
        # n_layers here doubles as num_layers*num_directions (1 layer x 2
        # directions), which is why the h0/c0 shapes below line up.
        self.n_layers = 2
        self.embedding = nn.Embedding.from_pretrained(vocab_vectors)
        self.embedding.weight.requires_grad = False   # keep embeddings frozen
        self.embedding.padding_idx = padding_idx
        self.lstm = nn.LSTM(input_size=vocab_vectors.shape[1], hidden_size=self.hidden_dim, bidirectional=True,batch_first=True)
        self.linear1 = nn.Linear(self.n_layers*self.hidden_dim,self.hidden_dim)
        self.linear2 = nn.Linear(self.hidden_dim,1)
        self.dropout = nn.Dropout(0.2)
    def forward(self,x):
        # Fresh zero (h0, c0) every call.
        hidden =(torch.zeros(self.n_layers, x.shape[0], self.hidden_dim ).cuda() , torch.zeros(self.n_layers, x.shape[0], self.hidden_dim ).cuda())
        e = self.embedding(x)
        _, hidden = self.lstm(e, hidden)
        # Concatenate last hidden states of the two directions.
        out = torch.cat(( hidden[0][-2,:,:], hidden[0][-1,:,:]), dim=1 ).cuda()
        out = self.linear1(out)
        return self.linear2(self.dropout(F.relu(out)))

class SentimentBase(nn.Module):
    # Baseline: trainable embedding flattened over the fixed 100-token
    # sequence, then a 2-layer MLP.
    # NOTE(review): vocab size 75966 and length 100 are hard-coded to match
    # the Fields built earlier -- keep in sync if min_freq/fix_length change.
    def __init__(self):
        super(SentimentBase,self ).__init__()
        self.embedding = nn.Embedding(75966,300)
        self.linear1 = nn.Linear(300*100,128)
        self.linear2 = nn.Linear(128,1)
    def forward(self,x):
        emb = self.embedding(x)
        pooled = emb.reshape(( emb.shape[0],emb.shape[1]*emb.shape[2]))
        out = self.linear1(pooled)
        out = self.linear2(F.relu(out))
        return out

class SentimentCNN(nn.Module):
    # TextCNN: parallel Conv1d branches (kernel sizes 3/4/5) over frozen
    # embeddings, max-pooled over time and concatenated into an MLP head.
    def __init__(self,vocab_vectors,padding_idx,batch_size):
        super(SentimentCNN,self ).__init__()
        print('Vocab vectors size:',vocab_vectors.shape)
        self.batch_size = batch_size
        self.hidden_dim = 128
        self.embedding = nn.Embedding.from_pretrained(vocab_vectors)
        self.embedding.weight.requires_grad = False
        self.embedding.padding_idx = padding_idx
        self.cnns = nn.ModuleList([nn.Conv1d(in_channels=vocab_vectors.shape[1], out_channels=self.hidden_dim, kernel_size=k)for k in [3,4,5]])
        self.linear1 = nn.Linear(3*self.hidden_dim,self.hidden_dim)
        self.linear2 = nn.Linear(self.hidden_dim,1)
        self.dropout = nn.Dropout(0.2)
    @staticmethod
    def conv_and_max_pool(x, conv):
        # Convolve, ReLU, then global max-pool over the time dimension.
        return F.relu(conv(x ).permute(0, 2, 1 ).max(1)[0])
    def forward(self,x):
        e = self.embedding(x)
        e = e.permute(0,2,1)   # (batch, emb_dim, seq) layout for Conv1d
        cnn_outs = []
        for conv in self.cnns:
            f =self.conv_and_max_pool(e,conv)
            cnn_outs.append(f)
        out = torch.cat(cnn_outs, dim=1 ).cuda()
        out = self.linear1(out)
        return self.linear2(self.dropout(F.relu(out)))

class SentimentGRU(nn.Module):
    # Bidirectional single-layer GRU twin of SentimentLSTM (single hidden
    # state instead of (h, c)).
    def __init__(self,vocab_vectors,padding_idx,batch_size):
        super(SentimentGRU,self ).__init__()
        print('Vocab vectors size:',vocab_vectors.shape)
        self.batch_size = batch_size
        self.hidden_dim = 128
        self.n_layers = 2
        self.embedding = nn.Embedding.from_pretrained(vocab_vectors)
        self.embedding.weight.requires_grad = False
        self.embedding.padding_idx = padding_idx
        self.gru = nn.GRU(input_size=vocab_vectors.shape[1], hidden_size=self.hidden_dim, bidirectional=True,batch_first=True)
        self.linear1 = nn.Linear(self.n_layers*self.hidden_dim,self.hidden_dim)
        self.linear2 = nn.Linear(self.hidden_dim,1)
        self.dropout = nn.Dropout(0.2)
    def forward(self,x):
        hidden = torch.zeros(self.n_layers, x.shape[0], self.hidden_dim ).cuda()
        e = self.embedding(x)
        _, hidden = self.gru(e, hidden)
        out = torch.cat(( hidden[-2,:,:], hidden[-1,:,:]), dim=1 ).cuda()
        out = self.linear1(out)
        return self.linear2(self.dropout(F.relu(out)))

def train(model,filename,epochs=3):
    # Simple trainer: BCE-with-logits + Adam for `epochs` passes over
    # train_loader, printing mean loss per epoch; saves the final
    # state_dict to `filename`.
    loss_function = nn.BCEWithLogitsLoss().cuda()
    optimizer = optim.Adam(model.parameters())
    for epoch in range(epochs):
        model.train()
        avg_loss = 0
        for batch,train_batch in enumerate(list(iter(train_loader)) ,1):
            optimizer.zero_grad()
            y_pred = model(train_batch.text.cuda() ).squeeze(1)
            y_true = train_batch.target.float().cuda()
            loss = loss_function(y_pred,y_true)
            avg_loss += loss.item()
            loss.backward()
            optimizer.step()
        print('EPOCH: ',epoch,': ',avg_loss/batch)
        print('-'*80)
    torch.save(model.state_dict() , filename)
    print('Training finished.... ')
Y = df.Survived.loc[traindf.index].astype(int) X_train, X_test = df.loc[traindf.index], df.loc[testdf.index] X_test = X_test.drop(['Survived'], axis = 1 )
Titanic - Machine Learning from Disaster
6,678,936
model = SentimentLSTM(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() print(model) print('-'*80) train(model,'lstm.pt',3) print('-'*80 )<train_model>
print(X_train.isnull().sum())
Titanic - Machine Learning from Disaster
6,678,936
model = SentimentBase().cuda() print(model) print('-'*80) train(model,'base.pt',5) print('-'*80 )<train_on_grid>
numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64'] categorical_columns = [] features = X_train.columns.values.tolist() for col in features: if X_train[col].dtype in numerics: continue categorical_columns.append(col) categorical_columns
Titanic - Machine Learning from Disaster
6,678,936
model = SentimentCNN(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() print(model) print('-'*80) train(model,'cnn.pt',3) print('-'*80 )<train_on_grid>
for col in categorical_columns: if col in X_train.columns: le = LabelEncoder() le.fit(list(X_train[col].astype(str ).values)+ list(X_test[col].astype(str ).values)) X_train[col] = le.transform(list(X_train[col].astype(str ).values)) X_test[col] = le.transform(list(X_test[col].astype(str ).values))
Titanic - Machine Learning from Disaster
6,678,936
model = SentimentGRU(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() print(model) print('-'*80) train(model,'gru.pt',3) print('-'*80 )<choose_model_class>
X_train = X_train.reset_index() X_test = X_test.reset_index() X_dropna_categor = X_train.dropna().astype(int) Xtest_dropna_categor = X_test.dropna().astype(int )
Titanic - Machine Learning from Disaster
6,678,936
def disable_grad(layer): for p in layer.parameters() : p.requires_grad=False class Ensemble(nn.Module): def __init__(self,vocab_vectors,padding_idx,batch_size): super(Ensemble,self ).__init__() self.lstm = SentimentLSTM(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() self.lstm.load_state_dict(torch.load('lstm.pt')) disable_grad(self.lstm) self.gru = SentimentGRU(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() self.gru.load_state_dict(torch.load('gru.pt')) disable_grad(self.gru) self.cnn = SentimentCNN(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() self.cnn.load_state_dict(torch.load('cnn.pt')) disable_grad(self.cnn) self.base = SentimentBase().cuda() self.base.load_state_dict(torch.load('base.pt')) disable_grad(self.base) self.l_in = nn.Linear(4,1024,bias=False) self.l_out = nn.Linear(1024,1,bias=False) def forward(self,x): o1 = self.lstm(x) o2 = self.gru(x) o3 = self.cnn(x) o4 = self.base(x) out = torch.cat([o1,o2,o3,o4],1) return self.l_out(F.relu(self.l_in(out))) model = Ensemble(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() print(model) print('-'*80) train(model,'ensemble.pt', 3) print('-'*80 )<load_pretrained>
Sex_female_Survived = X_dropna_categor.loc[(X_dropna_categor.Sex == 0)&(X_dropna_categor.Survived == 1)] Sex_female_NoSurvived = X_dropna_categor.loc[(X_dropna_categor.Sex == 0)&(X_dropna_categor.Survived == 0)] X_Sex_male_Survived = X_dropna_categor.loc[(X_dropna_categor.Sex == 1)&(X_dropna_categor.Survived == 1)] X_Sex_male_NoSurvived = X_dropna_categor.loc[(X_dropna_categor.Sex == 1)&(X_dropna_categor.Survived == 0)] X_test_male = Xtest_dropna_categor.loc[Xtest_dropna_categor.Sex == 1] X_test_female = Xtest_dropna_categor.loc[Xtest_dropna_categor.Sex == 0] female_Survived_mean, female_NoSurvived_mean = Sex_female_Survived['Age'].mean() , Sex_female_NoSurvived['Age'].mean() male_Survived_mean, male_NoSurvived_mean = X_Sex_male_Survived['Age'].mean() , X_Sex_male_NoSurvived['Age'].mean() female_Survived_std, female_NoSurvived_std = Sex_female_Survived['Age'].std() , Sex_female_NoSurvived['Age'].std() male_Survived_std, male_NoSurvived_std = X_Sex_male_Survived['Age'].std() , X_Sex_male_NoSurvived['Age'].std() female_std, female_mean = X_test_female['Age'].std() , X_test_female['Age'].mean() male_std, male_mean = X_test_male['Age'].std() , X_test_male['Age'].mean() X_train['Survived'] = X_train['Survived'].astype(int )
Titanic - Machine Learning from Disaster
6,678,936
print(os.listdir()) model = Ensemble(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size ).cuda() model.load_state_dict(torch.load('ensemble.pt')) <init_hyperparams>
def derf(sample, mean, std): age_shape = sample['Age'].shape[0] if age_shape > 0: standard_error_ofthe_mean = std / math.sqrt(age_shape) random_mean = round(random.uniform(mean-(1.96*standard_error_ofthe_mean), mean+(1.96*standard_error_ofthe_mean)) , 2) else: random_mean = 0 return random_mean
Titanic - Machine Learning from Disaster
6,678,936
print('Threshold:',search_result['threshold']) submission_list = list(torchtext.data.BucketIterator(dataset=submission_x, batch_size=batch_size, sort=False, train=False)) pred = [] with torch.no_grad() : for submission_batch in submission_list: model.eval() x = submission_batch.text.cuda() pred += torch.sigmoid(model(x ).squeeze(1)).cpu().data.numpy().tolist() pred = np.array(pred) df_subm = pd.DataFrame() df_subm['qid'] = [qid.vocab.itos[j] for i in submission_list for j in i.qid.view(-1 ).numpy() ] df_subm['prediction'] =(pred > search_result['threshold'] ).astype(int) print(df_subm.head()) df_subm.to_csv('submission.csv', index=False )<import_modules>
for i in X_train.loc[(X_train['Sex']==0)&(X_train['Survived']==1)&(X_train['Age'].isnull())].index: X_train.at[i, 'Age'] = derf(Sex_female_Survived, female_Survived_mean, female_Survived_std) for h in X_train.loc[(X_train['Sex']==0)&(X_train['Survived']==0)&(X_train['Age'].isnull())].index: X_train.at[h, 'Age'] = derf(Sex_female_NoSurvived, female_NoSurvived_mean, female_NoSurvived_std) for l in X_train.loc[(X_train['Sex']==1)&(X_train['Survived']==1)&(X_train['Age'].isnull())].index: X_train.at[l, 'Age'] = derf(X_Sex_male_Survived, male_Survived_mean, male_Survived_std) for b in X_train.loc[(X_train['Sex']==1)&(X_train['Survived']==0)&(X_train['Age'].isnull())].index: X_train.at[b, 'Age'] = derf(X_Sex_male_NoSurvived, male_NoSurvived_mean, male_NoSurvived_std) for p in X_test.loc[(X_test['Sex']==1)&(X_test['Age'].isnull())].index: X_test.at[p, 'Age'] = derf(X_test_male, male_mean, male_std) for y in X_test.loc[(X_test['Sex']==0)&(X_test['Age'].isnull())].index: X_test.at[y, 'Age'] = derf(X_test_female, female_mean, female_std )
Titanic - Machine Learning from Disaster
6,678,936
tqdm.pandas(desc='Progress') <define_variables>
X_train = X_train.drop(['Survived'], axis = 1 )
Titanic - Machine Learning from Disaster
6,678,936
embed_size = 300 max_features = 120000 maxlen = 70 batch_size = 512 n_epochs = 5 n_splits = 5 SEED = 1029<set_options>
print(X_train.isnull().sum()) print(X_test.isnull().sum() )
Titanic - Machine Learning from Disaster
6,678,936
def seed_everything(seed=1029): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything()<features_selection>
def fe_creation(df): df['Age2'] = df['Age']//10 df['Fare2'] = df['Fare']//10 for i in ['Sex', 'Family_Size', 'Fare2','Alone', 'famous_cabin']: for j in ['Age2','Title', 'Embarked', 'Deck']: df[i + "_" + j] = df[i].astype('str')+ "_" + df[j].astype('str') return df X_train = fe_creation(X_train) X_test = fe_creation(X_test )
Titanic - Machine Learning from Disaster
6,678,936
# Embedding-matrix builders for GloVe / fastText / paragram.  Each returns
# an (nb_words, 300) matrix: known words take their pretrained vector, the
# rest keep a random normal init matched to the embedding's mean/std.
# Fix: the original OOV guard was `if i >= max_features`, but the matrix
# only has nb_words = min(max_features, len(word_index)) rows, so any word
# index >= nb_words (possible when the vocab is smaller than max_features,
# since Tokenizer indices start at 1) raised IndexError.  The guard now
# uses nb_words; behavior is identical whenever the vocab fills the cap.
def load_glove(word_index):
    """GloVe 840B-300d matrix for the top words of `word_index`."""
    EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt'
    def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')[:300]
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
    all_embs = np.stack(embeddings_index.values())
    # Hard-coded global mean/std of the GloVe matrix (precomputed offline).
    emb_mean,emb_std = -0.005838499,0.48782197
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
    for word, i in word_index.items() :
        if i >= nb_words: continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None: embedding_matrix[i] = embedding_vector
    return embedding_matrix

def load_fasttext(word_index):
    """wiki-news fastText matrix for the top words of `word_index`."""
    EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
    def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
    # len(o)>100 skips the fastText header line.
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE)if len(o)>100)
    all_embs = np.stack(embeddings_index.values())
    emb_mean,emb_std = all_embs.mean() , all_embs.std()
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
    for word, i in word_index.items() :
        if i >= nb_words: continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None: embedding_matrix[i] = embedding_vector
    return embedding_matrix

def load_para(word_index):
    """paragram_300_sl999 matrix for the top words of `word_index`."""
    EMBEDDING_FILE = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
    def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore')if len(o)>100)
    all_embs = np.stack(embeddings_index.values())
    # Hard-coded global mean/std of the paragram matrix (precomputed offline).
    emb_mean,emb_std = -0.0053247833,0.49346462
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size))
    for word, i in word_index.items() :
        if i >= nb_words: continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None: embedding_matrix[i] = embedding_vector
    return embedding_matrix
categorical_columns = [] features = X_train.columns.values.tolist() for col in features: if X_train[col].dtype in numerics: continue categorical_columns.append(col) categorical_columns
Titanic - Machine Learning from Disaster
6,678,936
df_train = pd.read_csv(".. /input/train.csv") df_test = pd.read_csv(".. /input/test.csv") df = pd.concat([df_train ,df_test],sort=True )<feature_engineering>
for col in categorical_columns: if col in X_train.columns: le = LabelEncoder() le.fit(list(X_train[col].astype(str ).values)+ list(X_test[col].astype(str ).values)) X_train[col] = le.transform(list(X_train[col].astype(str ).values)) X_test[col] = le.transform(list(X_test[col].astype(str ).values))
Titanic - Machine Learning from Disaster
6,678,936
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab vocab = build_vocab(df['question_text'] )<define_variables>
train0, test0 = X_train, X_test target0 = Y
Titanic - Machine Learning from Disaster
6,678,936
sin = len(df_train[df_train["target"]==0]) insin = len(df_train[df_train["target"]==1]) persin =(sin/(sin+insin)) *100 perinsin =(insin/(sin+insin)) *100 print(" print("<feature_engineering>
scaler = StandardScaler() train0 = pd.DataFrame(scaler.fit_transform(train0), columns = train0.columns) test0 = pd.DataFrame(scaler.transform(test0), columns = test0.columns) train0b = train0.copy() test0b = test0.copy() trainb, testb, targetb, target_testb = train_test_split(train0b, target0, test_size=test_train_split_part, random_state=random_state )
Titanic - Machine Learning from Disaster
6,678,936
def build_vocab(texts): sentences = texts.apply(lambda x: x.split() ).values vocab = {} for sentence in sentences: for word in sentence: try: vocab[word] += 1 except KeyError: vocab[word] = 1 return vocab def known_contractions(embed): known = [] for contract in contraction_mapping: if contract in embed: known.append(contract) return known def clean_contractions(text, mapping): specials = ["’", "‘", "´", "`"] for s in specials: text = text.replace(s, "'") text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")]) return text def correct_spelling(x, dic): for word in dic.keys() : x = x.replace(word, dic[word]) return x def unknown_punct(embed, punct): unknown = '' for p in punct: if p not in embed: unknown += p unknown += ' ' return unknown def clean_numbers(x): x = re.sub('[0-9]{5,}', ' x = re.sub('[0-9]{4}', ' x = re.sub('[0-9]{3}', ' x = re.sub('[0-9]{2}', ' return x def clean_special_chars(text, punct, mapping): for p in mapping: text = text.replace(p, mapping[p]) for p in punct: text = text.replace(p, f' {p} ') specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''} for s in specials: text = text.replace(s, specials[s]) return text def add_lower(embedding, vocab): count = 0 for word in vocab: if word in embedding and word.lower() not in embedding: embedding[word.lower() ] = embedding[word] count += 1 print(f"Added {count} words to embedding" )<define_variables>
scaler = MinMaxScaler() train0 = pd.DataFrame(scaler.fit_transform(train0), columns = train0.columns) test0 = pd.DataFrame(scaler.fit_transform(test0), columns = test0.columns )
Titanic - Machine Learning from Disaster
6,678,936
# Punctuation characters padded with spaces by clean_text below.
# NOTE(review): the entry run around ' '·' was garbled during extraction
# (several characters after a '#' were stripped), leaving the literal
# invalid; restore the missing entries before running.
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]

def clean_text(x):
    # Surround each known punctuation character with spaces so it becomes
    # its own token.
    x = str(x)
    for punct in puncts:
        x = x.replace(punct, f' {punct} ')
    return x

def clean_numbers(x):
    # Identity pass-through in this version (number masking disabled).
    return x

# Known misspellings / contractions -> canonical replacements, applied via
# one compiled alternation regex in replace_typical_misspell below.
mispell_dict = {"ain't": "is not", "aren't": "are not","can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would","he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have","I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have","i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have","it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have","mightn't": "might not","mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have","o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have", "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have","so's": "so as", "this's": "this is","that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is","they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would","y'all'd've": "you all would have","y'all're": "you all are","y'all've": "you all have","you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura': 'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'}

def _get_mispell(mispell_dict):
    # Compile one alternation regex over all misspelled keys.
    mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
    return mispell_dict, mispell_re

mispellings, mispellings_re = _get_mispell(mispell_dict)

def replace_typical_misspell(text):
    # Replace every matched misspelling with its dictionary value.
    def replace(match):
        return mispellings[match.group(0)]
    return mispellings_re.sub(replace, text )
train, test, target, target_test = train_test_split(train0, target0, test_size=test_train_split_part, random_state=random_state )
Titanic - Machine Learning from Disaster
6,678,936
def add_features(df):
    # Hand-crafted text statistics appended alongside the token sequences.
    df['question_text'] = df['question_text'].progress_apply(lambda x:str(x))
    df['total_length'] = df['question_text'].progress_apply(len)
    df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper()))
    df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1)
    df['num_words'] = df.question_text.str.count('\S+')
    df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split())))
    df['words_vs_unique'] = df['num_unique_words'] / df['num_words']
    return df

def load_and_prec() :
    """End-to-end preprocessing: lower-case + clean text, build statistical
    features (standard-scaled over train+test), tokenise/pad to maxlen, and
    shuffle the training set with a fixed seed."""
    train_df = pd.read_csv(".. /input/train.csv")
    test_df = pd.read_csv(".. /input/test.csv")
    print("Train shape : ",train_df.shape)
    print("Test shape : ",test_df.shape)
    train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower())
    test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower())
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x))
    test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x))
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x))
    test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x))
    train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x))
    test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x))
    # NOTE(review): the fillna sentinel strings below were truncated during
    # extraction (likely "_##_" originally); restore them before running.
    train_X = train_df["question_text"].fillna("_
    test_X = test_df["question_text"].fillna("_
    train = add_features(train_df)
    test = add_features(test_df)
    features = train[['caps_vs_length', 'words_vs_unique']].fillna(0)
    test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0)
    ss = StandardScaler()
    ss.fit(np.vstack(( features, test_features)))
    features = ss.transform(features)
    test_features = ss.transform(test_features)
    tokenizer = Tokenizer(num_words=max_features)
    tokenizer.fit_on_texts(list(train_X))
    train_X = tokenizer.texts_to_sequences(train_X)
    test_X = tokenizer.texts_to_sequences(test_X)
    train_X = pad_sequences(train_X, maxlen=maxlen)
    test_X = pad_sequences(test_X, maxlen=maxlen)
    train_y = train_df['target'].values
    # Deterministic shuffle of the training rows (and aligned features).
    np.random.seed(SEED)
    trn_idx = np.random.permutation(len(train_X))
    train_X = train_X[trn_idx]
    train_y = train_y[trn_idx]
    features = features[trn_idx]
    return train_X, test_X, train_y, features, test_features, tokenizer.word_index
num_models = 20 acc_train = [] acc_test = [] acc_all = np.empty(( len(metrics_now)*2, 0)).tolist() acc_all
Titanic - Machine Learning from Disaster
6,678,936
x_train, x_test, y_train, features, test_features, word_index = load_and_prec() <save_model>
acc_all_pred = np.empty(( len(metrics_now), 0)).tolist() acc_all_pred
Titanic - Machine Learning from Disaster
6,678,936
np.save("x_train",x_train) np.save("x_test",x_test) np.save("y_train",y_train) np.save("features",features) np.save("test_features",test_features) np.save("word_index.npy",word_index )<load_pretrained>
cv_train = ShuffleSplit(n_splits=cv_n_split, test_size=test_train_split_part, random_state=random_state )
Titanic - Machine Learning from Disaster
6,678,936
x_train = np.load("x_train.npy") x_test = np.load("x_test.npy") y_train = np.load("y_train.npy") features = np.load("features.npy") test_features = np.load("test_features.npy") word_index = np.load("word_index.npy" ).item()<normalization>
def acc_d(y_meas, y_pred): return mean_absolute_error(y_meas, y_pred)*len(y_meas)/sum(abs(y_meas)) def acc_rmse(y_meas, y_pred): return(mean_squared_error(y_meas, y_pred)) **0.5
Titanic - Machine Learning from Disaster
6,678,936
seed_everything() glove_embeddings = load_glove(word_index) paragram_embeddings = load_para(word_index) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings, paragram_embeddings], axis=0) del glove_embeddings, paragram_embeddings gc.collect() np.shape(embedding_matrix )<split>
def acc_metrics_calc(num,model,train,test,target,target_test): global acc_all ytrain = model.predict(train ).astype(int) ytest = model.predict(test ).astype(int) print('target = ', target[:5].values) print('ytrain = ', ytrain[:5]) print('target_test =', target_test[:5].values) print('ytest =', ytest[:5]) num_acc = 0 for x in metrics_now: if x == 1: acc_train = round(r2_score(target, ytrain)* 100, 2) acc_test = round(r2_score(target_test, ytest)* 100, 2) elif x == 2: acc_train = round(metrics.accuracy_score(target, ytrain)* 100, 2) acc_test = round(metrics.accuracy_score(target_test, ytest)* 100, 2) elif x == 3: acc_train = round(acc_rmse(target, ytrain)* 100, 2) acc_test = round(acc_rmse(target_test, ytest)* 100, 2) elif x == 4: acc_train = round(acc_d(target, ytrain)* 100, 2) acc_test = round(acc_d(target_test, ytest)* 100, 2) print('acc of', metrics_all[x], 'for train =', acc_train) print('acc of', metrics_all[x], 'for test =', acc_test) acc_all[num_acc].append(acc_train) acc_all[num_acc+1].append(acc_test) num_acc += 2
Titanic - Machine Learning from Disaster
6,678,936
splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED ).split(x_train, y_train)) splits[:3]<choose_model_class>
def acc_metrics_calc_pred(num, model, name_model, train, test, target):
    """Score `model` on the training data only (no test labels available),
    append the scores to the global `acc_all_pred` table, and write the test
    predictions as 'submission_<name_model>.csv'.

    NOTE(review): `num` is unused inside the function — confirm intended.
    Relies on module-level `metrics_now`, `metrics_all`, `submission` and
    `target_name`.
    """
    global acc_all_pred
    ytrain = model.predict(train).astype(int)
    ytest = model.predict(test).astype(int)
    print('**********')
    print(name_model)
    print('target = ', target[:15].values)
    print('ytrain = ', ytrain[:15])
    print('ytest =', ytest[:15])
    num_acc = 0
    for x in metrics_now:
        if x == 1:
            acc_train = round(r2_score(target, ytrain) * 100, 2)
        elif x == 2:
            acc_train = round(metrics.accuracy_score(target, ytrain) * 100, 2)
        elif x == 3:
            acc_train = round(acc_rmse(target, ytrain) * 100, 2)
        elif x == 4:
            acc_train = round(acc_d(target, ytrain) * 100, 2)
        print('acc of', metrics_all[x], 'for train =', acc_train)
        acc_all_pred[num_acc].append(acc_train)
        num_acc += 1
    # One submission file per model so each can be scored separately.
    submission[target_name] = ytest
    submission.to_csv('submission_' + name_model + '.csv', index=False)
Titanic - Machine Learning from Disaster
6,678,936
class CyclicLR(object):
    """Cyclical learning-rate scheduler (Smith, 2017): oscillates every param
    group's lr between base_lr and max_lr over a cycle of 2*step_size batch
    iterations.

    mode:
        'triangular'  - constant amplitude
        'triangular2' - amplitude halves every cycle
        'exp_range'   - amplitude decays by gamma**iteration
    A user-supplied scale_fn overrides mode; scale_mode chooses whether it is
    applied per 'cycle' or per 'iterations'.  Call batch_step() once per batch.
    """

    def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3,
                 step_size=2000, mode='triangular', gamma=1.,
                 scale_fn=None, scale_mode='cycle', last_batch_iteration=-1):
        if not isinstance(optimizer, Optimizer):
            raise TypeError('{} is not an Optimizer'.format(
                type(optimizer).__name__))
        self.optimizer = optimizer
        # base_lr / max_lr may be one scalar or one value per param group.
        if isinstance(base_lr, list) or isinstance(base_lr, tuple):
            if len(base_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} base_lr, got {}".format(
                    len(optimizer.param_groups), len(base_lr)))
            self.base_lrs = list(base_lr)
        else:
            self.base_lrs = [base_lr] * len(optimizer.param_groups)
        if isinstance(max_lr, list) or isinstance(max_lr, tuple):
            if len(max_lr) != len(optimizer.param_groups):
                raise ValueError("expected {} max_lr, got {}".format(
                    len(optimizer.param_groups), len(max_lr)))
            self.max_lrs = list(max_lr)
        else:
            self.max_lrs = [max_lr] * len(optimizer.param_groups)
        self.step_size = step_size
        if mode not in ['triangular', 'triangular2', 'exp_range'] \
                and scale_fn is None:
            raise ValueError('mode is invalid and scale_fn is None')
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = self._triangular_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = self._triangular2_scale_fn
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = self._exp_range_scale_fn
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        # Prime the optimizer lrs for the very first iteration.
        self.batch_step(last_batch_iteration + 1)
        self.last_batch_iteration = last_batch_iteration

    def batch_step(self, batch_iteration=None):
        # Advance one batch (or jump to a given iteration) and push the newly
        # computed lrs into the optimizer's param groups.
        if batch_iteration is None:
            batch_iteration = self.last_batch_iteration + 1
        self.last_batch_iteration = batch_iteration
        for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
            param_group['lr'] = lr

    def _triangular_scale_fn(self, x):
        return 1.

    def _triangular2_scale_fn(self, x):
        return 1 / (2. ** (x - 1))

    def _exp_range_scale_fn(self, x):
        return self.gamma ** (x)

    def get_lr(self):
        # Triangular wave: `cycle` counts completed cycles, `x` is the
        # position within the current half-cycle (0 at peak, 1 at base).
        step_size = float(self.step_size)
        cycle = np.floor(1 + self.last_batch_iteration / (2 * step_size))
        x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1)
        lrs = []
        param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs)
        for param_group, base_lr, max_lr in param_lrs:
            base_height = (max_lr - base_lr) * np.maximum(0, (1 - x))
            if self.scale_mode == 'cycle':
                lr = base_lr + base_height * self.scale_fn(cycle)
            else:
                lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration)
            lrs.append(lr)
        return lrs
# Baseline: plain linear regression.  The empty grid simply wraps it in the
# same cross-validated interface used for every other model.
linreg = LinearRegression()
linreg_CV = GridSearchCV(linreg, param_grid={}, cv=cv_train, verbose=False)
linreg_CV.fit(train, target)
print(linreg_CV.best_params_)
acc_metrics_calc(0, linreg_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
# Capsule-network hyper-parameters shared by the layers below.
embedding_dim = 300
embedding_path = '.. /save/embedding_matrix.npy'  # NOTE(review): space in path looks like export damage — confirm
use_pretrained_embedding = True
hidden_size = 60
gru_len = hidden_size
Routings = 4        # dynamic-routing iterations
Num_capsule = 5
Dim_capsule = 5
dropout_p = 0.25
rate_drop_dense = 0.28
LR = 0.001
T_epsilon = 1e-9    # numeric-stability term used in squash()
num_classes = 30


class Embed_Layer(nn.Module):
    """Embedding lookup (optionally initialised from a pre-trained matrix)
    followed by dropout."""

    def __init__(self, embedding_matrix=None, vocab_size=None, embedding_dim=300):
        super(Embed_Layer, self).__init__()
        self.encoder = nn.Embedding(vocab_size + 1, embedding_dim)
        if use_pretrained_embedding:
            self.encoder.weight.data.copy_(t.from_numpy(embedding_matrix))

    def forward(self, x, dropout_p=0.25):
        # NOTE(review): a fresh nn.Dropout is constructed per call, so it is
        # not registered on the module and ignores train/eval mode — confirm.
        return nn.Dropout(p=dropout_p)(self.encoder(x))


class GRU_Layer(nn.Module):
    """Single bidirectional GRU over the embedded sequence."""

    def __init__(self):
        super(GRU_Layer, self).__init__()
        self.gru = nn.GRU(input_size=300,
                          hidden_size=gru_len,
                          bidirectional=True)

    def init_weights(self):
        # Xavier for input-hidden weights, orthogonal for hidden-hidden,
        # zeros for biases.
        ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
        hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
        b = (param.data for name, param in self.named_parameters() if 'bias' in name)
        for k in ih:
            nn.init.xavier_uniform_(k)
        for k in hh:
            nn.init.orthogonal_(k)
        for k in b:
            nn.init.constant_(k, 0)

    def forward(self, x):
        return self.gru(x)


class Caps_Layer(nn.Module):
    """Capsule layer with dynamic routing (Sabour et al., 2017) applied on
    top of the bi-GRU output."""

    def __init__(self, input_dim_capsule=gru_len * 2, num_capsule=Num_capsule, dim_capsule=Dim_capsule, \
                 routings=Routings, kernel_size=(9, 1), share_weights=True,
                 activation='default', **kwargs):
        super(Caps_Layer, self).__init__(**kwargs)
        self.num_capsule = num_capsule
        self.dim_capsule = dim_capsule
        self.routings = routings
        self.kernel_size = kernel_size  # NOTE(review): stored but never used
        self.share_weights = share_weights
        if activation == 'default':
            self.activation = self.squash
        else:
            self.activation = nn.ReLU(inplace=True)
        if self.share_weights:
            # One weight matrix shared across all timestep capsules.
            self.W = nn.Parameter(
                nn.init.xavier_normal_(t.empty(1, input_dim_capsule, self.num_capsule * self.dim_capsule)))
        else:
            # NOTE(review): BATCH_SIZE is not defined in this chunk — this
            # branch would fail if share_weights=False.
            self.W = nn.Parameter(
                t.randn(BATCH_SIZE, input_dim_capsule, self.num_capsule * self.dim_capsule))

    def forward(self, x):
        if self.share_weights:
            u_hat_vecs = t.matmul(x, self.W)
        else:
            print('add later')  # non-shared-weights path not implemented
        batch_size = x.size(0)
        input_num_capsule = x.size(1)
        u_hat_vecs = u_hat_vecs.view((batch_size, input_num_capsule,
                                      self.num_capsule, self.dim_capsule))
        u_hat_vecs = u_hat_vecs.permute(0, 2, 1, 3)
        b = t.zeros_like(u_hat_vecs[:, :, :, 0])  # routing logits
        for i in range(self.routings):
            b = b.permute(0, 2, 1)
            c = F.softmax(b, dim=2)  # coupling coefficients
            c = c.permute(0, 2, 1)
            b = b.permute(0, 2, 1)
            outputs = self.activation(t.einsum('bij,bijk->bik', (c, u_hat_vecs)))
            if i < self.routings - 1:
                # agreement update for the next routing iteration
                b = t.einsum('bik,bijk->bij', (outputs, u_hat_vecs))
        return outputs

    def squash(self, x, axis=-1):
        # Scales each capsule vector by its norm (plus epsilon) — this
        # variant normalises rather than applying the full squash formula.
        s_squared_norm = (x ** 2).sum(axis, keepdim=True)
        scale = t.sqrt(s_squared_norm + T_epsilon)
        return x / scale


class Capsule_Main(nn.Module):
    """End-to-end capsule model: embedding -> bi-GRU -> capsules -> dense.

    NOTE(review): Dense_Layer is not defined in this chunk of the file —
    confirm it exists elsewhere before using this class.
    """

    def __init__(self, embedding_matrix=None, vocab_size=None):
        super(Capsule_Main, self).__init__()
        self.embed_layer = Embed_Layer(embedding_matrix, vocab_size)
        self.gru_layer = GRU_Layer()
        self.gru_layer.init_weights()
        self.caps_layer = Caps_Layer()
        self.dense_layer = Dense_Layer()

    def forward(self, content):
        content1 = self.embed_layer(content)
        content2, _ = self.gru_layer(content1)
        content3 = self.caps_layer(content2)
        output = self.dense_layer(content3)
        return output
# Support-vector classifier: search over the four standard kernels.
svr = SVC()
svr_CV = GridSearchCV(
    svr,
    param_grid={'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'tol': [1e-4]},
    cv=cv_train, verbose=False)
svr_CV.fit(train, target)
print(svr_CV.best_params_)
acc_metrics_calc(1, svr_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
class Attention(nn.Module):
    """Additive attention pooling over the time dimension: scores every
    timestep, softmax-normalises the scores, and returns the weighted sum of
    the inputs (one vector per example)."""

    def __init__(self, feature_dim, step_dim, bias=True, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.supports_masking = True
        self.bias = bias
        self.feature_dim = feature_dim
        self.step_dim = step_dim
        self.features_dim = 0
        weight = torch.zeros(feature_dim, 1)
        nn.init.xavier_uniform_(weight)
        self.weight = nn.Parameter(weight)
        if bias:
            self.b = nn.Parameter(torch.zeros(step_dim))

    def forward(self, x, mask=None):
        feature_dim = self.feature_dim
        step_dim = self.step_dim
        # (batch*steps, features) @ (features, 1) -> one score per timestep
        eij = torch.mm(
            x.contiguous().view(-1, feature_dim),
            self.weight).view(-1, step_dim)
        if self.bias:
            eij = eij + self.b
        eij = torch.tanh(eij)
        a = torch.exp(eij)
        if mask is not None:
            a = a * mask
        # manual softmax; the +1e-10 guards the subsequent multiplication
        a = a / torch.sum(a, 1, keepdim=True) + 1e-10
        weighted_input = x * torch.unsqueeze(a, -1)
        return torch.sum(weighted_input, 1)


class NeuralNet(nn.Module):
    """bi-LSTM -> bi-GRU text encoder combined with a capsule head, two
    attention poolings, average/max pooling and the hand-crafted statistical
    features, followed by a small dense classifier producing one logit."""

    def __init__(self):
        super(NeuralNet, self).__init__()
        fc_layer = 16
        fc_layer1 = 16
        self.embedding = nn.Embedding(max_features, embed_size)
        self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
        self.embedding.weight.requires_grad = False  # keep pre-trained vectors frozen
        self.embedding_dropout = nn.Dropout2d(0.1)
        self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True)
        self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        # NOTE(review): lstm2 and fc are constructed but never used in forward.
        self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)
        self.lstm_attention = Attention(hidden_size * 2, maxlen)
        self.gru_attention = Attention(hidden_size * 2, maxlen)
        # input = 4 pooled views of 2*hidden each + 1 capsule scalar
        #         + 2 statistical features = hidden*8 + 3
        self.linear = nn.Linear(hidden_size * 8 + 3, fc_layer1)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.1)
        self.fc = nn.Linear(fc_layer ** 2, fc_layer)
        self.out = nn.Linear(fc_layer, 1)
        self.lincaps = nn.Linear(Num_capsule * Dim_capsule, 1)
        self.caps_layer = Caps_Layer()

    def forward(self, x):
        # x is a pair: [token-id batch, statistical-feature batch]
        h_embedding = self.embedding(x[0])
        # Dropout2d over the (1, batch, seq, emb) view drops whole channels.
        h_embedding = torch.squeeze(
            self.embedding_dropout(torch.unsqueeze(h_embedding, 0)))
        h_lstm, _ = self.lstm(h_embedding)
        h_gru, _ = self.gru(h_lstm)
        # capsule head reduced to a single scalar per example
        content3 = self.caps_layer(h_gru)
        content3 = self.dropout(content3)
        batch_size = content3.size(0)
        content3 = content3.view(batch_size, -1)
        content3 = self.relu(self.lincaps(content3))
        # four pooled views of the recurrent outputs
        h_lstm_atten = self.lstm_attention(h_lstm)
        h_gru_atten = self.gru_attention(h_gru)
        avg_pool = torch.mean(h_gru, 1)
        max_pool, _ = torch.max(h_gru, 1)
        f = torch.tensor(x[1], dtype=torch.float).cuda()
        conc = torch.cat((h_lstm_atten, h_gru_atten, content3, avg_pool, max_pool, f), 1)
        conc = self.relu(self.linear(conc))
        conc = self.dropout(conc)
        # raw logit — BCEWithLogitsLoss / sigmoid() are applied by the caller
        out = self.out(conc)
        return out
# Linear SVM: search over the regularisation strength C.
linear_svc = LinearSVC()
grid = {'dual': [False], 'C': np.linspace(1, 15, 15)}
linear_svc_CV = GridSearchCV(linear_svc, param_grid=grid, cv=cv_train, verbose=False)
linear_svc_CV.fit(train, target)
print(linear_svc_CV.best_params_)
acc_metrics_calc(2, linear_svc_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
class MyDataset(Dataset):
    """Wrap a (data, target) dataset so each item also yields its own index,
    letting the training loop fetch the matching statistical features."""

    def __init__(self, dataset):
        self.dataset = dataset

    def __getitem__(self, index):
        sample, label = self.dataset[index]
        return sample, label, index

    def __len__(self):
        return len(self.dataset)
%%time mlp = MLPClassifier() param_grid = {'hidden_layer_sizes': [i for i in range(2,5)], 'solver': ['sgd'], 'learning_rate': ['adaptive'], 'max_iter': [1000] } mlp_GS = GridSearchCV(mlp, param_grid=param_grid, cv=cv_train, verbose=False) mlp_GS.fit(train, target) print(mlp_GS.best_params_) acc_metrics_calc(3,mlp_GS,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
def sigmoid(x):
    """Numpy logistic function: map raw logits to probabilities in (0, 1)."""
    return 1 / (1 + np.exp(-x))


# Out-of-fold predictions for the train set and accumulated test predictions.
train_preds = np.zeros((len(x_train)))
test_preds = np.zeros((len(df_test)))

seed_everything()

# Fixed-order test loader so fold predictions line up row by row.
x_test_cuda = torch.tensor(x_test, dtype=torch.long).cuda()
test = torch.utils.data.TensorDataset(x_test_cuda)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False)

avg_losses_f = []
avg_val_losses_f = []
# Linear model trained with stochastic gradient descent; tune regularisation.
sgd = SGDClassifier(early_stopping=True)
grid = {'alpha': [0.0001, 0.001, 0.01, 0.1, 1]}
sgd_CV = GridSearchCV(sgd, param_grid=grid, cv=cv_train, verbose=False)
sgd_CV.fit(train, target)
print(sgd_CV.best_params_)
acc_metrics_calc(4, sgd_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
# K-fold training: fit one NeuralNet per fold, collect out-of-fold train
# predictions (for threshold tuning) and average test predictions over folds.
for i, (train_idx, valid_idx) in enumerate(splits):
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    features = np.array(features)
    x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long).cuda()
    y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32).cuda()
    kfold_X_features = features[train_idx.astype(int)]
    kfold_X_valid_features = features[valid_idx.astype(int)]
    x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long).cuda()
    y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32).cuda()

    model = NeuralNet()
    model.cuda()
    loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum')
    step_size = 300
    base_lr, max_lr = 0.001, 0.003
    # Only optimise trainable parameters (the embedding is frozen).
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr)
    scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr,
                         step_size=step_size, mode='exp_range', gamma=0.99994)

    train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold)
    valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold)
    # MyDataset adds the in-fold row index so the statistical features of the
    # same rows can be looked up per batch.
    train = MyDataset(train)
    valid = MyDataset(valid)
    train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False)

    print(f'Fold {i + 1}')
    # NOTE(review): the batch loops below reuse the name `i`, shadowing the
    # fold index; harmless because the fold index is not used afterwards,
    # but confusing.
    for epoch in range(n_epochs):
        start_time = time.time()
        model.train()
        avg_loss = 0.
        for i, (x_batch, y_batch, index) in enumerate(train_loader):
            f = kfold_X_features[index]
            y_pred = model([x_batch, f])
            if scheduler:
                scheduler.batch_step()  # cyclic lr advances once per batch
            loss = loss_fn(y_pred, y_batch)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            avg_loss += loss.item() / len(train_loader)

        model.eval()
        valid_preds_fold = np.zeros((x_val_fold.size(0)))
        test_preds_fold = np.zeros((len(df_test)))
        avg_val_loss = 0.
        for i, (x_batch, y_batch, index) in enumerate(valid_loader):
            f = kfold_X_valid_features[index]
            y_pred = model([x_batch, f]).detach()
            avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)
            valid_preds_fold[i * batch_size:(i + 1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]

        elapsed_time = time.time() - start_time
        print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format(
            epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time))

    # NOTE: only the final epoch's losses are recorded for each fold.
    avg_losses_f.append(avg_loss)
    avg_val_losses_f.append(avg_val_loss)

    # Predict the test set with this fold's model; contributions are averaged.
    for i, (x_batch,) in enumerate(test_loader):
        f = test_features[i * batch_size:(i + 1) * batch_size]
        y_pred = model([x_batch, f]).detach()
        test_preds_fold[i * batch_size:(i + 1) * batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]

    train_preds[valid_idx] = valid_preds_fold
    test_preds += test_preds_fold / len(splits)

print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f), np.average(avg_val_losses_f)))
# Single decision tree: tune the minimum leaf size to limit overfitting.
decision_tree = DecisionTreeClassifier()
grid = {'min_samples_leaf': list(range(2, 10))}
decision_tree_CV = GridSearchCV(decision_tree, param_grid=grid, cv=cv_train, verbose=False)
decision_tree_CV.fit(train, target)
print(decision_tree_CV.best_params_)
acc_metrics_calc(5, decision_tree_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
def bestThresshold(y_train, train_preds):
    """Sweep thresholds in [0.10, 0.50] and return the one maximising F1 of
    the binarised out-of-fold predictions against the true labels."""
    best_thresh = 0
    best_f1 = 0
    for thresh in tqdm(np.arange(0.1, 0.501, 0.01)):
        score = f1_score(y_train, np.array(train_preds) > thresh)
        if score > best_f1:
            best_thresh = thresh
            best_f1 = score
    print('best threshold is {:.4f} with F1 score: {:.4f}'.format(best_thresh, best_f1))
    return best_thresh


delta = bestThresshold(y_train, train_preds)
%%time random_forest = RandomForestClassifier() param_grid = {'n_estimators': [300, 400, 500, 600], 'min_samples_split': [60], 'min_samples_leaf': [20, 25, 30, 35, 40], 'max_features': ['auto'], 'max_depth': [5, 6, 7, 8, 9, 10], 'criterion': ['gini'], 'bootstrap': [False]} random_forest_CV = GridSearchCV(estimator=random_forest, param_grid=param_grid, cv=cv_train, verbose=False) random_forest_CV.fit(train, target) print(random_forest_CV.best_params_) acc_metrics_calc(6,random_forest_CV,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
# Binarise the fold-averaged test probabilities at the tuned threshold and
# write the competition submission file.
submission = df_test[['qid']].copy()
submission['prediction'] = (test_preds > delta).astype(int)
submission.to_csv('submission.csv', index=False)
%%time xgb_clf = xgb.XGBClassifier(objective='reg:squarederror') parameters = {'n_estimators': [200, 300, 400], 'learning_rate': [0.001, 0.003, 0.005, 0.006, 0.01], 'max_depth': [4, 5, 6]} xgb_reg = GridSearchCV(estimator=xgb_clf, param_grid=parameters, cv=cv_train ).fit(trainb, targetb) print("Best score: %0.3f" % xgb_reg.best_score_) print("Best parameters set:", xgb_reg.best_params_) acc_metrics_calc(7,xgb_reg,trainb,testb,targetb,target_testb )
Titanic - Machine Learning from Disaster
6,678,936
# Register tqdm with pandas so .progress_apply shows a progress bar.
tqdm.pandas(desc='Progress')
# Hold out part of the training data so LightGBM can early-stop against it.
Xtrain, Xval, Ztrain, Zval = train_test_split(
    trainb, targetb, test_size=test_train_split_part, random_state=random_state)
modelL = lgb.LGBMClassifier(n_estimators=1000, num_leaves=50)
modelL.fit(Xtrain, Ztrain,
           eval_set=[(Xval, Zval)],
           early_stopping_rounds=50,
           verbose=True)
Titanic - Machine Learning from Disaster
6,678,936
# Global text-model hyper-parameters.
embed_size = 300        # dimensionality of the pre-trained word vectors
max_features = 120000   # vocabulary cap: keep only the most frequent words
maxlen = 70             # pad/truncate every question to this many tokens
batch_size = 512
n_epochs = 5
n_splits = 5            # cross-validation folds
SEED = 1029
# Evaluate the early-stopped LightGBM model (slot 8 in the comparison table).
acc_metrics_calc(8,modelL,trainb,testb,targetb,target_testb )
Titanic - Machine Learning from Disaster
6,678,936
def seed_everything(seed=1029):
    """Seed every RNG in play (Python, hashing, NumPy, PyTorch CPU/GPU) and
    force deterministic cuDNN so runs are reproducible."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True


seed_everything()
# Gradient boosting (sklearn): tune learning rate, depth and leaf size.
gradient_boosting = GradientBoostingClassifier()
grid = {'learning_rate': [0.001, 0.01, 0.1],
        'max_depth': list(range(2, 5)),
        'min_samples_leaf': list(range(2, 5))}
gradient_boosting_CV = GridSearchCV(estimator=gradient_boosting, param_grid=grid, cv=cv_train, verbose=False)
gradient_boosting_CV.fit(train, target)
print(gradient_boosting_CV.best_params_)
acc_metrics_calc(9, gradient_boosting_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
def load_glove(word_index):
    """Build an embedding matrix from the GloVe vectors; words missing from
    GloVe keep random vectors drawn from N(emb_mean, emb_std)."""
    EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt'
    # NOTE(review): the '.. /' (with a space) in the path looks like export
    # damage — confirm against the original kernel.

    def get_coefs(word, *arr):
        # [:300] guards against GloVe lines carrying extra trailing tokens.
        return word, np.asarray(arr, dtype='float32')[:300]
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
    all_embs = np.stack(embeddings_index.values())
    # NOTE(review): mean/std are hard-coded here; the all_embs statistics are
    # computed but not used.
    emb_mean, emb_std = -0.005838499, 0.48782197
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix


def load_fasttext(word_index):
    """Same as load_glove but for the wiki-news fastText vectors; mean/std
    are computed from the file itself."""
    EMBEDDING_FILE = '.. /input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'

    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')
    # len(o) > 100 skips the short header line of the .vec file.
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o) > 100)
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = all_embs.mean(), all_embs.std()
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix


def load_para(word_index):
    """Same as load_glove but for the paragram vectors (file has encoding
    quirks, so decode errors are ignored); mean/std are hard-coded."""
    EMBEDDING_FILE = '.. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'

    def get_coefs(word, *arr):
        return word, np.asarray(arr, dtype='float32')
    embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o) > 100)
    all_embs = np.stack(embeddings_index.values())
    emb_mean, emb_std = -0.0053247833, 0.49346462
    embed_size = all_embs.shape[1]
    nb_words = min(max_features, len(word_index))
    embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
# Ridge classifier: tune the L2 penalty strength.
ridge = RidgeClassifier()
ridge_CV = GridSearchCV(estimator=ridge,
                        param_grid={'alpha': np.linspace(.1, 1.5, 15)},
                        cv=cv_train, verbose=False)
ridge_CV.fit(train, target)
print(ridge_CV.best_params_)
acc_metrics_calc(10, ridge_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
# Load the raw competition data and stack train + test for joint preprocessing.
df_train = pd.read_csv(".. /input/train.csv")
df_test = pd.read_csv(".. /input/test.csv")
df = pd.concat([df_train, df_test], sort=True)
%%time bagging = BaggingClassifier(base_estimator=linear_svc_CV) param_grid={'max_features': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'n_estimators': [3, 5, 10], 'warm_start' : [True], 'random_state': [random_state]} bagging_CV = GridSearchCV(estimator=bagging, param_grid=param_grid, cv=cv_train, verbose=False) bagging_CV.fit(train, target) print(bagging_CV.best_params_) acc_metrics_calc(11,bagging_CV,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
def build_vocab(texts):
    """Return a {word: count} dictionary over the whitespace-split tokens of
    the given Series of texts."""
    sentences = texts.apply(lambda x: x.split()).values
    vocab = {}
    for sentence in sentences:
        for word in sentence:
            # dict.get replaces the try/except-KeyError counting idiom
            vocab[word] = vocab.get(word, 0) + 1
    return vocab


vocab = build_vocab(df['question_text'])
# Extremely-randomised trees: tune only the minimum leaf size.
etr = ExtraTreesClassifier()
etr_CV = GridSearchCV(estimator=etr,
                      param_grid={'min_samples_leaf': [10, 20, 30, 40, 50]},
                      cv=cv_train, verbose=False)
etr_CV.fit(train, target)
acc_metrics_calc(12, etr_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
# Class balance of the training set.
sin = len(df_train[df_train["target"] == 0])     # sincere questions
insin = len(df_train[df_train["target"] == 1])   # insincere questions
persin = (sin / (sin + insin)) * 100
perinsin = (insin / (sin + insin)) * 100
# FIX(review): the two print statements were corrupted in the exported
# source; reconstructed to report the percentages computed above.
print("% of sincere questions: {:.2f}%".format(persin))
print("% of insincere questions: {:.2f}%".format(perinsin))
# AdaBoost: tune the learning rate only.
Ada_Boost = AdaBoostClassifier()
Ada_Boost_CV = GridSearchCV(estimator=Ada_Boost,
                            param_grid={'learning_rate': [.01, .1, .5, 1]},
                            cv=cv_train, verbose=False)
Ada_Boost_CV.fit(train, target)
acc_metrics_calc(13, Ada_Boost_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
def build_vocab(texts):
    """Count word occurrences across all texts (whitespace tokens).
    (Duplicate of the earlier build_vocab; kept for cell independence.)"""
    sentences = texts.apply(lambda x: x.split()).values
    vocab = {}
    for sentence in sentences:
        for word in sentence:
            vocab[word] = vocab.get(word, 0) + 1
    return vocab


def known_contractions(embed):
    """List the contractions (from the module-level contraction_mapping —
    defined elsewhere in this notebook) present in the embedding index."""
    known = []
    for contract in contraction_mapping:
        if contract in embed:
            known.append(contract)
    return known


def clean_contractions(text, mapping):
    """Normalise curly apostrophes, then expand contractions via `mapping`."""
    specials = ["’", "‘", "´", "`"]
    for s in specials:
        text = text.replace(s, "'")
    text = ' '.join([mapping[t] if t in mapping else t for t in text.split(" ")])
    return text


def correct_spelling(x, dic):
    """Apply every substring replacement in `dic` to x."""
    for word in dic.keys():
        x = x.replace(word, dic[word])
    return x


def unknown_punct(embed, punct):
    """Return a space-separated string of punctuation chars missing from the
    embedding index."""
    unknown = ''
    for p in punct:
        if p not in embed:
            unknown += p
            unknown += ' '
    return unknown


def clean_numbers(x):
    """Mask digit runs with '#' placeholders, longest runs first.
    FIX(review): the '#'-mask replacement strings were stripped in the
    exported source; restored from the reference kernel."""
    x = re.sub('[0-9]{5,}', '#####', x)
    x = re.sub('[0-9]{4}', '####', x)
    x = re.sub('[0-9]{3}', '###', x)
    x = re.sub('[0-9]{2}', '##', x)
    return x


def clean_special_chars(text, punct, mapping):
    """Replace mapped special chars, pad punctuation with spaces, and strip a
    few known unicode artefacts."""
    for p in mapping:
        text = text.replace(p, mapping[p])
    for p in punct:
        text = text.replace(p, f' {p} ')
    specials = {'\u200b': ' ', '…': '...', '\ufeff': '', 'करना': '', 'है': ''}
    for s in specials:
        text = text.replace(s, specials[s])
    return text


def add_lower(embedding, vocab):
    """Alias each cased embedding entry under its lowercase form when the
    lowercase form is missing (mutates `embedding` in place)."""
    count = 0
    for word in vocab:
        if word in embedding and word.lower() not in embedding:
            embedding[word.lower()] = embedding[word]
            count += 1
    print(f"Added {count} words to embedding")
# Logistic regression: tune the inverse regularisation strength C.
logreg = LogisticRegression()
logreg_CV = GridSearchCV(estimator=logreg,
                         param_grid={'C': [.1, .3, .5, .7, 1]},
                         cv=cv_train, verbose=False)
logreg_CV.fit(train, target)
acc_metrics_calc(14, logreg_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
# Punctuation characters to pad with spaces during cleaning.
# FIX(review): the entries '#', '*', '+', '\\', '•', '~', '@', '£' were
# stripped from the exported source ('#' removal damage); restored from the
# reference kernel.
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', '#', '*', '+', '\\', '•', '~', '@', '£',
          '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…',
          '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─',
          '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞',
          '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', ]


def clean_text(x):
    """Pad every punctuation character in `puncts` with spaces so the
    tokenizer keeps it as its own token."""
    x = str(x)
    for punct in puncts:
        x = x.replace(punct, f' {punct} ')
    return x


def clean_numbers(x):
    """Mask digit runs with '#' placeholders, longest runs first.
    FIX(review): the mask strings were stripped in the exported source;
    restored from the reference kernel."""
    x = re.sub('[0-9]{5,}', '#####', x)
    x = re.sub('[0-9]{4}', '####', x)
    x = re.sub('[0-9]{3}', '###', x)
    x = re.sub('[0-9]{2}', '##', x)
    return x


# Common contractions and site-specific misspellings -> canonical forms.
mispell_dict = {"ain't": "is not", "aren't": "are not", "can't": "cannot", "'cause": "because", "could've": "could have", "couldn't": "could not", "didn't": "did not", "doesn't": "does not", "don't": "do not", "hadn't": "had not", "hasn't": "has not", "haven't": "have not", "he'd": "he would", "he'll": "he will", "he's": "he is", "how'd": "how did", "how'd'y": "how do you", "how'll": "how will", "how's": "how is", "I'd": "I would", "I'd've": "I would have", "I'll": "I will", "I'll've": "I will have", "I'm": "I am", "I've": "I have", "i'd": "i would", "i'd've": "i would have", "i'll": "i will", "i'll've": "i will have", "i'm": "i am", "i've": "i have", "isn't": "is not", "it'd": "it would", "it'd've": "it would have", "it'll": "it will", "it'll've": "it will have", "it's": "it is", "let's": "let us", "ma'am": "madam", "mayn't": "may not", "might've": "might have", "mightn't": "might not", "mightn't've": "might not have", "must've": "must have", "mustn't": "must not", "mustn't've": "must not have", "needn't": "need not", "needn't've": "need not have", "o'clock": "of the clock", "oughtn't": "ought not", "oughtn't've": "ought not have",
                "shan't": "shall not", "sha'n't": "shall not", "shan't've": "shall not have", "she'd": "she would", "she'd've": "she would have", "she'll": "she will", "she'll've": "she will have", "she's": "she is", "should've": "should have", "shouldn't": "should not", "shouldn't've": "should not have", "so've": "so have", "so's": "so as", "this's": "this is", "that'd": "that would", "that'd've": "that would have", "that's": "that is", "there'd": "there would", "there'd've": "there would have", "there's": "there is", "here's": "here is", "they'd": "they would", "they'd've": "they would have", "they'll": "they will", "they'll've": "they will have", "they're": "they are", "they've": "they have", "to've": "to have", "wasn't": "was not", "we'd": "we would", "we'd've": "we would have", "we'll": "we will", "we'll've": "we will have", "we're": "we are", "we've": "we have", "weren't": "were not", "what'll": "what will", "what'll've": "what will have", "what're": "what are", "what's": "what is", "what've": "what have", "when's": "when is", "when've": "when have", "where'd": "where did", "where's": "where is", "where've": "where have", "who'll": "who will", "who'll've": "who will have", "who's": "who is", "who've": "who have", "why's": "why is", "why've": "why have", "will've": "will have", "won't": "will not", "won't've": "will not have", "would've": "would have", "wouldn't": "would not", "wouldn't've": "would not have", "y'all": "you all", "y'all'd": "you all would", "y'all'd've": "you all would have", "y'all're": "you all are", "y'all've": "you all have", "you'd": "you would", "you'd've": "you would have", "you'll": "you will", "you'll've": "you will have", "you're": "you are", "you've": "you have", 'colour': 'color', 'centre': 'center', 'favourite': 'favorite', 'travelling': 'traveling', 'counselling': 'counseling', 'theatre': 'theater', 'cancelled': 'canceled', 'labour': 'labor', 'organisation': 'organization', 'wwii': 'world war 2', 'citicise': 'criticize', 'youtu ': 'youtube ', 'Qoura':
                'Quora', 'sallary': 'salary', 'Whta': 'What', 'narcisist': 'narcissist', 'howdo': 'how do', 'whatare': 'what are', 'howcan': 'how can', 'howmuch': 'how much', 'howmany': 'how many', 'whydo': 'why do', 'doI': 'do I', 'theBest': 'the best', 'howdoes': 'how does', 'mastrubation': 'masturbation', 'mastrubate': 'masturbate', "mastrubating": 'masturbating', 'pennis': 'penis', 'Etherium': 'Ethereum', 'narcissit': 'narcissist', 'bigdata': 'big data', '2k17': '2017', '2k18': '2018', 'qouta': 'quota', 'exboyfriend': 'ex boyfriend', 'airhostess': 'air hostess', "whst": 'what', 'watsapp': 'whatsapp', 'demonitisation': 'demonetization', 'demonitization': 'demonetization', 'demonetisation': 'demonetization'}


def _get_mispell(mispell_dict):
    """Compile one alternation regex over every misspelling key."""
    mispell_re = re.compile('(%s)' % '|'.join(mispell_dict.keys()))
    return mispell_dict, mispell_re


mispellings, mispellings_re = _get_mispell(mispell_dict)


def replace_typical_misspell(text):
    """Replace every known misspelling/contraction occurrence in `text`."""
    def replace(match):
        return mispellings[match.group(0)]
    return mispellings_re.sub(replace, text)
# K-nearest neighbours: tune the neighbourhood size.
knn = KNeighborsClassifier()
grid = {'n_neighbors': range(2, 7)}
knn_CV = GridSearchCV(estimator=knn, param_grid=grid, cv=cv_train, verbose=False).fit(train, target)
print(knn_CV.best_params_)
acc_metrics_calc(15, knn_CV, train, test, target, target_test)
Titanic - Machine Learning from Disaster
6,678,936
def add_features(df): df['question_text'] = df['question_text'].progress_apply(lambda x:str(x)) df['total_length'] = df['question_text'].progress_apply(len) df['capitals'] = df['question_text'].progress_apply(lambda comment: sum(1 for c in comment if c.isupper())) df['caps_vs_length'] = df.progress_apply(lambda row: float(row['capitals'])/float(row['total_length']), axis=1) df['num_words'] = df.question_text.str.count('\S+') df['num_unique_words'] = df['question_text'].progress_apply(lambda comment: len(set(w for w in comment.split()))) df['words_vs_unique'] = df['num_unique_words'] / df['num_words'] return df def load_and_prec() : train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/test.csv") print("Train shape : ",train_df.shape) print("Test shape : ",test_df.shape) train_df["question_text"] = train_df["question_text"].apply(lambda x: x.lower()) test_df["question_text"] = test_df["question_text"].apply(lambda x: x.lower()) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_text(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_text(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: clean_numbers(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: clean_numbers(x)) train_df["question_text"] = train_df["question_text"].progress_apply(lambda x: replace_typical_misspell(x)) test_df["question_text"] = test_df["question_text"].apply(lambda x: replace_typical_misspell(x)) train_X = train_df["question_text"].fillna("_ test_X = test_df["question_text"].fillna("_ train = add_features(train_df) test = add_features(test_df) features = train[['caps_vs_length', 'words_vs_unique']].fillna(0) test_features = test[['caps_vs_length', 'words_vs_unique']].fillna(0) ss = StandardScaler() ss.fit(np.vstack(( features, test_features))) features = ss.transform(features) test_features = ss.transform(test_features) tokenizer = 
Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(train_X)) train_X = tokenizer.texts_to_sequences(train_X) test_X = tokenizer.texts_to_sequences(test_X) train_X = pad_sequences(train_X, maxlen=maxlen) test_X = pad_sequences(test_X, maxlen=maxlen) train_y = train_df['target'].values np.random.seed(SEED) trn_idx = np.random.permutation(len(train_X)) train_X = train_X[trn_idx] train_y = train_y[trn_idx] features = features[trn_idx] return train_X, test_X, train_y, features, test_features, tokenizer.word_index <train_model>
gaussian = GaussianNB() param_grid={'var_smoothing': [1e-8, 1e-9, 1e-10]} gaussian_CV = GridSearchCV(estimator=gaussian, param_grid=param_grid, cv=cv_train, verbose=False) gaussian_CV.fit(train, target) print(gaussian_CV.best_params_) acc_metrics_calc(16,gaussian_CV,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
x_train, x_test, y_train, features, test_features, word_index = load_and_prec() <save_model>
perceptron = Perceptron() param_grid = {'penalty': [None, 'l2', 'l1', 'elasticnet']} perceptron_CV = GridSearchCV(estimator=perceptron, param_grid=param_grid, cv=cv_train, verbose=False) perceptron_CV.fit(train, target) print(perceptron_CV.best_params_) acc_metrics_calc(17,perceptron_CV,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
np.save("x_train",x_train) np.save("x_test",x_test) np.save("y_train",y_train) np.save("features",features) np.save("test_features",test_features) np.save("word_index.npy",word_index )<load_pretrained>
gpc = GaussianProcessClassifier() param_grid = {'max_iter_predict': [100, 200], 'warm_start': [True, False], 'n_restarts_optimizer': range(3)} gpc_CV = GridSearchCV(estimator=gpc, param_grid=param_grid, cv=cv_train, verbose=False) gpc_CV.fit(train, target) print(gpc_CV.best_params_) acc_metrics_calc(18,gpc_CV,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
x_train = np.load("x_train.npy") x_test = np.load("x_test.npy") y_train = np.load("y_train.npy") features = np.load("features.npy") test_features = np.load("test_features.npy") word_index = np.load("word_index.npy" ).item()<normalization>
Voting_ens = VotingClassifier(estimators=[('log', logreg_CV),('mlp', mlp_GS),('svc', linear_svc_CV)]) Voting_ens.fit(train, target) acc_metrics_calc(19,Voting_ens,train,test,target,target_test )
Titanic - Machine Learning from Disaster
6,678,936
seed_everything() glove_embeddings = load_glove(word_index) paragram_embeddings = load_para(word_index) embedding_matrix = np.mean([glove_embeddings, paragram_embeddings], axis=0) del glove_embeddings, paragram_embeddings gc.collect() np.shape(embedding_matrix )<split>
models = pd.DataFrame({ 'Model': ['Linear Regression', 'Support Vector Machines', 'Linear SVC', 'MLPClassifier', 'Stochastic Gradient Decent', 'Decision Tree Classifier', 'Random Forest', 'XGBClassifier', 'LGBMClassifier', 'GradientBoostingClassifier', 'RidgeClassifier', 'BaggingClassifier', 'ExtraTreesClassifier', 'AdaBoostClassifier', 'Logistic Regression', 'KNN', 'Naive Bayes', 'Perceptron', 'Gaussian Process Classification', 'VotingClassifier']} )
Titanic - Machine Learning from Disaster
6,678,936
splits = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=SEED ).split(x_train, y_train)) splits[:3]<choose_model_class>
for x in metrics_now: xs = metrics_all[x] models[xs + '_train'] = acc_all[(x-1)*2] models[xs + '_test'] = acc_all[(x-1)*2+1] if xs == "acc": models[xs + '_diff'] = models[xs + '_train'] - models[xs + '_test'] models
Titanic - Machine Learning from Disaster
6,678,936
class CyclicLR(object): def __init__(self, optimizer, base_lr=1e-3, max_lr=6e-3, step_size=2000, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', last_batch_iteration=-1): if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer ).__name__)) self.optimizer = optimizer if isinstance(base_lr, list)or isinstance(base_lr, tuple): if len(base_lr)!= len(optimizer.param_groups): raise ValueError("expected {} base_lr, got {}".format( len(optimizer.param_groups), len(base_lr))) self.base_lrs = list(base_lr) else: self.base_lrs = [base_lr] * len(optimizer.param_groups) if isinstance(max_lr, list)or isinstance(max_lr, tuple): if len(max_lr)!= len(optimizer.param_groups): raise ValueError("expected {} max_lr, got {}".format( len(optimizer.param_groups), len(max_lr))) self.max_lrs = list(max_lr) else: self.max_lrs = [max_lr] * len(optimizer.param_groups) self.step_size = step_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.batch_step(last_batch_iteration + 1) self.last_batch_iteration = last_batch_iteration def batch_step(self, batch_iteration=None): if batch_iteration is None: batch_iteration = self.last_batch_iteration + 1 self.last_batch_iteration = batch_iteration for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()): param_group['lr'] = lr def _triangular_scale_fn(self, x): return 1. 
def _triangular2_scale_fn(self, x): return 1 /(2.**(x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): step_size = float(self.step_size) cycle = np.floor(1 + self.last_batch_iteration /(2 * step_size)) x = np.abs(self.last_batch_iteration / step_size - 2 * cycle + 1) lrs = [] param_lrs = zip(self.optimizer.param_groups, self.base_lrs, self.max_lrs) for param_group, base_lr, max_lr in param_lrs: base_height =(max_lr - base_lr)* np.maximum(0,(1 - x)) if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_batch_iteration) lrs.append(lr) return lrs <choose_model_class>
print('Prediction accuracy for models') ms = metrics_all[metrics_now[1]] models.sort_values(by=[(ms + '_test'),(ms + '_train')], ascending=False )
Titanic - Machine Learning from Disaster
6,678,936
embedding_dim = 300 embedding_path = '.. /save/embedding_matrix.npy' use_pretrained_embedding = True hidden_size = 60 gru_len = hidden_size Routings = 4 Num_capsule = 5 Dim_capsule = 5 dropout_p = 0.25 rate_drop_dense = 0.28 LR = 0.001 T_epsilon = 1e-7 num_classes = 30 class Embed_Layer(nn.Module): def __init__(self, embedding_matrix=None, vocab_size=None, embedding_dim=300): super(Embed_Layer, self ).__init__() self.encoder = nn.Embedding(vocab_size + 1, embedding_dim) if use_pretrained_embedding: self.encoder.weight.data.copy_(t.from_numpy(embedding_matrix)) def forward(self, x, dropout_p=0.25): return nn.Dropout(p=dropout_p )(self.encoder(x)) class GRU_Layer(nn.Module): def __init__(self): super(GRU_Layer, self ).__init__() self.gru = nn.GRU(input_size=300, hidden_size=gru_len, bidirectional=True) def init_weights(self): ih =(param.data for name, param in self.named_parameters() if 'weight_ih' in name) hh =(param.data for name, param in self.named_parameters() if 'weight_hh' in name) b =(param.data for name, param in self.named_parameters() if 'bias' in name) for k in ih: nn.init.xavier_uniform_(k) for k in hh: nn.init.orthogonal_(k) for k in b: nn.init.constant_(k, 0) def forward(self, x): return self.gru(x) class Caps_Layer(nn.Module): def __init__(self, input_dim_capsule=gru_len * 2, num_capsule=Num_capsule, dim_capsule=Dim_capsule, \ routings=Routings, kernel_size=(9, 1), share_weights=True, activation='default', **kwargs): super(Caps_Layer, self ).__init__(**kwargs) self.num_capsule = num_capsule self.dim_capsule = dim_capsule self.routings = routings self.kernel_size = kernel_size self.share_weights = share_weights if activation == 'default': self.activation = self.squash else: self.activation = nn.ReLU(inplace=True) if self.share_weights: self.W = nn.Parameter( nn.init.xavier_normal_(t.empty(1, input_dim_capsule, self.num_capsule * self.dim_capsule))) else: self.W = nn.Parameter( t.randn(BATCH_SIZE, input_dim_capsule, self.num_capsule * self.dim_capsule)) 
def forward(self, x): if self.share_weights: u_hat_vecs = t.matmul(x, self.W) else: print('add later') batch_size = x.size(0) input_num_capsule = x.size(1) u_hat_vecs = u_hat_vecs.view(( batch_size, input_num_capsule, self.num_capsule, self.dim_capsule)) u_hat_vecs = u_hat_vecs.permute(0, 2, 1, 3) b = t.zeros_like(u_hat_vecs[:, :, :, 0]) for i in range(self.routings): b = b.permute(0, 2, 1) c = F.softmax(b, dim=2) c = c.permute(0, 2, 1) b = b.permute(0, 2, 1) outputs = self.activation(t.einsum('bij,bijk->bik',(c, u_hat_vecs))) if i < self.routings - 1: b = t.einsum('bik,bijk->bij',(outputs, u_hat_vecs)) return outputs def squash(self, x, axis=-1): s_squared_norm =(x ** 2 ).sum(axis, keepdim=True) scale = t.sqrt(s_squared_norm + T_epsilon) return x / scale class Capsule_Main(nn.Module): def __init__(self, embedding_matrix=None, vocab_size=None): super(Capsule_Main, self ).__init__() self.embed_layer = Embed_Layer(embedding_matrix, vocab_size) self.gru_layer = GRU_Layer() self.gru_layer.init_weights() self.caps_layer = Caps_Layer() self.dense_layer = Dense_Layer() def forward(self, content): content1 = self.embed_layer(content) content2, _ = self.gru_layer( content1) content3 = self.caps_layer(content2) output = self.dense_layer(content3) return output <normalization>
pd.options.display.float_format = '{:,.2f}'.format
Titanic - Machine Learning from Disaster
6,678,936
class Attention(nn.Module): def __init__(self, feature_dim, step_dim, bias=True, **kwargs): super(Attention, self ).__init__(**kwargs) self.supports_masking = True self.bias = bias self.feature_dim = feature_dim self.step_dim = step_dim self.features_dim = 0 weight = torch.zeros(feature_dim, 1) nn.init.xavier_uniform_(weight) self.weight = nn.Parameter(weight) if bias: self.b = nn.Parameter(torch.zeros(step_dim)) def forward(self, x, mask=None): feature_dim = self.feature_dim step_dim = self.step_dim eij = torch.mm( x.contiguous().view(-1, feature_dim), self.weight ).view(-1, step_dim) if self.bias: eij = eij + self.b eij = torch.tanh(eij) a = torch.exp(eij) if mask is not None: a = a * mask a = a / torch.sum(a, 1, keepdim=True)+ 1e-10 weighted_input = x * torch.unsqueeze(a, -1) return torch.sum(weighted_input, 1) class NeuralNet(nn.Module): def __init__(self): super(NeuralNet, self ).__init__() fc_layer = 16 fc_layer1 = 16 self.embedding = nn.Embedding(max_features, embed_size) self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32)) self.embedding.weight.requires_grad = False self.embedding_dropout = nn.Dropout2d(0.1) self.lstm = nn.LSTM(embed_size, hidden_size, bidirectional=True, batch_first=True) self.gru = nn.GRU(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True) self.lstm_attention = Attention(hidden_size * 2, maxlen) self.gru_attention = Attention(hidden_size * 2, maxlen) self.bn = nn.BatchNorm1d(16, momentum=0.5) self.linear = nn.Linear(hidden_size*8+3, fc_layer1) self.relu = nn.ReLU() self.dropout = nn.Dropout(0.1) self.fc = nn.Linear(fc_layer**2,fc_layer) self.out = nn.Linear(fc_layer, 1) self.lincaps = nn.Linear(Num_capsule * Dim_capsule, 1) self.caps_layer = Caps_Layer() def forward(self, x): h_embedding = self.embedding(x[0]) h_embedding = torch.squeeze( self.embedding_dropout(torch.unsqueeze(h_embedding, 0))) 
h_lstm, _ = self.lstm(h_embedding) h_gru, _ = self.gru(h_lstm) content3 = self.caps_layer(h_gru) content3 = self.dropout(content3) batch_size = content3.size(0) content3 = content3.view(batch_size, -1) content3 = self.relu(self.lincaps(content3)) h_lstm_atten = self.lstm_attention(h_lstm) h_gru_atten = self.gru_attention(h_gru) avg_pool = torch.mean(h_gru, 1) max_pool, _ = torch.max(h_gru, 1) f = torch.tensor(x[1], dtype=torch.float ).cuda() conc = torch.cat(( h_lstm_atten, h_gru_atten,content3, avg_pool, max_pool,f), 1) conc = self.relu(self.linear(conc)) conc = self.bn(conc) conc = self.dropout(conc) out = self.out(conc) return out<categorify>
metrics_main = 2 xs = metrics_all[metrics_main] xs_train = metrics_all[metrics_main] + '_train' xs_test = metrics_all[metrics_main] + '_test' print('The best models by the',xs,'criterion:') direct_sort = False if(metrics_main >= 2)else True models_sort = models.sort_values(by=[xs_test, xs_train], ascending=direct_sort )
Titanic - Machine Learning from Disaster
6,678,936
class MyDataset(Dataset): def __init__(self,dataset): self.dataset = dataset def __getitem__(self, index): data, target = self.dataset[index] return data, target, index def __len__(self): return len(self.dataset )<define_variables>
models_sort = models_sort[models_sort.Model != 'VotingClassifier'] models_best = models_sort[(models_sort.acc_diff < 5)&(models_sort.acc_train > 90)] models_best[['Model', ms + '_train', ms + '_test', 'acc_diff']].sort_values(by=['acc_test'], ascending=False )
Titanic - Machine Learning from Disaster
6,678,936
def sigmoid(x): return 1 /(1 + np.exp(-x)) train_preds = np.zeros(( len(x_train))) test_preds = np.zeros(( len(df_test))) seed_everything() x_test_cuda = torch.tensor(x_test, dtype=torch.long ).cuda() test = torch.utils.data.TensorDataset(x_test_cuda) test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=False) avg_losses_f = [] avg_val_losses_f = []<data_type_conversions>
models_pred = pd.DataFrame(models_best.Model, columns = ['Model']) N_best_models = len(models_best.Model )
Titanic - Machine Learning from Disaster
6,678,936
for i,(train_idx, valid_idx)in enumerate(splits): x_train = np.array(x_train) y_train = np.array(y_train) features = np.array(features) x_train_fold = torch.tensor(x_train[train_idx.astype(int)], dtype=torch.long ).cuda() y_train_fold = torch.tensor(y_train[train_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() kfold_X_features = features[train_idx.astype(int)] kfold_X_valid_features = features[valid_idx.astype(int)] x_val_fold = torch.tensor(x_train[valid_idx.astype(int)], dtype=torch.long ).cuda() y_val_fold = torch.tensor(y_train[valid_idx.astype(int), np.newaxis], dtype=torch.float32 ).cuda() model = NeuralNet() model.cuda() loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum') step_size = 300 base_lr, max_lr = 0.001, 0.003 optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=max_lr) scheduler = CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size=step_size, mode='exp_range', gamma=0.99994) train = torch.utils.data.TensorDataset(x_train_fold, y_train_fold) valid = torch.utils.data.TensorDataset(x_val_fold, y_val_fold) train = MyDataset(train) valid = MyDataset(valid) train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=True) valid_loader = torch.utils.data.DataLoader(valid, batch_size=batch_size, shuffle=False) print(f'Fold {i + 1}') for epoch in range(n_epochs): start_time = time.time() model.train() avg_loss = 0. for i,(x_batch, y_batch, index)in enumerate(train_loader): f = kfold_X_features[index] y_pred = model([x_batch,f]) if scheduler: scheduler.batch_step() loss = loss_fn(y_pred, y_batch) optimizer.zero_grad() loss.backward() optimizer.step() avg_loss += loss.item() / len(train_loader) model.eval() valid_preds_fold = np.zeros(( x_val_fold.size(0))) test_preds_fold = np.zeros(( len(df_test))) avg_val_loss = 0. 
for i,(x_batch, y_batch, index)in enumerate(valid_loader): f = kfold_X_valid_features[index] y_pred = model([x_batch,f] ).detach() avg_val_loss += loss_fn(y_pred, y_batch ).item() / len(valid_loader) valid_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] elapsed_time = time.time() - start_time print('Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'.format( epoch + 1, n_epochs, avg_loss, avg_val_loss, elapsed_time)) avg_losses_f.append(avg_loss) avg_val_losses_f.append(avg_val_loss) for i,(x_batch,)in enumerate(test_loader): f = test_features[i * batch_size:(i+1)* batch_size] y_pred = model([x_batch,f] ).detach() test_preds_fold[i * batch_size:(i+1)* batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0] train_preds[valid_idx] = valid_preds_fold test_preds += test_preds_fold / len(splits) print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(np.average(avg_losses_f),np.average(avg_val_losses_f))) <compute_test_metric>
def model_fit(name_model,train,target): if name_model == 'LGBMClassifier': Xtrain, Xval, Ztrain, Zval = train_test_split(train, target, test_size=test_train_split_part, random_state=random_state) model = lgb.LGBMClassifier(n_estimators=1000) model.fit(Xtrain, Ztrain, eval_set=[(Xval, Zval)], early_stopping_rounds=50, verbose=False) else: param_grid={} if name_model == 'Linear Regression': model_clf = LinearRegression() elif name_model == 'Support Vector Machines': model_clf = SVC() param_grid = {'kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'tol': [1e-4]} elif name_model == 'Linear SVC': model_clf = LinearSVC() param_grid = {'dual':[False], 'C': np.linspace(1, 15, 15)} elif name_model == 'MLPClassifier': model_clf = MLPClassifier() param_grid = {'hidden_layer_sizes': [i for i in range(2,5)], 'solver': ['sgd'], 'learning_rate': ['adaptive'], 'max_iter': [1000] } elif name_model == 'Stochastic Gradient Decent': model_clf = SGDClassifier(early_stopping=True) param_grid = {'alpha': [0.0001, 0.001, 0.01, 0.1, 1]} elif name_model == 'Decision Tree Classifier': model_clf = DecisionTreeClassifier() param_grid = {'min_samples_leaf': [i for i in range(2,10)]} elif name_model == 'Random Forest': model_clf = RandomForestClassifier() param_grid = {'n_estimators': [300, 400, 500, 600], 'min_samples_split': [60], 'min_samples_leaf': [20, 25, 30, 35, 40], 'max_features': ['auto'], 'max_depth': [5, 6, 7, 8, 9, 10], 'criterion': ['gini'], 'bootstrap': [False]} elif name_model == 'XGBClassifier': model_clf = xgb.XGBClassifier(objective='reg:squarederror') param_grid = {'n_estimators': [200, 300, 400], 'learning_rate': [0.001, 0.003, 0.005, 0.006, 0.01], 'max_depth': [4, 5, 6]} elif name_model == 'GradientBoostingClassifier': model_clf = GradientBoostingClassifier() param_grid = {'learning_rate' : [0.001, 0.01, 0.1], 'max_depth': [i for i in range(2,5)], 'min_samples_leaf': [i for i in range(2,5)]} elif name_model == 'RidgeClassifier': model_clf = RidgeClassifier() 
param_grid={'alpha': np.linspace (.1, 1.5, 15)} elif name_model == 'BaggingClassifier': model_base_estimator = GridSearchCV(LinearSVC() , param_grid={'dual':[False], 'C': np.linspace(1, 15, 15)}, cv=cv_train, verbose=False) model_base_estimator.fit(train, target) model_clf = BaggingClassifier(base_estimator=model_base_estimator) param_grid={'max_features': [0.5, 0.6, 0.7, 0.8, 0.9, 1.0], 'n_estimators': [3, 5, 10], 'warm_start' : [True], 'random_state': [random_state]} elif name_model == 'ExtraTreesClassifier': model_clf = ExtraTreesClassifier() param_grid={'min_samples_leaf' : [10, 20, 30, 40, 50]} elif name_model == 'AdaBoostClassifier': model_clf = AdaBoostClassifier() param_grid={'learning_rate' : [.01,.1,.5, 1]} elif name_model == 'Logistic Regression': model_clf = LogisticRegression() param_grid={'C' : [.1,.3,.5,.7, 1]} elif name_model == 'KNN': model_clf = KNeighborsClassifier() param_grid={'n_neighbors': range(2, 7)} elif name_model == 'Naive Bayes': model_clf = GaussianNB() param_grid={'var_smoothing': [1e-8, 1e-9, 1e-10]} elif name_model == 'Perceptron': model_clf = Perceptron() param_grid = {'penalty': [None, 'l2', 'l1', 'elasticnet']} elif name_model == 'Gaussian Process Classification': model_clf = GaussianProcessClassifier() param_grid = {'max_iter_predict': [100, 200], 'warm_start': [True, False], 'n_restarts_optimizer': range(3)} model = GridSearchCV(model_clf, param_grid=param_grid, cv=cv_train, verbose=False) model.fit(train, target) return model
Titanic - Machine Learning from Disaster
6,678,936
def bestThresshold(y_train,train_preds): tmp = [0,0,0] delta = 0 for tmp[0] in tqdm(np.arange(0.1, 0.501, 0.01)) : tmp[1] = f1_score(y_train, np.array(train_preds)>tmp[0]) if tmp[1] > tmp[2]: delta = tmp[0] tmp[2] = tmp[1] print('best threshold is {:.4f} with F1 score: {:.4f}'.format(delta, tmp[2])) return delta delta = bestThresshold(y_train,train_preds )<save_to_csv>
for i in range(N_best_models): name_model = models_best.iloc[i]['Model'] if(name_model == 'LGBMClassifier')or(name_model == 'XGBClassifier'): model = model_fit(name_model,train0b,target0) acc_metrics_calc_pred(i,model,name_model,train0b,test0b,target0) else: model = model_fit(name_model,train0,target0) acc_metrics_calc_pred(i,model,name_model,train0,test0,target0 )
Titanic - Machine Learning from Disaster
6,678,936
<set_options><EOS>
for x in metrics_now: xs = metrics_all[x] models_pred[xs + '_train'] = acc_all_pred[(x-1)] models_pred[['Model', 'acc_train']].sort_values(by=['acc_train'], ascending=False )
Titanic - Machine Learning from Disaster
965,330
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
import numpy as np import pandas as pd from catboost import CatBoostClassifier, Pool, cv import hyperopt
Titanic - Machine Learning from Disaster
965,330
EMBEDDING_FILE = '.. /input/embeddings/glove.840B.300d/glove.840B.300d.txt' train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv') submission = pd.read_csv('.. /input/sample_submission.csv' )<string_transform>
train = pd.read_csv('.. /input/train.csv') test = pd.read_csv('.. /input/test.csv') train_size = train.shape[0] test_size = test.shape[0] data = pd.concat([train, test] )
Titanic - Machine Learning from Disaster
965,330
X_train = train["question_text"].fillna("fillna" ).values y_train = train["target"].values X_test = test["question_text"].fillna("fillna" ).values max_features = 40000 maxlen = 50 embed_size = 300 tokenizer = text.Tokenizer(num_words=max_features) tokenizer.fit_on_texts(list(X_train)+ list(X_test)) X_train = tokenizer.texts_to_sequences(X_train) X_test = tokenizer.texts_to_sequences(X_test) x_train = sequence.pad_sequences(X_train, maxlen=maxlen) x_test = sequence.pad_sequences(X_test, maxlen=maxlen )<feature_engineering>
data['Title'] = data['Name'].str.extract('([A-Za-z]+)\.', expand=False )
Titanic - Machine Learning from Disaster
965,330
def get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open(EMBEDDING_FILE)) word_index = tokenizer.word_index nb_words = min(max_features, len(word_index)) embedding_matrix = np.zeros(( nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector<predict_on_test>
age_ref = data.groupby('Title' ).Age.mean() data['Age'] = data.apply(lambda r: r.Age if pd.notnull(r.Age)else age_ref[r.Title] , axis=1) del age_ref
Titanic - Machine Learning from Disaster
965,330
class F1Evaluation(Callback): def __init__(self, validation_data=() , interval=1): super(Callback, self ).__init__() self.interval = interval self.X_val, self.y_val = validation_data def on_epoch_end(self, epoch, logs={}): if epoch % self.interval == 0: y_pred = self.model.predict(self.X_val, verbose=0) y_pred =(y_pred > 0.35 ).astype(int) score = f1_score(self.y_val, y_pred) print(" F1 Score - epoch: %d - score: %.6f " %(epoch+1, score))<choose_model_class>
data.loc[(data.PassengerId==1044, 'Fare')] = 14.43
Titanic - Machine Learning from Disaster
965,330
filter_sizes = [1,2,3,5] num_filters = 36 def get_model() : inp = Input(shape=(maxlen,)) x = Embedding(max_features, embed_size, weights=[embedding_matrix] )(inp) x = SpatialDropout1D(0.4 )(x) conv_0 = Conv1D(num_filters, kernel_size=(filter_sizes[0]), kernel_initializer='he_normal', activation='elu' )(x) conv_1 = Conv1D(num_filters, kernel_size=(filter_sizes[1]), kernel_initializer='he_normal', activation='elu' )(x) conv_2 = Conv1D(num_filters, kernel_size=(filter_sizes[2]), kernel_initializer='he_normal', activation='elu' )(x) conv_3 = Conv1D(num_filters, kernel_size=(filter_sizes[3]), kernel_initializer='he_normal', activation='elu' )(x) maxpool_0 = MaxPool1D(pool_size=(maxlen - filter_sizes[0] + 1))(conv_0) maxpool_1 = MaxPool1D(pool_size=(maxlen - filter_sizes[1] + 1))(conv_1) maxpool_2 = MaxPool1D(pool_size=(maxlen - filter_sizes[2] + 1))(conv_2) maxpool_3 = MaxPool1D(pool_size=(maxlen - filter_sizes[3] + 1))(conv_3) z = Concatenate(axis=1 )([maxpool_0, maxpool_1, maxpool_2, maxpool_3]) z = Flatten()(z) z = BatchNormalization()(z) outp = Dense(1, activation="sigmoid" )(z) model = Model(inputs=inp, outputs=outp) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model model = get_model()<train_model>
data['Embarked'] = data['Embarked'].fillna('S') data['Cabin'] = data['Cabin'].fillna('Undefined' )
Titanic - Machine Learning from Disaster
965,330
batch_size = 1024 epochs = 4 X_tra, X_val, y_tra, y_val = train_test_split(x_train, y_train, train_size=0.95, random_state=233) F1_Score = F1Evaluation(validation_data=(X_val, y_val), interval=1) hist = model.fit(X_tra, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_val, y_val), callbacks=[F1_Score], verbose=True )<choose_model_class>
cols = [ 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked' ] X_train = data[:train_size][cols] Y_train = data[:train_size]['Survived'].astype(int) X_test = data[train_size:][cols] categorical_features_indices = [0,1,2,6,8,9] X_train.head()
Titanic - Machine Learning from Disaster
965,330
filter_sizes = [1,2,3,5] num_filters = 36 def get_model() : inp = Input(shape=(maxlen,)) x = Lambda(lambda x: K.reverse(x,axes=-1))(inp) x = Embedding(max_features, embed_size, weights=[embedding_matrix] )(x) x = SpatialDropout1D(0.4 )(x) conv_0 = Conv1D(num_filters, kernel_size=(filter_sizes[0]), kernel_initializer='he_normal', activation='elu' )(x) conv_1 = Conv1D(num_filters, kernel_size=(filter_sizes[1]), kernel_initializer='he_normal', activation='elu' )(x) conv_2 = Conv1D(num_filters, kernel_size=(filter_sizes[2]), kernel_initializer='he_normal', activation='elu' )(x) conv_3 = Conv1D(num_filters, kernel_size=(filter_sizes[3]), kernel_initializer='he_normal', activation='elu' )(x) maxpool_0 = MaxPool1D(pool_size=(maxlen - filter_sizes[0] + 1))(conv_0) maxpool_1 = MaxPool1D(pool_size=(maxlen - filter_sizes[1] + 1))(conv_1) maxpool_2 = MaxPool1D(pool_size=(maxlen - filter_sizes[2] + 1))(conv_2) maxpool_3 = MaxPool1D(pool_size=(maxlen - filter_sizes[3] + 1))(conv_3) z = Concatenate(axis=1 )([maxpool_0, maxpool_1, maxpool_2, maxpool_3]) z = Flatten()(z) z = BatchNormalization()(z) outp = Dense(1, activation="sigmoid" )(z) model = Model(inputs=inp, outputs=outp) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) return model model_flip = get_model() <train_model>
train_pool = Pool(X_train, Y_train, cat_features=categorical_features_indices )
Titanic - Machine Learning from Disaster
965,330
hist_flip = model_flip.fit(X_tra, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_val, y_val), callbacks=[F1_Score], verbose=True )<predict_on_test>
Titanic - Machine Learning from Disaster
965,330
val_y_pred1 = model.predict(X_val, batch_size=1024, verbose = True) val_y_pred2 = model_flip.predict(X_val, batch_size=1024, verbose = True) <compute_test_metric>
model = CatBoostClassifier( depth=3, iterations=300, eval_metric='Accuracy', random_seed=42, logging_level='Silent', allow_writing_files=False ) cv_data = cv( train_pool, model.get_params() , fold_count=5 ) print('Best validation accuracy score: {:.2f}±{:.2f} on step {}'.format( np.max(cv_data['test-Accuracy-mean']), cv_data['test-Accuracy-std'][cv_data['test-Accuracy-mean'].idxmax(axis=0)], cv_data['test-Accuracy-mean'].idxmax(axis=0) )) print('Precise validation accuracy score: {}'.format(np.max(cv_data['test-Accuracy-mean']))) model.fit(train_pool); model.score(X_train, Y_train )
Titanic - Machine Learning from Disaster
965,330
val_y_pred = np.mean([val_y_pred1,val_y_pred2],axis = 0 )<predict_on_test>
feature_importances = model.get_feature_importance(train_pool) feature_names = X_train.columns for score, name in sorted(zip(feature_importances, feature_names), reverse=True): print('{}: {}'.format(name, score))
Titanic - Machine Learning from Disaster
965,330
<compute_test_metric><EOS>
Y_pred = model.predict(X_test) submission = pd.DataFrame({ "PassengerId": data[train_size:]["PassengerId"], "Survived": Y_pred.astype(int) }) submission.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
737,908
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
warnings.filterwarnings('ignore' )
Titanic - Machine Learning from Disaster
737,908
best_threshold = 0.01 best_score = 0.0 for threshold in range(1, 100): threshold = threshold / 100 score = f1_score(y_val, val_y_pred > threshold) if score > best_score: best_threshold = threshold best_score = score print("Score at threshold=0.5 is {}".format(f1_score(y_val, val_y_pred > 0.5))) print("Optimal threshold is {} with a score of {}".format(best_threshold, best_score))<save_to_csv>
train_set = pd.read_csv('.. /input/train.csv') test_set = pd.read_csv('.. /input/test.csv') train_set.shape, test_set.shape
Titanic - Machine Learning from Disaster
737,908
y_pred =(y_pred > best_threshold ).astype(int) submission['prediction'] = y_pred submission.to_csv('submission.csv', index=False )<set_options>
full_set = pd.concat([train_set, test_set]) full_set.head()
Titanic - Machine Learning from Disaster
737,908
start = time.time() seed = 32 os.environ['PYTHONHASHSEED'] = str(seed) os.environ['OMP_NUM_THREADS'] = '4' np.random.seed(seed) rn.seed(seed) session_conf = tf.ConfigProto(intra_op_parallelism_threads = 1, inter_op_parallelism_threads = 1) tf.set_random_seed(seed) sess = tf.Session(graph = tf.get_default_graph() , config = session_conf) K.set_session(sess) train = pd.read_csv(".. /input/train.csv" ).fillna("missing") test = pd.read_csv(".. /input/test.csv" ).fillna("missing") embedding_file1 = ".. /input/embeddings/glove.840B.300d/glove.840B.300d.txt" embedding_file2 = ".. /input/embeddings/paragram_300_sl999/paragram_300_sl999.txt" embed_size = 300 max_features = 100000 max_len = 60<define_variables>
full_set['Age'][full_set['Age'].isnull() ] = full_set['Age'].median() full_set['Age'] = full_set['Age'].astype(int )
Titanic - Machine Learning from Disaster
737,908
puncts = [',', '.', '"', ':', ')', '(', '-', '!', '?', '|', ';', "'", '$', '&', '/', '[', ']', '>', '%', '=', ' '·', '_', '{', '}', '©', '^', '®', '`', '<', '→', '°', '€', '™', '›', '♥', '←', '×', '§', '″', '′', 'Â', '█', '½', 'à', '…', '“', '★', '”', '–', '●', 'â', '►', '−', '¢', '²', '¬', '░', '¶', '↑', '±', '¿', '▾', '═', '¦', '║', '―', '¥', '▓', '—', '‹', '─', '▒', ':', '¼', '⊕', '▼', '▪', '†', '■', '’', '▀', '¨', '▄', '♫', '☆', 'é', '¯', '♦', '¤', '▲', 'è', '¸', '¾', 'Ã', '⋅', '‘', '∞', '∙', ')', '↓', '、', '│', '(', '»', ',', '♪', '╩', '╚', '³', '・', '╦', '╣', '╔', '╗', '▬', '❤', 'ï', 'Ø', '¹', '≤', '‡', '√', 'β', 'α', '∅', 'θ', '÷', '₹'] def clean_punct(x): x = str(x) for punct in puncts: if punct in x: x = x.replace(punct, f' {punct} ') return x train["question_text"] = train["question_text"].apply(lambda x: clean_punct(x)) test["question_text"] = test["question_text"].apply(lambda x: clean_punct(x))<define_variables>
full_set['HasCabin'] = full_set['Cabin'].apply(lambda x: 0 if isinstance(x, float)else 1 )
Titanic - Machine Learning from Disaster
737,908
sincere = train[train["target"] == 0] insincere = train[train["target"] == 1] print("Sincere questions {}; Insincere questions {}".format(sincere.shape[0], insincere.shape[0]))<compute_train_metric>
full_set['Embarked'][full_set['Embarked'].isnull() ] = 'S' full_set['Embarked'] = full_set['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int )
Titanic - Machine Learning from Disaster
737,908
def get_glove(embedding_file): def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(embedding_file)) all_embs = np.stack(embeddings_index.values()) emb_mean, emb_std = all_embs.mean() , all_embs.std() return embeddings_index, emb_mean, emb_std def get_para(embedding_file): def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32') embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(embedding_file, encoding="utf8", errors='ignore')if len(o)>100) all_embs = np.stack(embeddings_index.values()) emb_mean, emb_std = all_embs.mean() , all_embs.std() return embeddings_index, emb_mean, emb_std glove_index, glove_mean, glove_std = get_glove(embedding_file1) para_index, para_mean, para_std = get_para(embedding_file2 )<categorify>
full_set[ full_set['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
737,908
def get_embed(tokenizer = None, embeddings_index = None, emb_mean = None, emb_std = None): word_index = tokenizer.word_index nb_words = min(max_features, len(word_index)) embedding_matrix = np.random.normal(emb_mean, emb_std,(nb_words, embed_size)) for word, i in word_index.items() : if i >= max_features: continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector return nb_words, embedding_matrix<string_transform>
full_set['Fare'][ full_set['Fare'].isnull() ] = 0 .
Titanic - Machine Learning from Disaster
737,908
tokenizer = Tokenizer(num_words = max_features, lower = True) tokenizer.fit_on_texts(train["question_text"]) train_token = tokenizer.texts_to_sequences(train["question_text"]) fake_test_token = tokenizer.texts_to_sequences(fake_test["question_text"]) test_token = tokenizer.texts_to_sequences(test["question_text"]) train_seq = pad_sequences(train_token, maxlen = max_len) fake_test_seq = pad_sequences(fake_test_token, maxlen = max_len) X_test = pad_sequences(test_token, maxlen = max_len) del train_token, fake_test_token, test_token; gc.collect()<load_pretrained>
full_set['Title'] = full_set['Name'].apply(get_title) print(full_set['Title'].unique() )
Titanic - Machine Learning from Disaster
737,908
nb_words, embedding_matrix1 = get_embed(tokenizer = tokenizer, embeddings_index = glove_index, emb_mean = glove_mean, emb_std = glove_std) nb_words, embedding_matrix2 = get_embed(tokenizer = tokenizer, embeddings_index = para_index, emb_mean = para_mean, emb_std = para_std) embedding_matrix = np.mean([embedding_matrix1, embedding_matrix2], axis = 0) del embedding_matrix1, embedding_matrix2; gc.collect() print("Embedding matrix completed!" )<choose_model_class>
commons = ['Mr','Mrs','Miss','Mme','Ms','Mlle'] rares = list(set(full_set['Title'].unique())- set(commons)) full_set['Title'] = full_set['Title'].replace('Ms','Miss') full_set['Title'] = full_set['Title'].replace('Mlle','Miss') full_set['Title'] = full_set['Title'].replace('Mme','Mrs') full_set['Title'][full_set['Title'].isin(rares)] = 'Rare' full_set['Title'] = full_set['Title'].map({'Mr':0, 'Mrs':1, 'Miss':2, 'Rare':3} )
Titanic - Machine Learning from Disaster
737,908
class Attention(Layer): def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias self.step_dim = step_dim self.features_dim = 0 super(Attention, self ).__init__(**kwargs) def build(self, input_shape): assert len(input_shape)== 3 self.W = self.add_weight(( input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight(( input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): return None def call(self, x, mask=None): features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x,(-1, features_dim)) , K.reshape(self.W,(features_dim, 1))),(-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) if mask is not None: a *= K.cast(mask, K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True)+ K.epsilon() , K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], self.features_dim<find_best_params>
full_set['FamSize'] = full_set['Parch'] + full_set['SibSp'] + 1
Titanic - Machine Learning from Disaster
737,908
def get_f1(true, val): precision, recall, thresholds = precision_recall_curve(true, val) thresholds = np.append(thresholds, 1.001) F = 2 /(1/precision + 1/recall) best_score = np.max(F) best_threshold = thresholds[np.argmax(F)] return best_threshold, best_score<choose_model_class>
full_set['Sex'] = full_set['Sex'].map({'male':0, 'female':1} )
Titanic - Machine Learning from Disaster
737,908
def build_model(units = 40, dr = 0.3): inp = Input(shape =(max_len,)) embed_layer = Embedding(nb_words, embed_size, input_length = max_len, weights = [embedding_matrix], trainable = False )(inp) x = SpatialDropout1D(dr, seed = seed )(embed_layer) x = Bidirectional(CuDNNLSTM(units, kernel_initializer = glorot_normal(seed = seed), recurrent_initializer = orthogonal(gain = 1.0, seed = seed), return_sequences = True))(x) x = Bidirectional(CuDNNGRU(units, kernel_initializer = glorot_normal(seed = seed), recurrent_initializer = orthogonal(gain = 1.0, seed = seed), return_sequences = True))(x) att = Attention(max_len )(x) avg_pool = GlobalAveragePooling1D()(x) max_pool = GlobalMaxPooling1D()(x) main = concatenate([att, avg_pool, max_pool]) main = Dense(64, kernel_initializer = glorot_normal(seed = seed))(main) main = Activation("relu" )(main) main = Dropout(0.1, seed = seed )(main) out = Dense(1, activation = "sigmoid", kernel_initializer = glorot_normal(seed = seed))(main) model = Model(inputs = inp, outputs = out) model.compile(loss = "binary_crossentropy", optimizer = Adam() , metrics = None) return model<split>
full_set.drop(['Cabin','Name','Parch','PassengerId','SibSp','Ticket'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
737,908
fold = 5 batch_size = 1024 epochs = 5 oof_pred = np.zeros(( train.shape[0], 1)) pred = np.zeros(( test_shape[0], 1)) fake_pred = np.zeros(( test_shape[0], 1)) thresholds = [] k_fold = StratifiedKFold(n_splits = fold, random_state = seed, shuffle = True) for i,(train_idx, val_idx)in enumerate(k_fold.split(train_seq, target)) : print("-"*50) print("Trainging fold {}/{}".format(i+1, fold)) X_train, y_train = train_seq[train_idx], target[train_idx] X_val, y_val = train_seq[val_idx], target[val_idx] K.clear_session() model = build_model(units = 60) model.fit(X_train, y_train, batch_size = batch_size, epochs = epochs, validation_data =(X_val, y_val), verbose = 2) val_pred = model.predict(X_val, batch_size = batch_size) oof_pred[val_idx] = val_pred fake_pred += model.predict(fake_test_seq, batch_size = batch_size)/fold pred += model.predict(X_test, batch_size = batch_size)/fold threshold, score = get_f1(y_val, val_pred) print("F1 score at threshold {} is {}".format(threshold, score))<compute_test_metric>
def fare_bin(fare): if fare <= 7.8958: return 0. elif 7.8958 < fare <= 14.4542: return 1. elif 14.4542 < fare <= 31.2750: return 2. else: return 3 .
Titanic - Machine Learning from Disaster