kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
11,748,875 | pred_val_l = pred_val[0].argmax(1 )<import_modules> | test['Ticket_Class'].fillna(value='0',inplace=True)
test['Sex'] = les.transform(test['Sex'])
test['Ticket_Class'] = letc.transform(test['Ticket_Class'].astype(str))
test['Cabin'] = lec.transform(test['Cabin'].astype(str))
test['Embarked'] = lee.transform(test['Embarked'].astype(str))
test['Ticket_Number'] = letn.transform(test['Ticket_Number'].astype(str))
test['Title'] = let.transform(test['Title'].astype(str))
| Titanic - Machine Learning from Disaster |
11,748,875 | from sklearn.metrics import classification_report<compute_test_metric> | X_test = test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch',
'Fare', 'Cabin', 'Embarked', 'Ticket_Class','Ticket_Number','Title']] | Titanic - Machine Learning from Disaster |
11,748,875 | print(classification_report(pred_val[1], pred_val_l))<predict_on_test> | X_test = imputer.transform(X_test ) | Titanic - Machine Learning from Disaster |
11,748,875 | pred_test, label_test = clf.get_preds(DatasetType.Test, ordered=True )<prepare_output> | y_pred_bc = model_bc.predict(X_test ) | Titanic - Machine Learning from Disaster |
11,748,875 | pred_test_ = pred_test.argmax(1)
pred_test_l = [data_clf.train_ds.y.classes[n] for n in pred_test_]<rename_columns> | lgb = LGBMClassifier() | Titanic - Machine Learning from Disaster |
11,748,875 | res.index.name = "id"<save_to_csv> | lgb.fit(X_res,y_res ) | Titanic - Machine Learning from Disaster |
11,748,875 | pd.DataFrame(res ).to_csv("submission.csv" )<import_modules> | y_pred_lgb = lgb.predict(X_test ) | Titanic - Machine Learning from Disaster |
11,748,875 | from types import SimpleNamespace
from collections import Counter
import os
import re
import pathlib
import array
import pickle
import numpy as np
import torch
import torch.nn as nn
import pandas as pd<define_variables> | Proba_bc=model_bc.predict_proba(X_test)
Proba_lgb=lgb.predict_proba(X_test ) | Titanic - Machine Learning from Disaster |
11,748,875 | DATASET_VERSION = 'ca-100'
COMPETITION_ROOT = '../input/vectors'
DATASET_ROOT = f'../input/cbow-preprocessing/data/{DATASET_VERSION}'
WORKING_ROOT = f'data/{DATASET_VERSION}'
DATASET_PREFIX = 'ca.wiki'<define_variables> | def argmax(iterable):
return max(enumerate(iterable), key=lambda x: x[1])[0] | Titanic - Machine Learning from Disaster |
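The hand-rolled argmax helper above mirrors numpy's built-in; as a quick illustrative check (an addition here, not part of the original cells):

import numpy as np
assert argmax([0.2, 0.8]) == np.argmax([0.2, 0.8])  # both pick index 1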
11,748,875 | params = SimpleNamespace(
embedding_dim = 100,
window_size = 5,
batch_size = 1000,
epochs = 4,
preprocessed = f'{DATASET_ROOT}/{DATASET_PREFIX}',
working = f'{WORKING_ROOT}/{DATASET_PREFIX}',
modelname = f'{WORKING_ROOT}/{DATASET_VERSION}.pt',
train = True
)<categorify> | def custom_pred(bc,lgb):
preds = list()
diff_index = [i for i, x in enumerate(y_pred_bc==y_pred_lgb)if not x]
for index,(i,j)in enumerate(zip(bc,lgb)) :
if index in diff_index:
preds.append(y_pred_bc[index])
else:
preds.append(argmax(i+j)%2)
return preds
| Titanic - Machine Learning from Disaster |
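custom_pred above blends two classifiers element by element; for comparison, a minimal vectorized sketch of plain soft voting, assuming two (n, 2) probability arrays as returned by predict_proba, would be:

import numpy as np

def soft_vote(proba_a, proba_b):
    # Sum the per-class probabilities and take the most likely class per row.
    return np.argmax(proba_a + proba_b, axis=1)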
11,748,875 | class Vocabulary(object):
def __init__(self, pad_token='<pad>', unk_token='<unk>', eos_token='<eos>'):
self.token2idx = {}
self.idx2token = []
self.pad_token = pad_token
self.unk_token = unk_token
self.eos_token = eos_token
if pad_token is not None:
self.pad_index = self.add_token(pad_token)
if unk_token is not None:
self.unk_index = self.add_token(unk_token)
if eos_token is not None:
self.eos_index = self.add_token(eos_token)
def add_token(self, token):
if token not in self.token2idx:
self.idx2token.append(token)
self.token2idx[token] = len(self.idx2token)- 1
return self.token2idx[token]
def get_index(self, token):
if isinstance(token, str):
return self.token2idx.get(token, self.unk_index)
else:
return [self.token2idx.get(t, self.unk_index)for t in token]
def __len__(self):
return len(self.idx2token)
def save(self, filename):
with open(filename, 'wb')as f:
pickle.dump(self.__dict__, f)
def load(self, filename):
with open(filename, 'rb')as f:
self.__dict__.update(pickle.load(f))<define_variables> | preds = custom_pred(Proba_bc,Proba_lgb ) | Titanic - Machine Learning from Disaster |
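A short usage sketch for the Vocabulary class above (the three special tokens reserve indices 0-2, so new tokens start at 3):

vocab = Vocabulary()
vocab.add_token('hello')                     # index 3, after <pad>, <unk>, <eos>
vocab.add_token('world')                     # index 4
print(vocab.get_index(['hello', 'unseen']))  # [3, 1]; unknown tokens map to unk_index
print(len(vocab))                            # 5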
11,748,875 | def batch_generator(idata, target, batch_size, shuffle=True):
nsamples = len(idata)
if shuffle:
perm = np.random.permutation(nsamples)
else:
perm = range(nsamples)
for i in range(0, nsamples, batch_size):
batch_idx = perm[i:i+batch_size]
if target is not None:
yield idata[batch_idx], target[batch_idx]
else:
yield idata[batch_idx], None<categorify> | submission = pd.DataFrame({
"PassengerId": ID,
"Survived": preds
})
submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
5,977,210 | class CBOW(nn.Module):
def __init__(self, num_embeddings, embedding_dim):
super().__init__()
self.emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=0)
self.lin = nn.Linear(embedding_dim, num_embeddings, bias=False)
def forward(self, input):
e = self.emb(input)
u = e.sum(dim=1)
v = self.lin(u)
return v<categorify> | df = pd.read_csv('/kaggle/input/titanic/train.csv')
df2 = pd.read_csv('/kaggle/input/titanic/test.csv')
pd.options.display.max_columns = None
df.head() | Titanic - Machine Learning from Disaster |
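A minimal shape check for the CBOW module above, using hypothetical sizes (vocabulary of 1000, window of 5) rather than anything taken from the original run:

import torch
import torch.nn as nn

model = CBOW(num_embeddings=1000, embedding_dim=100)
contexts = torch.randint(1, 1000, (8, 10))   # batch of 8, 2*window_size context ids each
logits = model(contexts)                     # shape (8, 1000): one score per vocabulary word
loss = nn.CrossEntropyLoss()(logits, torch.randint(0, 1000, (8,)))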
5,977,210 | def load_preprocessed_dataset(prefix):
token_vocab = Vocabulary()
token_vocab.load(f'{prefix}.vocab')
data = []
for part in ['train', 'valid', 'test']:
with np.load(f'{prefix}.{part}.npz')as set_data:
idata, target = set_data['idata'], set_data['target']
data.append(( idata, target))
print(f'Number of samples({part}): {len(target)}')
print("Using precomputed vocabulary and data files")
print(f'Vocabulary size: {len(token_vocab)}')
return token_vocab, data<train_model> | df3 = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
df3.head() | Titanic - Machine Learning from Disaster |
5,977,210 | def train(model, criterion, optimizer, idata, target, batch_size, device, log=False):
model.train()
total_loss = 0
ncorrect = 0
ntokens = 0
niterations = 0
for X, y in batch_generator(idata, target, batch_size, shuffle=True):
X = torch.tensor(X, dtype=torch.long, device=device)
y = torch.tensor(y, dtype=torch.long, device=device)
model.zero_grad()
output = model(X)
loss = criterion(output, y)
loss.backward()
optimizer.step()
total_loss += loss.item()
ncorrect +=(torch.max(output, 1)[1] == y ).sum().item()
ntokens += y.numel()
niterations += 1
if niterations == 200 or niterations == 500 or niterations % 1000 == 0:
print(f'Train: wpb={ntokens//niterations}, num_updates={niterations}, accuracy={100*ncorrect/ntokens:.1f}, loss={total_loss/ntokens:.2f}')
total_loss = total_loss / ntokens
accuracy = 100 * ncorrect / ntokens
if log:
print(f'Train: wpb={ntokens//niterations}, num_updates={niterations}, accuracy={accuracy:.1f}, loss={total_loss:.2f}')
return accuracy, total_loss<train_model> | percent_missing = df.isnull().sum() * 100 / len(df)
missing_values = pd.DataFrame({'percent_missing': percent_missing})
missing_values.sort_values(by ='percent_missing' , ascending=False ) | Titanic - Machine Learning from Disaster |
5,977,210 | def validate(model, criterion, idata, target, batch_size, device):
model.eval()
total_loss = 0
ncorrect = 0
ntokens = 0
niterations = 0
y_pred = []
with torch.no_grad() :
for X, y in batch_generator(idata, target, batch_size, shuffle=False):
X = torch.tensor(X, dtype=torch.long, device=device)
output = model(X)
if target is not None:
y = torch.tensor(y, dtype=torch.long, device=device)
loss = criterion(output, y)
total_loss += loss.item()
ncorrect +=(torch.max(output, 1)[1] == y ).sum().item()
ntokens += y.numel()
niterations += 1
else:
pred = torch.max(output, 1)[1].detach().to('cpu' ).numpy()
y_pred.append(pred)
if target is not None:
total_loss = total_loss / ntokens
accuracy = 100 * ncorrect / ntokens
return accuracy, total_loss
else:
return np.concatenate(y_pred )<set_options> | percent_missing = df2.isnull().sum() * 100 / len(df2)
missing_values = pd.DataFrame({'percent_missing': percent_missing})
missing_values.sort_values(by ='percent_missing' , ascending=False ) | Titanic - Machine Learning from Disaster |
5,977,210 | if torch.cuda.is_available() :
device = torch.device('cuda')
else:
device = torch.device('cpu')
print("WARNING: Training without GPU can be very slow!" )<create_dataframe> | df['Survived'].value_counts() | Titanic - Machine Learning from Disaster |
5,977,210 | vocab, data = load_preprocessed_dataset(params.preprocessed )<choose_model_class> | X = df.drop(['Cabin', 'Name', 'PassengerId', 'Survived', 'Ticket'], axis = 1)
Y = df['Survived']
Test_Data = df2.drop(['Cabin', 'Name', 'PassengerId', 'Ticket'], axis = 1 ) | Titanic - Machine Learning from Disaster |
5,977,210 | model = CBOW(len(vocab), params.embedding_dim ).to(device )<load_from_csv> | X = pd.get_dummies(X, prefix_sep='_')
Test_Data = pd.get_dummies(Test_Data, prefix_sep='_')
X.head() | Titanic - Machine Learning from Disaster |
5,977,210 | valid_x_df = pd.read_csv(f'{COMPETITION_ROOT}/x_valid.csv')
tokens = valid_x_df.columns[1:]
valid_x = valid_x_df[tokens].apply(vocab.get_index ).to_numpy(dtype='int32')
valid_y_df = pd.read_csv(f'{COMPETITION_ROOT}/y_valid.csv')
valid_y = valid_y_df['token'].apply(vocab.get_index ).to_numpy(dtype='int32' )<choose_model_class> | Test_Data['Embarked_0'] = 0
Test_Data = Test_Data[['Pclass', 'Age','SibSp','Parch','Fare','Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S']]
Test_Data.head() | Titanic - Machine Learning from Disaster |
5,977,210 | optimizer = torch.optim.Adam(model.parameters())
criterion = nn.CrossEntropyLoss(reduction='sum')
train_accuracy = []
wiki_accuracy = []
valid_accuracy = []
for epoch in range(params.epochs):
acc, loss = train(model, criterion, optimizer, data[0][0], data[0][1], params.batch_size, device, log=True)
train_accuracy.append(acc)
print(f'| epoch {epoch:03d} | train accuracy={acc:.1f}%, train loss={loss:.2f}')
acc, loss = validate(model, criterion, data[1][0], data[1][1], params.batch_size, device)
wiki_accuracy.append(acc)
print(f'| epoch {epoch:03d} | valid accuracy={acc:.1f}%, valid loss={loss:.2f}(wikipedia)')
acc, loss = validate(model, criterion, valid_x, valid_y, params.batch_size, device)
valid_accuracy.append(acc)
print(f'| epoch {epoch:03d} | valid accuracy={acc:.1f}%, valid loss={loss:.2f}(El Periódico)')
torch.save(model.state_dict() , params.modelname )<load_from_csv> | Y = LabelEncoder().fit_transform(Y)
Y | Titanic - Machine Learning from Disaster |
5,977,210 | valid_x_df = pd.read_csv(f'{COMPETITION_ROOT}/x_test.csv')
test_x = valid_x_df[tokens].apply(vocab.get_index ).to_numpy(dtype='int32')
y_pred = validate(model, None, test_x, None, params.batch_size, device)
y_token = [vocab.idx2token[index] for index in y_pred]<save_to_csv> | X2 = StandardScaler().fit_transform(X)
Test_Data = StandardScaler().fit_transform(Test_Data)
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X2, Y, test_size = 0.20, random_state = 101 ) | Titanic - Machine Learning from Disaster |
5,977,210 | submission = pd.DataFrame({'id':valid_x_df['id'], 'token': y_token}, columns=['id', 'token'])
print(submission.head())
submission.to_csv('submission.csv', index=False )<set_options> | start = time.process_time()
trainedmodel = LogisticRegression().fit(X_Train,Y_Train)
print(time.process_time() - start)
predictions =trainedmodel.predict(X_Test)
print(confusion_matrix(Y_Test,predictions))
print(classification_report(Y_Test,predictions)) | Titanic - Machine Learning from Disaster |
5,977,210 | %matplotlib inline<load_from_csv> | start = time.process_time()
trainedsvm = svm.LinearSVC().fit(X_Train, Y_Train)
print(time.process_time() - start)
predictionsvm = trainedsvm.predict(X_Test)
print(confusion_matrix(Y_Test,predictionsvm))
print(classification_report(Y_Test,predictionsvm)) | Titanic - Machine Learning from Disaster |
5,977,210 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')<load_from_csv> | start = time.process_time()
trainedtree = tree.DecisionTreeClassifier().fit(X_Train, Y_Train)
print(time.process_time() - start)
predictionstree = trainedtree.predict(X_Test)
print(confusion_matrix(Y_Test,predictionstree))
print(classification_report(Y_Test,predictionstree)) | Titanic - Machine Learning from Disaster |
5,977,210 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')<count_missing_values> | start = time.process_time()
trainedforest = RandomForestClassifier(n_estimators=700 ).fit(X_Train,Y_Train)
print(time.process_time() - start)
predictionforest = trainedforest.predict(X_Test)
print(confusion_matrix(Y_Test,predictionforest))
print(classification_report(Y_Test,predictionforest)) | Titanic - Machine Learning from Disaster |
5,977,210 | train.isnull().sum()<count_missing_values> | pca = PCA(n_components=2,svd_solver='full')
X_pca = pca.fit_transform(X)
X_reduced, X_test_reduced, Y_reduced, Y_test_reduced = train_test_split(X_pca, Y, test_size = 0.30, random_state = 101)
start = time.process_time()
trainedforest = RandomForestClassifier(n_estimators=700 ).fit(X_reduced,Y_reduced)
print(time.process_time() - start)
predictionforest = trainedforest.predict(X_test_reduced)
print(confusion_matrix(Y_test_reduced,predictionforest))
print(classification_report(Y_test_reduced,predictionforest)) | Titanic - Machine Learning from Disaster |
5,977,210 | test.isnull().sum()<count_missing_values> | model1 = svm.LinearSVC()
model2 = tree.DecisionTreeClassifier()
model3 = RandomForestClassifier()
model = VotingClassifier(estimators=[('svm', model1),('dt', model2),('rf', model3)], voting='hard')
model.fit(X_Train,Y_Train)
model.score(X_Test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | test.isnull().sum()<count_values> | model1 = LogisticRegression()
model2 = tree.DecisionTreeClassifier()
model3 = RandomForestClassifier()
model1.fit(X_Train,Y_Train)
model2.fit(X_Train,Y_Train)
model3.fit(X_Train,Y_Train)
pred1 = model1.predict_proba(X_Test)[:,1]
pred2 = model2.predict_proba(X_Test)[:,1]
pred3 = model3.predict_proba(X_Test)[:,1]
finalpred =(pred1+pred2+pred3)/3
preds = np.where(finalpred > 0.5, 1, 0)
print(confusion_matrix(Y_Test, preds))
print(classification_report(Y_Test, preds)) | Titanic - Machine Learning from Disaster |
5,977,210 | train.Outcome.value_counts()<prepare_x_and_y> | model1 = LogisticRegression()
model2 = tree.DecisionTreeClassifier()
model3 = RandomForestClassifier()
model1.fit(X_Train,Y_Train)
model2.fit(X_Train,Y_Train)
model3.fit(X_Train,Y_Train)
pred1 = model1.predict_proba(X_Test)[:,1]
pred2 = model2.predict_proba(X_Test)[:,1]
pred3 = model3.predict_proba(X_Test)[:,1]
finalpred=(pred1*0.2+pred2*0.4+pred3*0.4)
preds = np.where(finalpred > 0.5, 1, 0)
print(confusion_matrix(Y_Test, preds))
print(classification_report(Y_Test, preds)) | Titanic - Machine Learning from Disaster |
5,977,210 | X = train.drop(['Outcome'], axis = 1)
y = train.Outcome<train_model> | from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from vecstack import stacking
from sklearn.metrics import accuracy_score | Titanic - Machine Learning from Disaster |
5,977,210 | clf = GradientBoostingClassifier(n_estimators = 105)
clf.fit(X,y )<predict_on_test> | random_search = {'criterion': ['entropy', 'gini'],
'max_depth': list(np.linspace(10, 1200, 10, dtype = int)) + [None],
'max_features': ['auto', 'sqrt','log2', None],
'min_samples_leaf': [4, 6, 8, 12],
'min_samples_split': [5, 7, 10, 14],
'n_estimators': list(np.linspace(151, 1200, 10, dtype = int)) }
clf = RandomForestClassifier()
model = RandomizedSearchCV(estimator = clf, param_distributions = random_search, n_iter = 80,
cv = 4, verbose= 5, random_state= 101, n_jobs = -1)
model.fit(X,Y)
model.best_params_ | Titanic - Machine Learning from Disaster |
5,977,210 | predicted = clf.predict(test )<save_to_csv> | random_search = {'criterion': ['entropy', 'gini'],
'max_depth': list(np.linspace(10, 1200, 10, dtype = int)) ,
'max_features': ['auto', 'sqrt','log2', None],
'min_samples_leaf': [4, 6, 8, 12],
'min_samples_split': [5, 7, 10, 14],
'n_estimators': list(np.linspace(151, 1200, 10, dtype = int)) }
clf = XGBClassifier()
model = RandomizedSearchCV(estimator = clf, param_distributions = random_search, n_iter = 80,
cv = 4, verbose= 5, random_state= 101, n_jobs = -1)
model.fit(X,Y)
model.best_params_ | Titanic - Machine Learning from Disaster |
5,977,210 | output = pd.DataFrame(predicted,columns = ['Outcome'])
test = pd.read_csv('../input/test.csv')
output['Id'] = test['Id']
output[['Id','Outcome']].to_csv('submission_cloudy10.csv', index = False)
output.head()<set_options> | random_search = {'dual': [False],
'solver' : ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'max_iter': list(np.linspace(10, 50000, 100, dtype = int)) }
clf = LogisticRegression()
model = RandomizedSearchCV(estimator = clf, param_distributions = random_search, n_iter = 80,
cv = 4, verbose= 5, random_state= 101, n_jobs = -1)
model.fit(X,Y)
model.best_params_ | Titanic - Machine Learning from Disaster |
5,977,210 | %matplotlib inline<load_from_csv> | models = [
LogisticRegression(solver= 'sag', max_iter = 15024, dual= False),
RandomForestClassifier(n_estimators = 1200, min_samples_split = 5, min_samples_leaf = 4, max_features = None, max_depth = 1200, criterion = 'entropy'),
XGBClassifier(n_estimators = 151, min_samples_split = 10, min_samples_leaf = 6,
max_features = 'auto', max_depth = 10, criterion = 'gini')
] | Titanic - Machine Learning from Disaster |
5,977,210 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')<load_from_csv> | S_train, S_test = stacking(models,
X_Train, Y_Train, Test_Data,
regression=False,
mode='oof_pred_bag',
needs_proba=False,
save_dir=None,
metric=accuracy_score,
n_folds= 4,
stratified=True,
shuffle=True,
random_state=0,
verbose=2 ) | Titanic - Machine Learning from Disaster |
5,977,210 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')<categorify> | model = XGBClassifier(n_estimators = 151, min_samples_split = 10, min_samples_leaf = 6,
max_features = 'auto', max_depth = 10, criterion = 'gini')
model = model.fit(S_train, Y_Train)
Y_Pred = model.predict(S_test)
| Titanic - Machine Learning from Disaster |
5,977,210 | imputer = Imputer()
train2 =train[['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age',]].replace(0, numpy.NaN)
train2["Outcome"]= train["Outcome"]
transformed_train = imputer.fit_transform(train2)
print(numpy.isnan(transformed_train ).sum())
train2.BMI.plot()
print(train2.shape)
<count_missing_values> | warnings.simplefilter('ignore')
clf1 = KNeighborsClassifier(n_neighbors=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
lr = LogisticRegression()
sclf = StackingClassifier(classifiers=[clf1, clf2, clf3],
use_probas=True,
average_probas=False,
meta_classifier=lr)
print('3-fold cross validation:\n')
for clf, label in zip([clf1, clf2, clf3, sclf],
['KNN',
'Random Forest',
'Naive Bayes',
'StackingClassifier']):
scores = model_selection.cross_val_score(clf, X, Y,
cv=3, scoring='accuracy')
print("Accuracy: %0.2f(+/- %0.2f)[%s]"
%(scores.mean() , scores.std() , label)) | Titanic - Machine Learning from Disaster |
5,977,210 | train.isnull().sum()<count_missing_values> | sclf.fit(X_Train, Y_Train)
Y_Pred = sclf.predict(X_Test)
sclf.score(X_Test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | test.isnull().sum()<count_missing_values> | X_Train2, X_Val, Y_Train2, Y_Val = train_test_split(X_Train, Y_Train, test_size = 0.30, random_state = 101)
model1 = tree.DecisionTreeClassifier()
model1.fit(X_Train2,Y_Train2)
val_pred1=model1.predict(X_Val)
test_pred1=model1.predict(X_Test)
val_pred1=pd.DataFrame(val_pred1)
test_pred1=pd.DataFrame(test_pred1)
model2 = LogisticRegression()
model2.fit(X_Train2,Y_Train2)
val_pred2=model2.predict(X_Val)
test_pred2=model2.predict(X_Test)
val_pred2=pd.DataFrame(val_pred2)
test_pred2=pd.DataFrame(test_pred2)
df_val=pd.concat([pd.DataFrame(X_Val), val_pred1,val_pred2],axis=1)
df_test=pd.concat([pd.DataFrame(X_Test), test_pred1,test_pred2],axis=1)
model = LogisticRegression()
model.fit(df_val,Y_Val)
model.score(df_test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | test.isnull().sum()<count_values> | model = BaggingClassifier(tree.DecisionTreeClassifier(random_state=1))
model.fit(X_Train,Y_Train)
model.score(X_Test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | train.Outcome.value_counts()<prepare_x_and_y> | model = AdaBoostClassifier()
model.fit(X_Train,Y_Train)
model.score(X_Test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | X = train2.drop(['Outcome'], axis = 1)
imputer = Imputer()
transformed_X = imputer.fit_transform(X)
X_with_null = train.drop(['Outcome'], axis = 1)
y = train2.Outcome
y_with_null = train.Outcome<compute_train_metric> | model= GradientBoostingClassifier(learning_rate=0.01,random_state=1)
model.fit(X_Train,Y_Train)
model.score(X_Test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | X_validate = X[:200]
Y_validate = y[:200]
params = {'learning_rate':0.1}
model = XGBClassifier(n_estimators=500, **params)
kfold = KFold(n_splits=10, random_state=7)
print("dropped nulls")
print(cross_val_score(model, transformed_X, y,cv=kfold))<train_model> | model=xgb.XGBClassifier(random_state=1,learning_rate=0.01)
model.fit(X_Train,Y_Train)
model.score(X_Test,Y_Test ) | Titanic - Machine Learning from Disaster |
5,977,210 | X_validate_null = X_with_null[:200]
Y_validate_null = y_with_null[:200]
params = {'learning_rate':0.1}
model = XGBClassifier(n_estimators=500, **params)
kfold = KFold(n_splits=10, random_state=7)
print("Raw data")
print(cross_val_score(model, X_validate_null, Y_validate_null,cv=kfold))<prepare_output> | train_data=lgb.Dataset(X_Train,Y_Train)
params = {'learning_rate':0.2}
model= lgb.train(params, train_data, 100)
y_pred=model.predict(X_Test)
preds = np.where(y_pred > 0.5, 1, 0)
print(confusion_matrix(Y_Test, preds))
print(classification_report(Y_Test, preds)) | Titanic - Machine Learning from Disaster |
5,977,210 | XX = pd.DataFrame(data=transformed_X)
XX.columns = ['Pregnancies', 'Glucose', 'BloodPressure', 'SkinThickness',
'Insulin', 'BMI', 'DiabetesPedigreeFunction', 'Age']
XX['Id'] = train['Id']
XX.head()<train_model> | Titanic - Machine Learning from Disaster | |
5,977,210 | params = {'learning_rate':0.01}
clf = XGBClassifier(n_estimators=100, **params)
clf.fit(XX,y )<predict_on_test> | def fit_model(trainX, trainy):
model = Sequential()
model.add(Dense(14, input_dim= 10, activation='relu'))
model.add(Dense(50))
model.add(Dense(10, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(trainX, trainy, epochs=500, verbose=0)
return model
def define_stacked_model(members):
for i in range(len(members)) :
model = members[i]
for layer in model.layers:
layer.trainable = False
layer.name = 'ensemble_' + str(i+1)+ '_' + layer.name
ensemble_visible = [model.input for model in members]
ensemble_outputs = [model.output for model in members]
merge = concatenate(ensemble_outputs)
hidden = Dense(10, activation='relu' )(merge)
output = Dense(2, activation='softmax' )(hidden)
model = Model(inputs=ensemble_visible, outputs=output)
plot_model(model, show_shapes=True, to_file='model_graph.png')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def fit_stacked_model(model, inputX, inputy):
X = [inputX for _ in range(len(model.input)) ]
model.fit(X, inputy, epochs=300, verbose=0)
def predict_stacked_model(model, inputX):
X = [inputX for _ in range(len(model.input)) ]
return model.predict(X, verbose=0)
BinY_Train = to_categorical(Y_Train)
n_members = 5
members = []
for i in range(n_members):
members.append(fit_model(X_Train, BinY_Train))
print('Created %d models' % len(members))
stacked_model = define_stacked_model(members ) | Titanic - Machine Learning from Disaster |
5,977,210 | predicted = clf.predict(test)
<save_to_csv> | BinY_Test = to_categorical(Y_Test)
fit_stacked_model(stacked_model, X_Test, BinY_Test)
yhat = predict_stacked_model(stacked_model, X_Test)
yhat = argmax(yhat, axis=1)
acc = accuracy_score(Y_Test, yhat)
print('Stacked Test Accuracy: %.3f' % acc ) | Titanic - Machine Learning from Disaster |
5,977,210 | output = pd.DataFrame(predicted,columns = ['Outcome'])
test = pd.read_csv('../input/test.csv')
output['Id'] = test['Id']
output[['Id','Outcome']].to_csv('johnnybgood2.csv', index = False)
output.head()<set_options> | n_members = 7
members = []
for i in range(n_members):
members.append(fit_model(X_Train, BinY_Train))
print('Created %d models' % len(members))
stacked_model = define_stacked_model(members)
fit_stacked_model(stacked_model, X_Test, BinY_Test)
yhat = predict_stacked_model(stacked_model, Test_Data)
yhat = argmax(yhat, axis=1 ) | Titanic - Machine Learning from Disaster |
5,977,210 | <load_from_csv><EOS> | submission = pd.DataFrame({
"PassengerId": df2["PassengerId"],
"Survived": yhat
})
submission.to_csv('titanic.csv', index=False)
submission.head() | Titanic - Machine Learning from Disaster |
11,101,836 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv> | !pip install skorch | Titanic - Machine Learning from Disaster |
11,101,836 | trainingData = read_csv('../input/train.csv')
trainingData=trainingData[['Id','Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age','Outcome']]
testingData = read_csv('../input/test.csv')
testingData=testingData[['Id','Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']]
trainingFeatures = trainingData.iloc[:, :-1]
trainingLabels = trainingData.iloc[:, -1]
imputer = SimpleImputer(missing_values=0,strategy='median')
trainingFeatures = imputer.fit_transform(trainingFeatures)
trainingFeatures = pd.DataFrame(trainingFeatures)
trainingFeatures.columns=['Id','Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']
testingData = imputer.transform(testingData)
testingData = pd.DataFrame(testingData)
testingData.columns=['Id','Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']
print("
print("
Column Name
")
print(( trainingFeatures[:] == 0 ).sum() )<drop_column> | warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
11,101,836 | trainingFeatures2 = trainingFeatures.drop(['Id'], axis=1 )<train_on_grid> | def seed_everything(seed_value):
random.seed(seed_value)
np.random.seed(seed_value)
torch.manual_seed(seed_value)
os.environ['PYTHONHASHSEED'] = str(seed_value)
if torch.cuda.is_available() :
torch.cuda.manual_seed(seed_value)
torch.cuda.manual_seed_all(seed_value)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
seed = 1234
seed_everything(seed ) | Titanic - Machine Learning from Disaster |
11,101,836 | kf = KFold(n_splits=2, shuffle=True, random_state=rng)
X_train, X_test, y_train, y_test = train_test_split(trainingFeatures2, trainingLabels, test_size=0.2, random_state=rng)
first_model = XGBClassifier().fit(X_train, y_train)
model = GridSearchCV(first_model, {'max_depth': [2,4,6], 'n_estimators': [50,100,200]}, verbose=1)
model.fit(X_train,y_train)
predictions = model.predict(X_test)
actuals = y_test
print(confusion_matrix(actuals, predictions))
<create_dataframe> | train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv') | Titanic - Machine Learning from Disaster |
11,101,836 | print('Feature Importance:\n', fullList, '\n')<compute_train_metric> | def title_extract(df):
df['Title'] = df['Name'].str.extract('([a-zA-Z]+)\.')
df['Title'] = df['Title'].apply(lambda x: 'Unknown' if x not in ['Miss','Master','Mr','Mrs', 'Dr', 'Rev'] else x)
return df | Titanic - Machine Learning from Disaster |
11,101,836 | kfold = KFold(n_splits=100, shuffle=True, random_state=rng)
results = cross_val_score(model, trainingFeatures2, trainingLabels, cv=kfold)
print("DecisionTreeClassifier:
Cross_Val_Score: %.2f%%(%.2f%%)" %(results.mean() *100, results.std() *100))
prediction = model.predict(X_test)
cnf_matrix = confusion_matrix(y_test, prediction)
dict_characters = {0: 'Healthy', 1: 'Diabetes'}
plot_confusion_matrix(cnf_matrix, classes=dict_characters,title='Confusion matrix' )<save_to_csv> | train = title_extract(train)
test = title_extract(test ) | Titanic - Machine Learning from Disaster |
11,101,836 | test = testingData
test = pd.DataFrame(test)
test.columns=['Id','Pregnancies','Glucose','BloodPressure','SkinThickness','Insulin','BMI','DiabetesPedigreeFunction','Age']
test2 = test.drop(['Id'], axis=1)
my_predictions = model.predict(test2)
Identifier = test.Id.astype(int)
my_submission = pd.DataFrame({'Id': Identifier, 'Outcome': my_predictions})
my_submission.to_csv('submission_mendoza.csv', index=False)
my_submission.head()<set_options> | def fill_nan_age(df):
df['group_mean_age'] = round(df.groupby(['Sex', 'Title'])['Age'].transform('mean'))
df['Age'].fillna(df['group_mean_age'], inplace=True)
del df['group_mean_age']
return df | Titanic - Machine Learning from Disaster |
11,101,836 | plt.style.use('ggplot')
%matplotlib inline
os.getcwd()
pd.set_option('display.max_columns', 500)
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
<set_options> | train = fill_nan_age(train)
test = fill_nan_age(test ) | Titanic - Machine Learning from Disaster |
11,101,836 | warnings.filterwarnings('ignore')
plt.style.use('ggplot')
%matplotlib inline
<load_from_csv> | def alone_family(df):
df['Family'] = df['SibSp'] + df['Parch']
df['Alone'] = pd.Series(np.where(df['Family'] == 0, 1, 0))
return df | Titanic - Machine Learning from Disaster |
11,101,836 | sample_submission = pd.read_csv("../input/exam-for-students20200129/sample_submission.csv", index_col=0)
df_test = pd.read_csv("../input/exam-for-students20200129/test.csv")
df_train = pd.read_csv("../input/exam-for-students20200129/train.csv")<load_from_csv> | train = alone_family(train)
test = alone_family(test ) | Titanic - Machine Learning from Disaster |
11,101,836 | df_info = pd.read_csv("../input/exam-for-students20200129/country_info.csv")<merge> | train = fare_cat(train)
test = fare_cat(test ) | Titanic - Machine Learning from Disaster |
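fare_cat is called above, and an age_cat column feeds col_dummies below, but neither helper appears in these cells; a purely hypothetical reconstruction, binning Fare into quantiles and Age into fixed intervals, might look like:

import pandas as pd

def fare_cat(df):
    # Hypothetical helper: four quantile-based fare buckets in a 'Fare_cat' column.
    df['Fare_cat'] = pd.qcut(df['Fare'], 4, labels=False, duplicates='drop')
    return df

def age_cat(df):
    # Hypothetical helper: fixed age intervals in an 'age_cat' column.
    df['age_cat'] = pd.cut(df['Age'], bins=[0, 12, 18, 35, 60, 120], labels=False)
    return df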
11,101,836 | df_train = df_train.merge(df_info, on=['Country'], how='left')
df_test = df_test.merge(df_info, on=['Country'], how='left' )<feature_engineering> | def data_clean(drop_col, dummies, df):
df = df.drop(drop_col, axis=1)
df = pd.get_dummies(data=df, columns=dummies)
return df | Titanic - Machine Learning from Disaster |
11,101,836 | df_train['ConvertedSalary'] = df_train['ConvertedSalary'].apply(np.log1p)
<feature_engineering> | col_drop = ['PassengerId', 'Name', 'Ticket', 'Cabin']
col_dummies = ['Sex', 'Embarked', 'age_cat', 'Fare_cat', 'Pclass', 'Title'] | Titanic - Machine Learning from Disaster |
11,101,836 | df_train['SalaryType_flg'] = df_train.SalaryType.isnull()
df_test['SalaryType_flg'] = df_test.SalaryType.isnull()
df_train['Country_flg'] = df_train.Country.isnull()
df_test['Country_flg'] = df_test.Country.isnull()
df_train['Employment_flg'] = df_train.Employment.isnull()
df_test['Employment_flg'] = df_test.Employment.isnull()
df_train['Currency_flg'] = df_train.Currency.isnull()
df_test['Currency_flg'] = df_test.Currency.isnull()
df_train['YearsCodingProf'] = df_train.YearsCodingProf.isnull()
df_test['YearsCodingProf'] = df_test.YearsCodingProf.isnull()<prepare_x_and_y> | train_clean = data_clean(col_drop, col_dummies, train)
train_clean.head() | Titanic - Machine Learning from Disaster |
11,101,836 | y_train=df_train.ConvertedSalary
X_train=df_train.drop(['ConvertedSalary'], axis=1)
X_test=df_test<count_unique_values> | test_clean = data_clean(col_drop, col_dummies, test)
test_clean.head() | Titanic - Machine Learning from Disaster |
11,101,836 | cats = []
for col in X_train.columns:
if X_train[col].dtype == 'object':
cats.append(col)
print(col, X_train[col].nunique() )<categorify> | X = train_clean.drop('Survived', axis=1)
y = train_clean['Survived'] | Titanic - Machine Learning from Disaster |
11,101,836 | target = 'ConvertedSalary'
col='SalaryType'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'SalaryType_te'})
X_test = X_test.rename(columns={0: 'SalaryType_te'} )<categorify> | X = np.array(X, dtype='float32')
y = np.array(y, dtype='float32' ) | Titanic - Machine Learning from Disaster |
11,101,836 | target = 'ConvertedSalary'
col='Country'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'Country_te'})
X_test = X_test.rename(columns={0: 'Country_te'} )<categorify> | class TitanicModel(nn.Module):
def __init__(self, neurons=10, dropout=0.2):
super(TitanicModel, self ).__init__()
self.dense0 = nn.Linear(X.shape[1], neurons)
self.activation0 = nn.ReLU()
self.dropout0 = nn.Dropout(dropout)
self.dense1 = nn.Linear(neurons, neurons)
self.activation1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.dense2 = nn.Linear(neurons, 1)
self.output = nn.Sigmoid()
def forward(self, x):
x = self.dense0(x)
x = self.activation0(x)
x = self.dropout0(x)
x = self.dense1(x)
x = self.activation1(x)
x = self.dropout1(x)
x = self.dense2(x)
x = self.output(x)
return x | Titanic - Machine Learning from Disaster |
11,101,836 | target = 'ConvertedSalary'
col='Employment'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'Employment_te'})
X_test = X_test.rename(columns={0: 'Employment_te'} )<categorify> | model = NeuralNetBinaryClassifier(module=TitanicModel,
lr = 0.001,
optimizer__weight_decay = 0.001,
verbose=0,
train_split=False,
device='cuda' ) | Titanic - Machine Learning from Disaster |
11,101,836 |
<categorify> | params = {'batch_size': [10],
'max_epochs': [25],
'optimizer': [torch.optim.Adam],
'lr': [0.01, 0.001],
'criterion': [nn.BCELoss],
'module__neurons': [10, 15, 20, 25],
'module__dropout': [0, 0.2]}
| Titanic - Machine Learning from Disaster |
11,101,836 |
<categorify> | %%time
model_grid = GridSearchCV(estimator=model, param_grid=params,
scoring = 'accuracy', cv=3, verbose=0)
model_grid = model_grid.fit(X, y ) | Titanic - Machine Learning from Disaster |
11,101,836 |
<categorify> | print(f'Accuracy: {model_grid.best_score_ * 100 :.2f}%')
print(model_grid.best_params_ ) | Titanic - Machine Learning from Disaster |
11,101,836 |
<categorify> | test_clean = np.array(test_clean, dtype='float32' ) | Titanic - Machine Learning from Disaster |
11,101,836 |
<categorify> | pred = model_grid.predict(test_clean ) | Titanic - Machine Learning from Disaster |
11,101,836 |
<categorify> | submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': pred})
submission.head() | Titanic - Machine Learning from Disaster |
11,101,836 | encoder =OrdinalEncoder(cols=cats)
X_train[cats] = encoder.fit_transform(X_train[cats])
X_test[cats] = encoder.transform(X_test[cats] )<train_model> | submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
11,101,836 | X_train.fillna(-9999,inplace=True)
X_test.fillna(-9999,inplace=True )<train_model> | submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
9,701,832 |
<predict_on_test> | data_train = pd.read_csv('/kaggle/input/titanic/train.csv', sep = ',', header = 0)
data_test = pd.read_csv('/kaggle/input/titanic/test.csv', sep = ',', header = 0)
print(data_train)
data_train.info()
print(data_test)
data_test.info() | Titanic - Machine Learning from Disaster |
9,701,832 |
<load_from_csv> | combine1 = [data_train]
for data_train in combine1:
data_train['Salutation'] = data_train.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
for data_train in combine1:
data_train['Salutation'] = data_train['Salutation'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
data_train['Salutation'] = data_train['Salutation'].replace('Mlle', 'Miss')
data_train['Salutation'] = data_train['Salutation'].replace('Ms', 'Miss')
data_train['Salutation'] = data_train['Salutation'].replace('Mme', 'Mrs')
combine2 = [data_test]
for data_test in combine2:
data_test['Salutation'] = data_test.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
for data_test in combine2:
data_test['Salutation'] = data_test['Salutation'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
data_test['Salutation'] = data_test['Salutation'].replace('Mlle', 'Miss')
data_test['Salutation'] = data_test['Salutation'].replace('Ms', 'Miss')
data_test['Salutation'] = data_test['Salutation'].replace('Mme', 'Mrs')
Salutation_mapping = {'Mr':1, 'Miss':2, 'Mrs':3, 'Master':4, 'Rare':5}
for data_train in combine1:
data_train['Salutation'] = data_train['Salutation'].map(Salutation_mapping)
data_train['Salutation'] = data_train['Salutation'].fillna(0)
for data_test in combine2:
data_test['Salutation'] = data_test['Salutation'].map(Salutation_mapping)
data_test['Salutation'] = data_test['Salutation'].fillna(0)
Age_Salutation_dn1 = data_train[['Age', 'Salutation']].dropna()
Age_Salutation_dn2 = data_test[['Age', 'Salutation']].dropna()
Age_Salutation_dn1 = np.array(Age_Salutation_dn1)
Age_Salutation_dn2 = np.array(Age_Salutation_dn2)
t_age_Mr, t_age_Miss, t_age_Mrs, t_age_Master, t_age_Rare = 0, 0, 0, 0, 0
Mr_count, Miss_count, Mrs_count, Master_count, Rare_count = 0, 0, 0, 0, 0
for i in range(len(Age_Salutation_dn1)) :
age1 = Age_Salutation_dn1[i][0]
if Age_Salutation_dn1[i][1] == 1:
t_age_Mr += age1
Mr_count += 1
elif Age_Salutation_dn1[i][1] == 2:
t_age_Miss += age1
Miss_count += 1
elif Age_Salutation_dn1[i][1] == 3:
t_age_Mrs += age1
Mrs_count += 1
elif Age_Salutation_dn1[i][1] == 4:
t_age_Master += age1
Master_count += 1
else:
t_age_Rare += age1
Rare_count += 1
for i in range(len(Age_Salutation_dn2)) :
age2 = Age_Salutation_dn2[i][0]
if Age_Salutation_dn2[i][1] == 1:
t_age_Mr += age2
Mr_count += 1
elif Age_Salutation_dn2[i][1] == 2:
t_age_Miss += age2
Miss_count += 1
elif Age_Salutation_dn2[i][1] == 3:
t_age_Mrs += age2
Mrs_count += 1
elif Age_Salutation_dn2[i][1] == 4:
t_age_Master += age2
Master_count += 1
else:
t_age_Rare += age2
Rare_count += 1
m_age_Mr = t_age_Mr / Mr_count
m_age_Miss = t_age_Miss / Miss_count
m_age_Mrs = t_age_Mrs / Mrs_count
m_age_Master = t_age_Master / Master_count
m_age_Rare = t_age_Rare / Rare_count
print('Mr:', m_age_Mr)
print('Miss:', m_age_Miss)
print('Mrs:', m_age_Mrs)
print('Master:', m_age_Master)
print('Rare:', m_age_Rare ) | Titanic - Machine Learning from Disaster |
9,701,832 | sample_submission = pd.read_csv("../input/exam-for-students20200129/sample_submission.csv", index_col=0)
df_test = pd.read_csv("../input/exam-for-students20200129/test.csv")
df_train = pd.read_csv("../input/exam-for-students20200129/train.csv")<feature_engineering> | data_set = DataFrame(data_train.drop(['PassengerId', 'Name', 'Ticket'], axis=1))
data_set = data_set.fillna({'Fare':0, 'Cabin':0,'Embarked':0})
data_set = data_set.replace({'male':0, 'female':1, 'S':1, 'C':2, 'Q':3})
data_set = data_set.replace({'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'T':8}, regex=True)
for i in range(len(data_set)) :
salutation = data_set['Salutation'][i]
if salutation == 1:
if pd.isnull(data_set['Age'][i]):
data_set['Age'].loc[i] = m_age_Mr
elif salutation == 2:
if pd.isnull(data_set['Age'][i]):
data_set['Age'].loc[i] = m_age_Miss
elif salutation == 3:
if pd.isnull(data_set['Age'][i]):
data_set['Age'].loc[i] = m_age_Mrs
elif salutation == 4:
if pd.isnull(data_set['Age'][i]):
data_set['Age'].loc[i] = m_age_Master
else:
if pd.isnull(data_set['Age'][i]):
data_set['Age'].loc[i] = m_age_Rare
data_set['FamilySize'] = data_set['SibSp'] + data_set['Parch'] + 1
isAlone = []
FamilySize_M = []
FamilySize_L = []
isCabinNo = []
roundAgeArray = []
roundFareArray = []
for i in range(len(data_set)) :
if data_set['FamilySize'][i] == 1:
isAlone.append(1)
else:
isAlone.append(0)
if data_set['Cabin'][i] == 0:
isCabinNo.append(1)
else:
isCabinNo.append(0)
if data_set['FamilySize'][i] < 2:
FamilySize_M.append(0)
FamilySize_L.append(0)
elif data_set['FamilySize'][i] < 5:
FamilySize_M.append(1)
FamilySize_L.append(0)
else:
FamilySize_M.append(0)
FamilySize_L.append(1)
roundAge = int(data_set['Age'][i] / 10)
roundAgeArray.append(roundAge)
if data_set['Fare'][i] > 0:
roundFare = int(math.log(data_set['Fare'][i]))
else:
roundFare = 0
roundFareArray.append(roundFare)
data_set['isAlone'] = isAlone
data_set['FamilySize_M'] = FamilySize_M
data_set['FamilySize_L'] = FamilySize_L
data_set['isCabinNo'] = isCabinNo
data_set['roundAge'] = roundAgeArray
data_set['roundFare'] = roundFareArray
data_set.info()
| Titanic - Machine Learning from Disaster |
9,701,832 | df_train['SalaryType_flg'] = df_train.SalaryType.isnull()
df_test['SalaryType_flg'] = df_test.SalaryType.isnull()
df_train['Country_flg'] = df_train.Country.isnull()
df_test['Country_flg'] = df_test.Country.isnull()
df_train['Employment_flg'] = df_train.Employment.isnull()
df_test['Employment_flg'] = df_test.Employment.isnull()
df_train['Currency_flg'] = df_train.Currency.isnull()
df_test['Currency_flg'] = df_test.Currency.isnull()
df_train['YearsCodingProf'] = df_train.YearsCodingProf.isnull()
df_test['YearsCodingProf'] = df_test.YearsCodingProf.isnull()<feature_engineering> | testData = np.array(data_set)
testData = testData.astype('int32')
for i in range(len(testData)) :
print(testData[i])
data_set = data_set.dropna()
x = DataFrame(data_set[['Pclass', 'Sex', 'Age', 'Salutation', 'FamilySize', 'isAlone']])
t = DataFrame(data_set['Survived'])
x = np.array(x)
t = np.array(t)
t = t.ravel()
x = x.astype('float32')
t = t.astype('int32')
print('x shape:', x.shape)
print(x[:10])
print('t shape:', t.shape)
print(t[:10] ) | Titanic - Machine Learning from Disaster |
9,701,832 | df_train['ConvertedSalary'] = df_train['ConvertedSalary'].apply(np.log1p)
<prepare_x_and_y> | dataset = TupleDataset(x, t)
train_val, innerTest = split_dataset_random(dataset, int(len(dataset)* 0.9), seed=0)
train, valid = split_dataset_random(train_val, int(len(train_val)* 0.7), seed=0)
train_iter = SerialIterator(train, batch_size=64, repeat=True, shuffle=True)
print(dataset[0] ) | Titanic - Machine Learning from Disaster |
9,701,832 | y_train=df_train.ConvertedSalary
X_train=df_train.drop(['ConvertedSalary'], axis=1)
X_test=df_test<count_unique_values> | class Net(chainer.Chain):
def __init__(self, n_in=6, n_hidden=100, n_out=2):
super().__init__()
with self.init_scope() :
self.l1 = L.Linear(n_in, n_hidden)
self.l2 = L.Linear(n_hidden, n_hidden)
self.l3 = L.Linear(n_hidden, n_out)
def forward(self, x):
h = F.sigmoid(self.l1(x))
h = F.sigmoid(self.l2(h))
h = self.l3(h)
return h
net = Net()
optimizer = optimizers.Adam(alpha=0.0001, beta1=0.9, beta2=0.999, eps=1e-08, eta=1.0,
weight_decay_rate=0.00001, amsgrad=False, adabound=False, final_lr=0.1, gamma=0.001)
optimizer.setup(net)
gpu_id = 0
n_epoch =2000
net.to_gpu(gpu_id)
results_train, results_valid = {}, {}
results_train['loss'], results_train['accuracy'] = [], []
results_valid['loss'], results_valid['accuracy'] = [], []
train_iter.reset()
count = 1
for epoch in range(n_epoch):
while True:
train_batch = train_iter.next()
x_train, t_train = chainer.dataset.concat_examples(train_batch, gpu_id)
y_train = net(x_train)
loss_train = F.softmax_cross_entropy(y_train, t_train)
acc_train = F.accuracy(y_train, t_train)
net.cleargrads()
loss_train.backward()
optimizer.update()
count += 1
if train_iter.is_new_epoch:
with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
x_valid, t_valid = chainer.dataset.concat_examples(valid, gpu_id)
y_valid = net(x_valid)
loss_valid = F.softmax_cross_entropy(y_valid, t_valid)
acc_valid = F.accuracy(y_valid, t_valid)
loss_train.to_cpu()
loss_valid.to_cpu()
acc_train.to_cpu()
acc_valid.to_cpu()
if epoch % 10 == 0:
print('epoch: {}, iteration: {}, loss(train): {:.4f}, loss(valid): {:.4f}, acc(train): {:.4f}, acc(valid): {:.4f}'.format(epoch, count, loss_train.array.mean() , loss_valid.array.mean() , acc_train.array.mean() , acc_valid.array.mean()))
results_train['loss'].append(loss_train.array)
results_train['accuracy'].append(acc_train.array)
results_valid['loss'].append(loss_valid.array)
results_valid['accuracy'].append(acc_valid.array)
break
| Titanic - Machine Learning from Disaster |
9,701,832 | cats = []
for col in X_train.columns:
if X_train[col].dtype == 'object':
cats.append(col)
print(col, X_train[col].nunique() )<categorify> | x_test, t_test = chainer.dataset.concat_examples(innerTest, device=gpu_id)
with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
y_test = net(x_test)
loss_test = F.softmax_cross_entropy(y_test, t_test)
acc_test = F.accuracy(y_test, t_test)
print('test loss: {:.4f}'.format(loss_test.array.get()))
print('test accuracy: {:.4f}'.format(acc_test.array.get())) | Titanic - Machine Learning from Disaster |
9,701,832 | target = 'ConvertedSalary'
col='SalaryType'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'SalaryType_te'})
X_test = X_test.rename(columns={0: 'SalaryType_te'} )<categorify> | net.to_cpu()
chainer.serializers.save_npz('net.npz', net)
!ls | Titanic - Machine Learning from Disaster |
9,701,832 | target = 'ConvertedSalary'
col='Country'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'Country_te'})
X_test = X_test.rename(columns={0: 'Country_te'} )<categorify> | test = DataFrame(data_test.drop(['PassengerId', 'Name', 'Ticket'], axis=1))
for i in range(len(test)) :
salutation = test['Salutation'][i]
if salutation == 1:
if pd.isnull(test['Age'][i]):
test['Age'].loc[i] = m_age_Mr
elif salutation == 2:
if pd.isnull(test['Age'][i]):
test['Age'].loc[i] = m_age_Miss
elif salutation == 3:
if pd.isnull(test['Age'][i]):
test['Age'].loc[i] = m_age_Mrs
elif salutation == 4:
if pd.isnull(test['Age'][i]):
test['Age'].loc[i] = m_age_Master
else:
if pd.isnull(test['Age'][i]):
test['Age'].loc[i] = m_age_Rare
test = test.fillna({'Fare':0, 'Cabin':0,'Embarked':0})
test = test.replace({'male':0, 'female':1, 'S':1, 'C':2, 'Q':3})
test = test.replace({'A':1, 'B':2, 'C':3, 'D':4, 'E':5, 'F':6, 'G':7, 'T':8}, regex=True)
test['FamilySize'] = test['SibSp'] + test['Parch'] + 1
isAlone = []
FamilySize_M = []
FamilySize_L = []
isCabinNo = []
roundAgeArray = []
roundFareArray = []
for i in range(len(test)) :
if test['FamilySize'][i] == 1:
isAlone.append(1)
else:
isAlone.append(0)
if test['Cabin'][i] == 0:
isCabinNo.append(1)
else:
isCabinNo.append(0)
if test['FamilySize'][i] < 2:
FamilySize_M.append(0)
FamilySize_L.append(0)
elif test['FamilySize'][i] < 5:
FamilySize_M.append(1)
FamilySize_L.append(0)
else:
FamilySize_M.append(0)
FamilySize_L.append(1)
roundAge = int(test['Age'][i] / 10)
roundAgeArray.append(roundAge)
if test['Fare'][i] > 0:
roundFare = int(math.log(test['Fare'][i]))
else:
roundFare = 0
roundFareArray.append(roundFare)
test['isAlone'] = isAlone
test['FamilySize_M'] = FamilySize_M
test['FamilySize_L'] = FamilySize_L
test['isCabinNo'] = isCabinNo
test['roundAge'] = roundAgeArray
test['roundFare'] = roundFareArray
test = DataFrame(test[['Pclass', 'Sex', 'Age', 'Salutation', 'FamilySize', 'isAlone']])
test.info() | Titanic - Machine Learning from Disaster |
9,701,832 | target = 'ConvertedSalary'
col='Employment'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'Employment_te'})
X_test = X_test.rename(columns={0: 'Employment_te'} )<categorify> | class newNet(chainer.Chain):
def __init__(self,n_in=6, n_hidden=100, n_out=2):
super().__init__()
with self.init_scope() :
self.l1 = L.Linear(n_in, n_hidden)
self.l2 = L.Linear(n_hidden, n_hidden)
self.l3 = L.Linear(n_hidden, n_out)
def forward(self, x):
h = F.sigmoid(self.l1(x))
h = F.sigmoid(self.l2(h))
h = self.l3(h)
return h
loaded_net = newNet()
chainer.serializers.load_npz('net.npz', loaded_net)
test = np.array(test)
test = test.astype('float32')
with chainer.using_config('train', False), chainer.using_config('enable_backprop', False):
result = loaded_net(test)
PassengerId = 891
outputArray = []
for i in range(len(test)) :
PassengerId += 1
predict = np.argmax(result[i,:].array)
innerArray = [PassengerId, predict]
outputArray.append(innerArray)
df = pd.DataFrame(outputArray, columns=['PassengerId', 'Survived'])
df.to_csv(path_or_buf='gender_submission.csv', index=False)
df_test_list = pd.DataFrame(outputArray)
df_test_list | Titanic - Machine Learning from Disaster |
10,161,774 | target = 'ConvertedSalary'
col='Currency'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'Currency_te'})
X_test = X_test.rename(columns={0: 'Currency_te'} )<categorify> | %matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
10,161,774 | target = 'ConvertedSalary'
col='MilitaryUS'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
enc_test = X_test[col].map(summary)
enc_test = enc_test.rename(columns={'purpose': 'purpose_te'})
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train=pd.concat([X_train, enc_train], axis=1)
X_test=pd.concat([X_test, enc_test], axis=1)
X_train = X_train.rename(columns={0: 'MilitaryUS_te'})
X_test = X_test.rename(columns={0: 'MilitaryUS_te'} )<categorify> | from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from xgboost import XGBClassifier | Titanic - Machine Learning from Disaster |
10,161,774 | encoder =OrdinalEncoder(cols=cats)
X_train[cats] = encoder.fit_transform(X_train[cats])
X_test[cats] = encoder.transform(X_test[cats])<train_model> | train_df = pd.read_csv('../input/titanic/train.csv')
print(train_df.shape)
train_df.head() | Titanic - Machine Learning from Disaster |
10,161,774 | X_train.fillna(-9999,inplace=True)
X_test.fillna(-9999,inplace=True)<train_model> | test_df = pd.read_csv('../input/titanic/test.csv')
print(test_df.shape)
test_df.head() | Titanic - Machine Learning from Disaster |
10,161,774 | %%time
X_train,X_val,y_train,y_val=train_test_split(X_train,y_train,random_state=71)
train_dataset=lgb.Dataset(X_train,y_train)
valid_dataset=lgb.Dataset(X_val,y_val,reference=train_dataset)
params={"objective":"regression",
"metric":"rmse"
}
model2=lgb_tuner.train(params,
train_set=train_dataset,
valid_sets=[valid_dataset],
num_boost_round=300,
early_stopping_rounds=50
)
y_pred_tuned = model2.predict(X_val)
tuned_metric = mean_squared_error(y_val, y_pred_tuned)**0.5
print('tuned model metric: ', tuned_metric)
<predict_on_test> | df = [train_df, test_df] | Titanic - Machine Learning from Disaster |
10,161,774 | y_pred2= np.expm1(model2.predict(X_test))
<load_from_csv> | for data in df:
data['Title'] = data['Name'].str.extract(r', (\w+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex'] ).transpose() | Titanic - Machine Learning from Disaster |
10,161,774 | sample_submission = pd.read_csv("../input/exam-for-students20200129/sample_submission.csv", index_col=0)
df_test = pd.read_csv("../input/exam-for-students20200129/test.csv")
df_train = pd.read_csv("../input/exam-for-students20200129/train.csv")
df_train['SalaryType_flg'] = df_train.SalaryType.isnull()
df_test['SalaryType_flg'] = df_test.SalaryType.isnull()
df_train['Country_flg'] = df_train.Country.isnull()
df_test['Country_flg'] = df_test.Country.isnull()
df_train['Employment_flg'] = df_train.Employment.isnull()
df_test['Employment_flg'] = df_test.Employment.isnull()
df_train['Currency_flg'] = df_train.Currency.isnull()
df_test['Currency_flg'] = df_test.Currency.isnull()
df_train['YearsCodingProf'] = df_train.YearsCodingProf.isnull()
df_test['YearsCodingProf'] = df_test.YearsCodingProf.isnull()
df_train['ConvertedSalary'] = df_train['ConvertedSalary'].apply(np.log1p)
y_train=df_train.ConvertedSalary
X_train=df_train.drop(['ConvertedSalary'], axis=1)
X_test=df_test
cats = []
for col in X_train.columns:
if X_train[col].dtype == 'object':
cats.append(col)
print(col, X_train[col].nunique())
encoder =OrdinalEncoder(cols=cats)
X_train[cats] = encoder.fit_transform(X_train[cats])
X_test[cats] = encoder.transform(X_test[cats])
X_train.fillna(-9999,inplace=True)
X_test.fillna(-9999,inplace=True)
<train_model> | for data in df:
data['Title'] = data['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
data['Title'] = data['Title'].replace('Mlle', 'Miss')
data['Title'] = data['Title'].replace('Ms', 'Miss')
data['Title'] = data['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'] ).mean()
labels = {'Mr':1, 'Mrs':2, 'Master':3, 'Miss':4, 'Rare':5}
test_df.replace({'Title':labels}, inplace = True)
train_df.replace({'Title':labels}, inplace = True)
train_df['Title'] = train_df['Title'].fillna(0)
train_df['Title'] = train_df['Title'].astype(int ) | Titanic - Machine Learning from Disaster |
10,161,774 | %%time
X_train,X_val,y_train,y_val=train_test_split(X_train,y_train,random_state=71)
train_dataset=lgb.Dataset(X_train,y_train)
valid_dataset=lgb.Dataset(X_val,y_val,reference=train_dataset)
params={"objective":"regression",
"metric":"rmse"
}
model3=lgb_tuner.train(params,
train_set=train_dataset,
valid_sets=[valid_dataset],
num_boost_round=300,
early_stopping_rounds=50
)
y_pred_tuned = model3.predict(X_val)
tuned_metric = mean_squared_error(y_val, y_pred_tuned)**0.5
print('tuned model metric: ', tuned_metric)
<predict_on_test> | pd.DataFrame({'Train':train_df.isnull().sum() , 'Test':test_df.isnull().sum() } ).transpose() | Titanic - Machine Learning from Disaster |
10,161,774 | y_pred3 = np.expm1(model3.predict(X_test))<define_variables> | print('Missing Values in Age column: ',177/len(train_df['Age'])*100)
print('Missing Values in Cabin column: ',687/len(train_df['Cabin'])*100)
print('Missing Values in Embarked column: ',2/len(train_df['Embarked'])*100 ) | Titanic - Machine Learning from Disaster |
10,161,774 | y_pred_x=0.5*y_pred2+0.5*y_pred3
<save_to_csv> | train_df["Age"] = train_df["Age"].fillna(-0.5)
test_df["Age"] = test_df["Age"].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train_df['AgeGroup'] = pd.cut(train_df["Age"], bins, labels = labels)
test_df['AgeGroup'] = pd.cut(test_df["Age"], bins, labels = labels)
| Titanic - Machine Learning from Disaster |
10,161,774 | sample_submission = pd.read_csv("../input/exam-for-students20200129/sample_submission.csv", index_col=0)
sample_submission.ConvertedSalary = y_pred_x
now = datetime.datetime.now()
filename = './output/log_' + now.strftime('%Y%m%d_%H%M%S')+ '.csv'
sample_submission.to_csv(now.strftime('%Y%m%d_%H%M%S')+ '_sample_submission.csv' )<load_from_csv> | mr_age = train_df[train_df["Title"] == 1]["AgeGroup"].mode()
miss_age = train_df[train_df["Title"] == 2]["AgeGroup"].mode()
mrs_age = train_df[train_df["Title"] == 3]["AgeGroup"].mode()
master_age = train_df[train_df["Title"] == 4]["AgeGroup"].mode()
rare_age = train_df[train_df["Title"] == 5]["AgeGroup"].mode()
age_title_mapping = {1: "Young Adult", 2: "Student", 3: "Adult", 4: "Baby", 5: "Adult"}
for x in range(len(train_df["AgeGroup"])) :
if train_df["AgeGroup"][x] == "Unknown":
train_df["AgeGroup"][x] = age_title_mapping[train_df["Title"][x]]
for x in range(len(test_df["AgeGroup"])) :
if test_df["AgeGroup"][x] == "Unknown":
test_df["AgeGroup"][x] = age_title_mapping[test_df["Title"][x]] | Titanic - Machine Learning from Disaster |
10,161,774 |
<set_options> | age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train_df['AgeGroup'] = train_df['AgeGroup'].map(age_mapping ).astype(int)
test_df['AgeGroup'] = test_df['AgeGroup'].map(age_mapping ).astype(int ) | Titanic - Machine Learning from Disaster |
10,161,774 | warnings.filterwarnings('ignore')
plt.style.use('ggplot')
%matplotlib inline
<load_from_csv> | train_df['Embarked'].fillna('S', inplace = True)
label = {'S':1, 'C':2, 'Q':3}
train_df.replace({'Embarked':label}, inplace = True)
test_df.replace({'Embarked':label}, inplace = True ) | Titanic - Machine Learning from Disaster |