kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57
|---|---|---|---|
21,768,463 | print(train.shape[0])
print(all_data.shape[0])
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
train_price = train["logprice"]
print(train_price.head())
y = train_price<compute_test_metric> | guess_ages = np.zeros(( 2,3))
guess_ages | Titanic - Machine Learning from Disaster |
21,768,463 | def rmse_cv(model):
rmse= np.sqrt(-cross_val_score(model, X_train, y, scoring="neg_mean_squared_error", cv = 5))
return(rmse)
def rmse(ypred, yval):
rmseval = np.sqrt(np.mean((yval - ypred)**2))
return(rmseval )<train_on_grid> | for dataset in combine:
for i in range(0, 2):
for j in range(0, 3):
guess_df = dataset[(dataset['Sex'] == i)& \
(dataset['Pclass'] == j+1)]['Age'].dropna()
age_guess = guess_df.median()
guess_ages[i,j] = int(age_guess/0.5 + 0.5)* 0.5
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[(dataset.Age.isnull())&(dataset.Sex == i)&(dataset.Pclass == j+1),\
'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
train_data.head() | Titanic - Machine Learning from Disaster |
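For comparison, a minimal vectorized sketch of the same imputation (assuming the same combine list and column names); like the loop above, it rounds each Sex x Pclass median to the nearest 0.5 before filling:
for dataset in combine:
    # Broadcast each group's median age to its rows, round to nearest 0.5, fill gaps.
    med = dataset.groupby(['Sex', 'Pclass'])['Age'].transform('median')
    dataset['Age'] = dataset['Age'].fillna((med / 0.5).round() * 0.5).astype(int)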
21,768,463 | lm = LinearRegression()
lm_fit = lm.fit(X_train,y)
lm_pred = np.array(lm_fit.predict(X_train))
print(lm_fit)
print(round(rmse_cv(lm_fit ).mean() ,4))
<create_dataframe> | train_data['AgeBand'] = pd.cut(train_data['Age'], 5)
train_data[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean().sort_values(by='AgeBand', ascending=True ) | Titanic - Machine Learning from Disaster |
21,768,463 | predp = pd.DataFrame(lm_pred )<rename_columns> | for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
train_data.head() | Titanic - Machine Learning from Disaster |
21,768,463 | predp.columns = ['x']
predp.set_index('x')
<feature_engineering> | train_data = train_data.drop(['AgeBand'], axis=1)
combine = [train_data, test_data]
train_data.head() | Titanic - Machine Learning from Disaster |
21,768,463 | cdf = predp.sort_values('x' ).reset_index()
cdf['p'] = cdf.index / float(len(cdf)- 1)
interp = interp1d(cdf['x'], cdf['p'])
print(cdf.head())
print(predp['x'].quantile(0.57))
print(interp(8.0403611))
print(interp(predp['x'].quantile(0.43)))
<define_variables> | for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_data[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean().sort_values(by='Survived', ascending=False ) | Titanic - Machine Learning from Disaster |
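As a usage note for the empirical CDF above: interp maps a predicted value to its percentile, the inverse direction of quantile(), so a round trip approximately recovers the probability (predp and interp as defined in that cell):
q = 0.43
x_q = predp['x'].quantile(q)   # value sitting at the 43rd percentile
print(interp(x_q))             # approximately 0.43 again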
21,768,463 | sz = len(lm_pred)
sz<predict_on_test> | for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_data[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean() | Titanic - Machine Learning from Disaster |
21,768,463 | lm_predict = lm.predict(X_train)
print(r2_score(y, lm_predict))<choose_model_class> | train_data = train_data.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_data = test_data.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_data, test_data]
train_data.head() | Titanic - Machine Learning from Disaster |
21,768,463 | model_ridge = Ridge()<find_best_params> | for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_data.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10 ) | Titanic - Machine Learning from Disaster |
21,768,463 | alphas = [0.00001,0.0001, 0.001, 0.005, 0.01, 0.03, 0.05, 0.08, 0.1,.25,.5, 1]
cv_ridge = [rmse_cv(Ridge(alpha = alpha)).mean()
for alpha in alphas]<train_on_grid> | y = train_data["Survived"]
features = ["Pclass", "Sex", "Age", "Embarked", "Title", "IsAlone", "Age*Class"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
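A hedged follow-up sketch for the alpha sweep above: pick the alpha with the lowest mean CV RMSE and refit (best_alpha is an assumed name, not from the original):
best_alpha = alphas[int(np.argmin(cv_ridge))]   # alpha minimizing CV RMSE
model_ridge = Ridge(alpha=best_alpha).fit(X_train, y)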
21,768,463 | <find_best_params><EOS> | y = train_data["Survived"]
features = ["Pclass", "Sex", "Age", "Embarked", "Title", "IsAlone"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
21,660,342 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables> | import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score | Titanic - Machine Learning from Disaster |
21,660,342 | print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables" )<concatenate> | train=pd.read_csv('../input/titanic/train.csv' ) | Titanic - Machine Learning from Disaster |
21,660,342 | imp_coef = pd.concat([coef.sort_values().head(12),
coef.sort_values().tail(11)] )<import_modules> | train.isnull().sum() | Titanic - Machine Learning from Disaster |
21,660,342 | import xgboost as xgb<train_on_grid> | train['Survived'].value_counts() | Titanic - Machine Learning from Disaster |
21,660,342 | dtrain = xgb.DMatrix(X_train, label = y)
dtest = xgb.DMatrix(X_test)
params = {"max_depth":2, "eta":0.1}
model = xgb.cv(params, dtrain, num_boost_round=1000, early_stopping_rounds=100 )<train_model> | train['Survived'].value_counts(normalize=True ) | Titanic - Machine Learning from Disaster |
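A note on the xgb.cv call above: it returns a DataFrame with one row per boosting round, so under early stopping the row count is the number of rounds kept; a minimal sketch:
n_rounds = model.shape[0]   # rounds retained by early stopping
print(model.tail(1))        # train/test RMSE at the final kept round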
21,660,342 | model_xgb = xgb.XGBRegressor(n_estimators=1000, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y )<predict_on_test> | train['Sex'].value_counts() | Titanic - Machine Learning from Disaster |
21,660,342 | xgb_preds = np.expm1(model_xgb.predict(X_test))
lasso_preds = np.expm1(model_lasso.predict(X_test))<create_dataframe> | embark=pd.get_dummies(train['Embarked'] ) | Titanic - Machine Learning from Disaster |
21,660,342 | predictions = pd.DataFrame({"xgb":xgb_preds, "lasso":lasso_preds})
predictions.plot(x = "xgb", y = "lasso", kind = "scatter" )<prepare_output> | gender=pd.get_dummies(train['Sex'] ) | Titanic - Machine Learning from Disaster |
21,660,342 | preds = 0.7*lasso_preds + 0.3*xgb_preds<save_to_csv> | train['Age'].fillna(30,inplace=True)
train['Age']=train['Age'].astype('int' ) | Titanic - Machine Learning from Disaster |
21,660,342 | solution = pd.DataFrame({"ID":test.ID, "price":preds})
solution.to_csv("ridge_sol.csv", index = False )<import_modules> | train['Age_grp']=train['Age'].apply(lambda x: age_grp(x)) | Titanic - Machine Learning from Disaster |
21,660,342 | from keras.layers import Dense
from keras.models import Sequential
from keras.regularizers import l1
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split<normalization> | age=pd.get_dummies(train['Age_grp'] ) | Titanic - Machine Learning from Disaster |
21,660,342 | X_train = StandardScaler().fit_transform(X_train )<split> | train_df=pd.concat([train,embark,gender,age],axis=1 ) | Titanic - Machine Learning from Disaster |
21,660,342 | X_tr, X_val, y_tr, y_val = train_test_split(X_train, y, random_state = 3 )<choose_model_class> | def is_var(val):
if val>0:
return 1
else:
return 0 | Titanic - Machine Learning from Disaster |
21,660,342 | model = Sequential()
model.add(Dense(1, input_dim = X_train.shape[1], kernel_regularizer=l1(0.001)))
model.compile(loss = "mse", optimizer = "adam" )<train_model> | train_df['Family']=train_df['Parch'] + 1 + train_df['SibSp']
train_df['Parch']=train_df['Parch'].apply(lambda x: is_var(x))
train_df['SibSp']=train_df['SibSp'].apply(lambda x: is_var(x)) | Titanic - Machine Learning from Disaster |
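Because Family is built from the raw counts first, the two apply calls above could be replaced by a vectorized sketch (same column names assumed):
train_df['Parch'] = (train_df['Parch'] > 0).astype(int)   # 1 if any parents/children aboard
train_df['SibSp'] = (train_df['SibSp'] > 0).astype(int)   # 1 if any siblings/spouse aboard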
21,660,342 | hist = model.fit(X_tr, y_tr, validation_data =(X_val, y_val))<set_options> | sel_cols=['Fare',
'Pclass', 'SibSp',
'Parch', 'C', 'Q',
'S', 'female', 'male', '18-24', '25-34', '35-44', '45+', '<13','13-18', 'Family'
] | Titanic - Machine Learning from Disaster |
21,660,342 | %matplotlib inline<set_options> | train_df.fillna(0,inplace=True ) | Titanic - Machine Learning from Disaster |
21,660,342 | def seed_everything(seed=42):
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True<define_variables> | X=train_df[sel_cols] | Titanic - Machine Learning from Disaster |
DIRPATH = '../input/ailab-ml-training-0/'
TRAIN_IMAGE_DIR = 'train_images/train_images/'
TEST_IMAGE_DIR = 'test_images/test_images/'
ID = 'fname'
TARGET = 'label'
VALID_SIZE = 0.2
EPOCHS = 5
BATCH_SIZE = 64
LR = 1e-3
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
SEED = 42
seed_everything(SEED )<load_from_csv> | y=train_df['Survived'] | Titanic - Machine Learning from Disaster |
21,660,342 | train_df = pd.read_csv(os.path.join(DIRPATH, 'train.csv'))<load_pretrained> | train_X,val_X,train_y,val_y=train_test_split(X,y,test_size=0.3,random_state=1 ) | Titanic - Machine Learning from Disaster |
21,660,342 | class TrainDataset(Dataset):
def __init__(self, fname_list, label_list, image_dir, transform=None):
super().__init__()
self.fname_list = fname_list
self.label_list = label_list
self.image_dir = image_dir
self.transform = transform
def __len__(self):
return len(self.fname_list)
def __getitem__(self, idx):
fname = self.fname_list[idx]
label = self.label_list[idx]
image = cv2.imread(os.path.join(self.image_dir, fname))
if self.transform is not None:
image = self.transform(image)
return image, label<define_search_model> | lr=LogisticRegression(max_iter=400)
lr.fit(train_X,train_y ) | Titanic - Machine Learning from Disaster |
21,660,342 | class SimpleClassifier(nn.Module):
def __init__(self):
super().__init__()
self.conv_module = nn.Sequential(
nn.Conv2d(3, 32, 3, stride=1, padding=1, bias=True),
nn.ReLU(True),
nn.MaxPool2d(2),
nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=True),
nn.ReLU(True),
nn.MaxPool2d(2),
nn.Conv2d(64, 128, 3, stride=1, padding=1, bias=True),
nn.ReLU(True),
)
self.dense_module = nn.Sequential(
nn.Linear(128*7*7, 10, bias=True)
)
def forward(self, x):
x = self.conv_module(x)
x = x.view(x.size(0), -1)
x = self.dense_module(x)
return x<split> | lr.score(train_X,train_y ) | Titanic - Machine Learning from Disaster |
21,660,342 | fname_list = train_df[ID].to_list()
label_list = train_df[TARGET].to_list()
train_fname_list, valid_fname_list, train_label_list, valid_label_list = train_test_split(
fname_list, label_list, test_size=VALID_SIZE, random_state=SEED, shuffle=True
)<create_dataframe> | lr.score(val_X,val_y ) | Titanic - Machine Learning from Disaster |
21,660,342 | image_dir = os.path.join(DIRPATH, TRAIN_IMAGE_DIR)
transform = transforms.Compose([
transforms.ToTensor()
])
train_dataset = TrainDataset(train_fname_list, train_label_list, image_dir, transform=transform)
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_dataset = TrainDataset(valid_fname_list, valid_label_list, image_dir, transform=transform)
valid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False )<choose_model_class> | test=pd.read_csv('../input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
21,660,342 | model = SimpleClassifier().to(DEVICE)
optim = Adam(model.parameters() , lr=LR)
criterion = nn.CrossEntropyLoss()<train_model> | embark=pd.get_dummies(test['Embarked'] ) | Titanic - Machine Learning from Disaster |
21,660,342 | mb = master_bar(range(EPOCHS))
for epoch in mb:
model.train()
train_loss_list = []
train_accuracy_list = []
for batch_image, batch_label in progress_bar(train_dataloader, parent=mb):
batch_image = batch_image.to(dtype=torch.float32, device=DEVICE)
batch_label = batch_label.to(dtype=torch.long, device=DEVICE)
optim.zero_grad()
batch_pred = model(batch_image)
loss = criterion(batch_pred, batch_label)
loss.backward()
optim.step()
train_loss_list.append(loss.item())
accuracy = accuracy_score(torch.argmax(batch_pred, axis=1 ).cpu().numpy() , batch_label.cpu().numpy())
train_accuracy_list.append(accuracy)
model.eval()
valid_loss_list = []
valid_accuracy_list = []
for batch_image, batch_label in valid_dataloader:
batch_image = batch_image.to(dtype=torch.float32, device=DEVICE)
batch_label = batch_label.to(dtype=torch.long, device=DEVICE)
with torch.no_grad() :
batch_pred = model(batch_image)
loss = criterion(batch_pred, batch_label)
valid_loss_list.append(loss.item())
accuracy = accuracy_score(torch.argmax(batch_pred, axis=1 ).cpu().numpy() , batch_label.cpu().numpy())
valid_accuracy_list.append(accuracy)
mb.write('epoch: {}/{} - loss: {:.5f} - accuracy: {:.3f} - val_loss: {:.5f} - val_accuracy: {:.3f}'.format(
epoch,
EPOCHS,
np.mean(train_loss_list),
np.mean(train_accuracy_list),
np.mean(valid_loss_list),
np.mean(valid_accuracy_list)
))<load_from_csv> | gender=pd.get_dummies(test['Sex'] ) | Titanic - Machine Learning from Disaster |
21,660,342 | submission_df = pd.read_csv(os.path.join(DIRPATH, 'sample_submission.csv'))<categorify> | test['Age'].fillna(30,inplace=True)
test['Age']=test['Age'].astype('int' ) | Titanic - Machine Learning from Disaster |
21,660,342 | fname_list = submission_df[ID].to_list()
label_list = submission_df[TARGET].to_list()
image_dir = os.path.join(DIRPATH, TEST_IMAGE_DIR)
transform = transforms.Compose([transforms.ToTensor() ])
test_dataset = TrainDataset(fname_list, label_list, image_dir, transform=transform)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False )<feature_engineering> | test['Age_grp']=test['Age'].apply(lambda x: age_grp(x)) | Titanic - Machine Learning from Disaster |
21,660,342 | submission_df[TARGET] = predictions<save_to_csv> | age=pd.get_dummies(test['Age_grp'] ) | Titanic - Machine Learning from Disaster |
21,660,342 | submission_df.to_csv('submission.csv', index=False )<import_modules> | test_df=pd.concat([test,embark,gender,age],axis=1 ) | Titanic - Machine Learning from Disaster |
21,660,342 | FileLink('submission.csv' )<import_modules> | test_df['Family']=test_df['Parch']+1+test_df['SibSp']
test_df['Parch']=test_df['Parch'].apply(lambda x: is_var(x))
test_df['SibSp']=test_df['SibSp'].apply(lambda x: is_var(x)) | Titanic - Machine Learning from Disaster |
21,660,342 | import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import re
import torch
from sklearn.model_selection import train_test_split
from tqdm import tqdm_notebook
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from scipy import stats
from sklearn.model_selection import KFold
import os,urllib,glob,tarfile
from transformers import BertJapaneseTokenizer, BertModel, BertConfig<install_modules> | test_df.fillna(0,inplace=True ) | Titanic - Machine Learning from Disaster |
21,660,342 | !pip install mecab-python3==0.996.2 fugashi ipadic<load_from_csv> | test_X=test_df[sel_cols] | Titanic - Machine Learning from Disaster |
21,660,342 | train_df = pd.read_csv("../input/japanesereviewratingprediction/yahoo_review_train.csv")
test_df = pd.read_csv("../input/japanesereviewratingprediction/yahoo_review_test.csv")
sample_submission = pd.read_csv("../input/japanesereviewratingprediction/sample_submission.csv" )<filter> | test_y=lr.predict(test_X ) | Titanic - Machine Learning from Disaster |
21,660,342 | train_df = train_df[train_df["Label"]!=-1]<load_pretrained> | sub=pd.read_csv('../input/titanic/gender_submission.csv' ) | Titanic - Machine Learning from Disaster |
21,660,342 | tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")
for i,item in enumerate(tokenizer.vocab.items()):
print(item)
if i > 20:
break<define_variables> | sub['Survived']=test_y | Titanic - Machine Learning from Disaster |
21,660,342 | <string_transform><EOS> | sub.to_csv('submission.csv',index=False ) | Titanic - Machine Learning from Disaster |
21,648,808 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables> | !pip install autogluon --user
clear_output() | Titanic - Machine Learning from Disaster |
21,648,808 | token = ["[CLS]"]+tokenizer.tokenize(text)[:max_lengths-2]+["[SEP]"]
input_id = tokenizer.convert_tokens_to_ids(token)
segment_id = [0]*max_lengths
attention_mask = [1]*len(input_id)+[0]*(max_lengths - len(input_id))
input_id = input_id+[pad]*(max_lengths-len(input_id))<define_variables> | import numpy as np
import pandas as pd
import os
import random
from autogluon.tabular import TabularDataset, TabularPredictor | Titanic - Machine Learning from Disaster |
21,648,808 | class ReviewDataset(torch.utils.data.Dataset):
def __init__(self, texts, labels=[]):
self.input_ids, self.segment_ids, self.attention_masks = [],[],[]
for text in tqdm(texts):
token = ["[CLS]"]+tokenizer.tokenize(text)[:max_lengths-2]+["[SEP]"]
input_id = tokenizer.convert_tokens_to_ids(token)
segment_id = [0]*max_lengths
attention_mask = [1]*len(input_id)+[0]*(max_lengths - len(input_id))
input_id = input_id+[pad]*(max_lengths-len(input_id))
self.input_ids.append(input_id)
self.segment_ids.append(segment_id)
self.attention_masks.append(attention_mask)
self.input_ids = np.array(self.input_ids)
self.segment_ids = np.array(self.segment_ids)
self.attention_masks = np.array(self.attention_masks)
self.labels = labels
def __len__(self):
return len(self.input_ids)
def __getitem__(self, idx):
if len(self.labels):
return self.input_ids[idx], self.segment_ids[idx], self.attention_masks[idx], self.labels[idx]
else:
return self.input_ids[idx], self.segment_ids[idx], self.attention_masks[idx]<split> | TRAIN_PATH = "../input/titanic/train.csv"
TEST_PATH = "../input/titanic/test.csv"
SAMPLE_SUBMISSION_PATH = "../input/titanic/gender_submission.csv"
SUBMISSION_PATH = "submission.csv"
TARGET = 'Survived'
EVAL_METRIC = "roc_auc"
SAVE_PATH = 'agModels-predictClass'
DEFAULT_RANDOM_SEED = 2021
def seedBasic(seed=DEFAULT_RANDOM_SEED):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
seedBasic() | Titanic - Machine Learning from Disaster |
21,648,808 | batch_size=8
X,y = train_df["Body"].values, train_df["Label"].values
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=100,stratify=y)
train_ds = ReviewDataset(texts=X_train, labels=y_train)
train_dl = torch.utils.data.DataLoader(
train_ds, batch_size=batch_size, shuffle=True)
val_ds = ReviewDataset(texts=X_val, labels=y_val)
val_dl = torch.utils.data.DataLoader(
val_ds, batch_size=batch_size, shuffle=False )<choose_model_class> | train = TabularDataset(TRAIN_PATH)
test = TabularDataset(TEST_PATH ) | Titanic - Machine Learning from Disaster |
21,648,808 | class BertClassification(nn.Module):
def __init__(self, bert):
super(BertClassification, self ).__init__()
self.bert = bert
self.cls = nn.Linear(in_features=768, out_features=2)
nn.init.normal_(self.cls.weight, std=0.02)
nn.init.normal_(self.cls.bias, 0)
def forward(self, input_ids, token_type_ids=None, attention_mask=None):
pooled_output = self.bert(
input_ids, token_type_ids, attention_mask)[1]
pooled_output = pooled_output.view(-1, 768)
out = self.cls(pooled_output)
return out<choose_model_class> | predictor = TabularPredictor(label=TARGET,eval_metric=EVAL_METRIC, path=SAVE_PATH ).fit(train ) | Titanic - Machine Learning from Disaster |
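A hedged note on the [1] index in forward() above: with older transformers versions (tuple outputs, return_dict=False), BertModel returns the hidden states first and the pooled vector second:
# outputs[0]: last_hidden_state, shape (batch, seq_len, 768)
# outputs[1]: pooler_output, shape (batch, 768) -- the [CLS] summary fed to self.cls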
21,648,808 | cuda = torch.cuda.is_available()
bert = BertModel.from_pretrained("cl-tohoku/bert-base-japanese-whole-word-masking")
model = BertClassification(bert)
torch.backends.cudnn.benchmark = True
if cuda:
model.cuda()
optimizer = optim.Adam(model.parameters() ,lr = 4e-4 ,betas=(0.9, 0.999))
criterion = nn.CrossEntropyLoss()<categorify> | y_pred = predictor.predict(test ) | Titanic - Machine Learning from Disaster |
21,648,808 | for param in model.bert.encoder.layer[:-1].parameters() :
param.requires_grad = False<train_model> | submission = pd.read_csv(SAMPLE_SUBMISSION_PATH)
submission[TARGET] = y_pred
submission.to_csv(SUBMISSION_PATH,index=False)
submission.head() | Titanic - Machine Learning from Disaster |
21,374,940 | epochs = 2
for epoch in range(epochs):
total_loss = 0
accuracy = 0
model.train()
print("epoch {} start!".format(epoch+1))
for iter_num,(input_ids, segment_ids, attention_masks, labels)in tqdm(enumerate(train_dl),total = len(train_dl)) :
optimizer.zero_grad()
if cuda:
input_ids, segment_ids, attention_masks, labels =\
input_ids.cuda() , segment_ids.cuda() , attention_masks.cuda() , labels.cuda()
outputs = model(input_ids = input_ids,
token_type_ids = segment_ids,
attention_mask = attention_masks)
pred_proba = outputs.softmax(dim=1)[:,1]
pred =(pred_proba>=0.5 ).type(torch.int)
loss = criterion(outputs,labels)
loss.backward()
optimizer.step()
total_loss += loss.item()
accuracy +=(pred==labels ).sum().item()
if(iter_num+1)% 50 == 0:
size = batch_size*(iter_num+1)
print("{} iter loss:{:.4f} accuracy:{:.4f}".format(
iter_num+1,total_loss/(iter_num+1),accuracy/size))
total_loss /= len(train_dl)
accuracy /= len(train_ds)
val_total_loss = 0
val_accuracy = 0
model.eval()
for input_ids, segment_ids, attention_masks, labels in tqdm(val_dl):
if cuda:
input_ids, segment_ids, attention_masks, labels =\
input_ids.cuda() , segment_ids.cuda() , attention_masks.cuda() , labels.cuda()
with torch.no_grad() :
outputs = model(input_ids = input_ids,
token_type_ids = segment_ids,
attention_mask = attention_masks)
pred_proba = outputs.softmax(dim=1)[:,1]
pred =(pred_proba>=0.5 ).type(torch.int)
loss = criterion(outputs,labels)
val_total_loss += loss.item()
val_accuracy +=(pred==labels ).sum().item()
val_total_loss /= len(val_dl)
val_accuracy /= len(val_ds)
print("epoch{} total loss:{:.4f}, accuracy:{:.4f}, val_total loss:{:.4f}, val_accuracy:{:.4f}"\
.format(epoch+1,total_loss,accuracy,val_total_loss,val_accuracy))
<create_dataframe> | init_notebook_mode(connected=True)
| Titanic - Machine Learning from Disaster |
21,374,940 | test_ds = ReviewDataset(texts=test_df["Body"].values)
test_dl = torch.utils.data.DataLoader(
test_ds, batch_size=8, shuffle=False )<categorify> | warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
21,374,940 | lists = []
for input_ids, segment_ids, attention_masks in tqdm(test_dl):
if cuda:
input_ids, segment_ids, attention_masks =\
input_ids.cuda() , segment_ids.cuda() , attention_masks.cuda()
outputs = model(input_ids = input_ids,
token_type_ids = segment_ids,
attention_mask = attention_masks)
pred_proba = outputs.softmax(dim=1)[:,1]
lists += list(pred_proba.cpu().detach().numpy() )<save_to_csv> | train_df=pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv" ) | Titanic - Machine Learning from Disaster |
21,374,940 | test_df["Label"]=lists
submission_df=test_df[["Id","Label"]]
submission_df.to_csv("sample_submission.csv",index=False )<install_modules> | train_len = len(train_df)
combined = train_df.append(test_df,ignore_index=True)
combined.fillna(np.nan ) | Titanic - Machine Learning from Disaster |
21,374,940 | !pip install mecab-python3==0.996.2<load_pretrained> | combined.isnull().sum() | Titanic - Machine Learning from Disaster |
21,374,940 | tokenizer = BertJapaneseTokenizer.from_pretrained("bert-base-japanese")
for i,item in enumerate(tokenizer.vocab.items()):
print(item)
if i > 20:
break<choose_model_class> | combined.groupby(['Pclass','Sex'])['Age'].mean() | Titanic - Machine Learning from Disaster |
21,374,940 | cuda = torch.cuda.is_available()
bert = BertModel.from_pretrained("bert-base-japanese")
model = BertClassification(bert)
torch.backends.cudnn.benchmark = True
if cuda:
model.cuda()
optimizer = optim.Adam(model.parameters() ,lr = 4e-4 ,betas=(0.9, 0.999))
criterion = nn.CrossEntropyLoss()<import_modules> | combined['AgeGroup'] = 'adult'
combined.loc[combined['Name'].str.contains('Master'),'AgeGroup'] = "child"
combined.loc[combined['Age'] <= 14.0,'AgeGroup'] = "child"
combined.loc[(combined['Age'].isnull())&(combined['Name'].str.contains('Miss')) &(combined['Parch'] != 0),'AgeGroup'] = "child" | Titanic - Machine Learning from Disaster |
21,374,940 | import numpy as np
import pandas as pd<load_from_csv> | combined[combined['Age'].notnull() ].groupby(['Pclass','Sex','AgeGroup'])['Age'].mean() | Titanic - Machine Learning from Disaster |
21,374,940 | train_data = pd.read_csv('/kaggle/input/syde522/train.csv' )<load_pretrained> | def Age(cols):
Age=cols[0]
Pclass=cols[1]
Sex=cols[2]
AgeGroup=cols[3]
if pd.isnull(Age):
if Pclass==1:
if Sex=="male":
if AgeGroup=='adult':
return 42
else:
return 7
elif Sex=="female":
if AgeGroup=='adult':
return 37
else:
return 8
elif Pclass==2:
if Sex=="male":
if AgeGroup=='adult':
return 33
else:
return 4
elif Sex=="female":
if AgeGroup=='adult':
return 31
else:
return 7
elif Pclass==3:
if Sex=="male":
if AgeGroup=='adult':
return 29
else:
return 7
elif Sex=="female":
if AgeGroup=='adult':
return 27
else:
return 5
else:
return Age
combined["Age"]=combined[["Age","Pclass","Sex","AgeGroup"]].apply(Age,axis=1 ) | Titanic - Machine Learning from Disaster |
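A compact sketch equivalent to the nested Age() function above, with the same median values moved into a lookup table (AGE_TABLE is an assumed name, not from the original):
AGE_TABLE = {
    (1, 'male', 'adult'): 42, (1, 'male', 'child'): 7,
    (1, 'female', 'adult'): 37, (1, 'female', 'child'): 8,
    (2, 'male', 'adult'): 33, (2, 'male', 'child'): 4,
    (2, 'female', 'adult'): 31, (2, 'female', 'child'): 7,
    (3, 'male', 'adult'): 29, (3, 'male', 'child'): 7,
    (3, 'female', 'adult'): 27, (3, 'female', 'child'): 5,
}
combined['Age'] = combined.apply(
    lambda r: AGE_TABLE[(r['Pclass'], r['Sex'], r['AgeGroup'])]
    if pd.isnull(r['Age']) else r['Age'], axis=1)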
21,374,940 | CNNmodel = VGG16(weights=None,input_shape=(150, 150, 3), include_top=False, pooling='max',)
CNNmodel.load_weights('/kaggle/input/vgg16-weights/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5')
img_path = '/kaggle/input/syde522/train/train/0011.png'
img = image.load_img(img_path, target_size=(150,150))
def getFeatures(img):
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
featur = CNNmodel.predict(x)
return featur.reshape(1, 512)
<categorify> | def AgeBand(col):
Age=col[0]
if Age <=7:
return "0-7"
elif Age <=14:
return "8-14"
elif Age <=21:
return "15-21"
elif Age <= 28:
return "22-28"
elif Age <= 35:
return "29-35"
elif Age <= 42:
return "36-42"
elif Age <= 49:
return "43-49"
elif Age <= 56:
return "50-56"
elif Age <= 63:
return "57-63"
else:
return ">=64"
combined["AgeBand"]=combined[["Age"]].apply(AgeBand,axis=1 ) | Titanic - Machine Learning from Disaster |
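The same banding could be expressed with pd.cut; a sketch assuming the labels only need to match the strings produced above (right-closed bins mirror the <= comparisons):
bins = [0, 7, 14, 21, 28, 35, 42, 49, 56, 63, np.inf]
labels = ['0-7', '8-14', '15-21', '22-28', '29-35', '36-42', '43-49', '50-56', '57-63', '>=64']
combined['AgeBand'] = pd.cut(combined['Age'], bins=bins, labels=labels, include_lowest=True)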
21,374,940 | features=np.zeros(( 1,512))
labels=np.array('init')
for idx,(_, entry)in enumerate(train_data.iterrows()):
img=imread(entry.train_file)
feature = getFeatures(img)
features=np.append(features,feature,axis=0)
labels=np.append(labels,entry.Category)
labels=labels.reshape(features[:,1].size)
features=np.delete(features, 0, 0)
labels=np.delete(labels, 0, 0)
print(features.shape)
print(labels.shape)
features =(preprocessing.normalize(features))
neigh = KNeighborsClassifier(n_neighbors=7)
neigh.fit(features, labels)
<save_to_csv> | combined.groupby(['Pclass','Embarked'])['PassengerId'].count() | Titanic - Machine Learning from Disaster |
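Since np.append copies the whole array on every iteration, a hedged sketch of the same feature collection with plain lists (same helper names as above assumed):
feats, labs = [], []
for _, entry in train_data.iterrows():
    # getFeatures returns shape (1, 512); take the single row.
    feats.append(getFeatures(imread(entry.train_file))[0])
    labs.append(entry.Category)
features = preprocessing.normalize(np.vstack(feats))
labels = np.array(labs)
neigh = KNeighborsClassifier(n_neighbors=7).fit(features, labels)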
21,374,940 | def predictor(imgID):
img=imread('/kaggle/input/syde522/test/test/'+imgID)
feature = getFeatures(img)
feature = preprocessing.normalize(feature)
prediction=neigh.predict(feature)
return prediction[0]
test_files = glob.glob('/kaggle/input/syde522/test/test/*.png')
test_file_id = [os.path.basename(test_file)for test_file in test_files]
test_submission = pd.DataFrame({'Id': test_file_id, 'Category': [predictor(test_img_id)for test_img_id in test_file_id]})
test_submission.to_csv('submission.csv', index=False )<install_modules> | combined['Embarked'] = combined['Embarked'].fillna(combined['Embarked'].mode()[0]) | Titanic - Machine Learning from Disaster |
21,374,940 | !pip install category_encoders<load_from_csv> | ticketCount = combined.groupby('Ticket')['PassengerId'].count().reset_index()
ticketCount.rename(columns={'PassengerId':'Count on Ticket'},inplace=True)
combined = combined.merge(ticketCount, on="Ticket",how="left" ) | Titanic - Machine Learning from Disaster |
21,374,940 | train = pd.read_csv("/kaggle/input/heroz-internal-competition/train.csv")
test = pd.read_csv("/kaggle/input/heroz-internal-competition/test_features.csv")
ans = pd.read_csv("/kaggle/input/heroz-internal-competition/sample_submission.csv")
print("Train shape : ", train.shape)
print("Test shape : ", test.shape)
print("ans shape : ", ans.shape)
train<count_missing_values> | combined['Diff'] = combined['FamilySize'] - combined['Count on Ticket']
combined['Family Status'] = combined.apply(lambda x:"Has Family On Same Ticket" if(x['FamilySize'] - x['Count on Ticket'])<= 0 else "Family Not on same ticket",axis=1)
| Titanic - Machine Learning from Disaster |
21,374,940 | train.isnull().sum()<count_missing_values> | combined['Family Status'] = combined.apply(lambda x:"Is Alone" if(x['FamilySize']==1)&(x['Count on Ticket']==1)else x['Family Status'],axis=1 ) | Titanic - Machine Learning from Disaster |
21,374,940 | test.isnull().sum()<feature_engineering> | combined['Cabin Class'] = 'No Cabin'
combined['Cabin Class'] = combined.apply(lambda x: "No Cabin" if pd.isna(x["Cabin"])else x["Cabin"][0] , axis=1)
| Titanic - Machine Learning from Disaster |
21,374,940 | train['reviews_per_month'] = train['reviews_per_month'].fillna(0)
test['reviews_per_month'] = test['reviews_per_month'].fillna(0 )<feature_engineering> | tickcab = combined[combined['Cabin Class'] != 'No Cabin'][['Ticket','Cabin Class']].drop_duplicates()
tickcab = tickcab.rename(columns={'Cabin Class':'CabNam'})
combined = combined.merge(tickcab,how="left",on="Ticket")
combined['CabNam'].fillna('No Cabin')
combined['Cabin Class'] = combined.apply(lambda x:x['Cabin Class'] if x['Cabin Class'] != 'No Cabin' else x['CabNam'],axis=1)
combined.drop(columns=['CabNam'],inplace=True)
combined.drop_duplicates(inplace=True)
| Titanic - Machine Learning from Disaster |
21,374,940 | train['last_review'] = train['last_review'].fillna('2019-12-31')
test['last_review'] = test['last_review'].fillna('2019-12-31' )<feature_engineering> | combined["Fare"] = combined["Fare"].map(lambda i: np.log(i)if i > 0 else 0)
combined["Fare"] = combined["Fare"]/combined['Count on Ticket'] | Titanic - Machine Learning from Disaster |
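A worked example of the transform above (illustrative numbers): note it divides the log of the group's total fare by the head count, not the log of a per-person fare:
# Ticket shared by 4 passengers with total fare 80:
# np.log(80) ~ 4.38, then 4.38 / 4 ~ 1.10 per passenger.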
21,374,940 | train['last_review'] = pd.to_datetime(train['last_review'])
test['last_review'] = pd.to_datetime(test['last_review'])
train['year'] = train['last_review'].dt.year-2010
test['year'] = test['last_review'].dt.year-2010
train['month'] = train['last_review'].dt.month-1
test['month'] = test['last_review'].dt.month-1<feature_engineering> | companion = pd.pivot_table(combined, values='PassengerId',index=['Ticket'],columns=['AgeGroup'], aggfunc="count" ).reset_index().fillna(0)
companion.columns = ['Ticket','No.of Adult Companion', 'No.of Child Companion']
combined = combined.merge(companion, on='Ticket',how='left' ) | Titanic - Machine Learning from Disaster |
21,374,940 | train['latitude'] = train['latitude']-40
train['longitude'] = train['longitude']+75
test['latitude'] = test['latitude']-40
test['longitude'] = test['longitude']+75
train['waru'] = train['latitude'] / train['longitude']
train['kakeru'] = train['latitude'] * train['longitude']
test['waru'] = test['latitude'] / test['longitude']
test['kakeru'] = test['latitude'] * test['longitude']<feature_engineering> | combined.loc[combined['AgeGroup']=='adult','No.of Adult Companion'] = combined.loc[combined['AgeGroup']=='adult','No.of Adult Companion'] - 1
combined.loc[combined['AgeGroup']=='child','No.of Child Companion'] = combined.loc[combined['AgeGroup']=='child','No.of Child Companion'] - 1
combined['Companion'] = 'Adult & Child Companion'
combined['Companion'] = combined.apply(lambda x:'Only Adult Companion' if(x['No.of Adult Companion'] > 0)&(x['No.of Child Companion']==0)else x['Companion'],axis=1)
combined['Companion'] = combined.apply(lambda x:'Only Child Companion' if(x['No.of Adult Companion'] == 0)&(x['No.of Child Companion']>0)else x['Companion'],axis=1)
combined['Companion'] = combined.apply(lambda x:'No Companion' if(x['No.of Adult Companion'] == 0)&(x['No.of Child Companion']==0)else x['Companion'],axis=1 ) | Titanic - Machine Learning from Disaster |
21,374,940 | train['minimum_nights_log'] = np.log1p(train['minimum_nights'])
test['minimum_nights_log'] = np.log1p(test['minimum_nights'])
train['calculated_host_listings_count_log'] = np.log1p(train['calculated_host_listings_count'])
test['calculated_host_listings_count_log'] = np.log1p(test['calculated_host_listings_count'])
train['availability_365_log'] = np.log1p(train['availability_365'])
test['availability_365_log'] = np.log1p(test['availability_365'])
train['number_of_reviews_log'] = np.log1p(train['number_of_reviews'])
test['number_of_reviews_log'] = np.log1p(test['number_of_reviews'])
train['reviews_per_month_log'] = np.log1p(train['reviews_per_month'])
test['reviews_per_month_log'] = np.log1p(test['reviews_per_month'] )<define_variables> | combined[combined["Survived"].notnull() ].groupby(['AgeGroup','Companion'])['PassengerId'].count() | Titanic - Machine Learning from Disaster |
21,374,940 | target = 'price'
list_cols = ['neighbourhood_group', 'neighbourhood', 'room_type',]
target_columns = ['neighbourhood_group', 'neighbourhood', 'latitude', 'longitude', 'room_type',
'minimum_nights', 'number_of_reviews', 'reviews_per_month', 'calculated_host_listings_count', 'availability_365', 'waru', 'kakeru', 'minimum_nights_log',
'calculated_host_listings_count_log', 'availability_365_log',
'number_of_reviews_log', 'reviews_per_month_log', 'year', 'month']
ce_ohe = ce.OneHotEncoder(cols=list_cols)
train_onehot = ce_ohe.fit_transform(train[target_columns] )<count_missing_values> | combined[combined["Survived"].notnull() ].groupby(['AgeGroup','Companion'])['Survived'].mean() | Titanic - Machine Learning from Disaster |
21,374,940 | train_onehot.isnull().sum()<categorify> | train_copy = combined[:train_len]
test_copy = combined[train_len:].reset_index(drop=True)
test_copy.drop(columns=['Survived'],inplace=True ) | Titanic - Machine Learning from Disaster |
21,374,940 | test_onehot = ce_ohe.transform(test[target_columns])
<categorify> | combined.drop(columns=['PassengerId','Name','Age', 'AgeGroup','SibSp','Parch','Ticket','Cabin','Count on Ticket','Diff','No.of Adult Companion','No.of Child Companion'],inplace=True ) | Titanic - Machine Learning from Disaster |
21,374,940 | pca2 = PCA(n_components=5)
pca2_results = pca2.fit_transform(train_onehot)
train_onehot['pca0']=pca2_results[:,0]
train_onehot['pca1']=pca2_results[:,1]
train_onehot['pca2']=pca2_results[:,2]
train_onehot['pca3']=pca2_results[:,3]
train_onehot['pca4']=pca2_results[:,4]
pca2_results = pca2.transform(test_onehot)
test_onehot['pca0']=pca2_results[:,0]
test_onehot['pca1']=pca2_results[:,1]
test_onehot['pca2']=pca2_results[:,2]
test_onehot['pca3']=pca2_results[:,3]
test_onehot['pca4']=pca2_results[:,4]<prepare_x_and_y> | combined = pd.get_dummies(combined, columns = ["Sex","Embarked","AgeBand","Family Status","Cabin Class","Companion"],drop_first=True ) | Titanic - Machine Learning from Disaster |
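A compact sketch equivalent to the ten per-component assignments above (fit on train, then transform test):
pca2 = PCA(n_components=5)
train_pcs = pca2.fit_transform(train_onehot)
test_pcs = pca2.transform(test_onehot)
for k in range(5):
    train_onehot['pca%d' % k] = train_pcs[:, k]
    test_onehot['pca%d' % k] = test_pcs[:, k]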
21,374,940 | y_train = np.log1p(train["price"] )<split> | train = combined[:train_len]
test = combined[train_len:]
test.drop(columns=['Survived'],inplace=True ) | Titanic - Machine Learning from Disaster |
21,374,940 | kf = KFold(n_splits=5, shuffle=True, random_state=1)
s = list(kf.split(train_onehot, y_train))<choose_model_class> | X = train.iloc[:,1:]
y = train.iloc[:,0]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0 ) | Titanic - Machine Learning from Disaster |
21,374,940 | gbk_models = []
for train_i, val_i in s:
gbk = GradientBoostingRegressor()
gbk.fit(train_onehot.iloc[train_i], y_train.iloc[train_i])
y_pred = gbk.predict(train_onehot.iloc[val_i])
acc_gbk = round(mean_squared_error(np.expm1(y_pred), np.expm1(y_train[val_i])))
print(acc_gbk)
gbk_models.append(gbk )<train_model> | decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
pred_train = decision_tree.predict(X_train)
pred=decision_tree.predict(X_test)
pred_train_df=pd.DataFrame({"Actual":y_train,"Pred":pred_train})
pred_df=pd.DataFrame({"Actual":y_test,"Pred":pred})
cm=confusion_matrix(y_test,pred)
cm | Titanic - Machine Learning from Disaster |
21,374,940 | mlp_models = []
for train_i, val_i in s:
mlp = MLPRegressor(max_iter=100, hidden_layer_sizes=(100,100),
activation='relu', learning_rate_init=0.01)
mlp.fit(train_onehot.iloc[train_i], y_train.iloc[train_i])
y_pred = mlp.predict(train_onehot.iloc[val_i])
acc_gbk = round(mean_squared_error(np.expm1(y_pred), np.expm1(y_train[val_i])))
print(acc_gbk)
mlp_models.append(mlp )<find_best_model_class> | report=classification_report(y_test,pred)
print("Decision Tree report\n",report ) | Titanic - Machine Learning from Disaster |
21,374,940 | rfc_models = []
for train_i, val_i in s:
rfc = RandomForestRegressor()
rfc.fit(train_onehot.iloc[train_i], y_train.iloc[train_i])
y_pred = rfc.predict(train_onehot.iloc[val_i])
acc_gbk = round(mean_squared_error(np.expm1(y_pred), np.expm1(y_train[val_i])))
print(acc_gbk)
rfc_models.append(rfc )<predict_on_test> | rfc=ensemble.RandomForestClassifier(max_depth=6,random_state=0,n_estimators=64)
rfc.fit(X_train, y_train)
pred_train = rfc.predict(X_train)
pred=rfc.predict(X_test)
pred_train_df=pd.DataFrame({"Actual":y_train,"Pred":pred_train})
pred_df=pd.DataFrame({"Actual":y_test,"Pred":pred})
cm=confusion_matrix(y_test,pred)
print(cm)
report=classification_report(y_test,pred)
print("Random Forest report\n",report ) | Titanic - Machine Learning from Disaster |
21,374,940 | models = gbk_models + rfc_models
preds = np.array([np.expm1(model.predict(test_onehot)) for model in models])
preds = preds.mean(axis=0)
preds = np.where(preds < 0 , 0, preds )<save_to_csv> | y_test_rfc = rfc.predict(test ).astype(int)
test_out = pd.concat([test_copy['PassengerId'],pd.Series(y_test_rfc,name="Survived")],axis=1)
test_out['Survived'] = test_out['Survived'].astype('int')
test_out.to_csv('submission.csv',index=False ) | Titanic - Machine Learning from Disaster |
22,913,057 | ans["price"] = preds
ans.to_csv("heroz_nakai.csv", index=False )<load_from_csv> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data['Age'].fillna(train_data['Age'].mean() , inplace=True)
test_data['Age'].fillna(test_data['Age'].mean() , inplace=True)
train_data['Fare'].fillna(train_data['Fare'].mean() , inplace=True)
test_data['Fare'].fillna(test_data['Fare'].mean() , inplace=True ) | Titanic - Machine Learning from Disaster |
22,913,057 | df_train = pd.read_csv('/kaggle/input/homework-for-students3/train.csv', index_col=0, parse_dates=['issue_d'])
df_test =pd.read_csv('/kaggle/input/homework-for-students3/test.csv', index_col=0, parse_dates=['issue_d'])
year = df_train.issue_d.dt.year
df_train = df_train[year >= 2014]
columns = df_train.columns
y_train = df_train.loan_condition
X_train = df_train.drop(['loan_condition'], axis = 1)
X_test = df_test
del df_train
del df_test<drop_column> | features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Fare"]
train_y = train_data["Survived"]
train_x = pd.get_dummies(train_data[features])
test_x = pd.get_dummies(test_data[features] ) | Titanic - Machine Learning from Disaster |
22,913,057 | drop_col = ['issue_d','earliest_cr_line']
X_train = X_train.drop(columns=drop_col)
X_test = X_test.drop(columns=drop_col )<categorify> | model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(train_x, train_y)
test_y = model.predict(test_x ) | Titanic - Machine Learning from Disaster |
22,913,057 | <concatenate><EOS> | output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': test_y})
output.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
22,892,834 | <SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<data_type_conversions> | import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'grade'
mapping_dict = {
"grade": {
"A": 0,"B": 1,"C": 2,"D": 3,"E": 4,"F": 5,"G": 6
}
}
X_train = X_train.replace(mapping_dict)
X_test = X_test.replace(mapping_dict)
mapping_col = ['grade']
X_train[mapping_col] = X_train[mapping_col].fillna(-1)
X_test[mapping_col] = X_test[mapping_col].fillna(-1)
X_train[mapping_col] = X_train[mapping_col].astype(int)
X_test[mapping_col] = X_test[mapping_col].astype(int)
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<define_variables> | train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'sub_grade'
mapping_dict = {
"sub_grade": {
"A1": 0,"A2": 1,"A3": 2,"A4": 3,"A5": 4,
"B1": 5,"B2": 6,"B3": 7,"B4": 8,"B5": 9,
"C1": 10,"C2": 11,"C3": 12,"C4": 13,"C5": 14,
"D1": 15,"D2": 16,"D3": 17,"D4": 18,"D5": 19,
"E1": 20,"E2": 21,"E3": 22,"E4": 23,"E5": 24,
"F1": 25,"F2": 26,"F3": 27,"F4": 28,"F5": 29,
"G1": 30,"G2": 31,"G3": 32,"G4": 33,"G5": 34
}
}
X_train = X_train.replace(mapping_dict)
X_test = X_test.replace(mapping_dict)
mapping_col = ['sub_grade']
X_train[mapping_col] = X_train[mapping_col].fillna(-1)
X_test[mapping_col] = X_test[mapping_col].fillna(-1)
X_train[mapping_col] = X_train[mapping_col].astype(int)
X_test[mapping_col] = X_test[mapping_col].astype(int)
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()
<split> | missing_values = train_data.isna().any()
print('Columns which have missing values:\n{0}'.format(missing_values[missing_values == True].index.tolist())) | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'emp_length'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<split> | print("Percentage of missing values in `Age` column: {0:.2f}".format(100.*(train_data.Age.isna().sum() /len(train_data))))
print("Percentage of missing values in `Cabin` column: {0:.2f}".format(100.*(train_data.Cabin.isna().sum() /len(train_data))))
print("Percentage of missing values in `Embarked` column: {0:.2f}".format(100.*(train_data.Embarked.isna().sum() /len(train_data)))) | Titanic - Machine Learning from Disaster |
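The out-of-fold target-encoding block above repeats verbatim for several columns below; a hedged helper sketch of the shared pattern (target_encode is an assumed name; Series and StratifiedKFold as imported in the notebook):
def target_encode(col, X_train, y_train, X_test, target='loan_condition'):
    # Test rows: map the full-train per-category target mean.
    X_temp = pd.concat([X_train, y_train], axis=1)
    X_test[col] = X_test[col].map(X_temp.groupby(col)[target].mean())
    # Train rows: out-of-fold means so a row never sees its own label.
    enc_train = Series(np.zeros(len(X_train)), index=X_train.index)
    skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
    for train_ix, val_ix in skf.split(X_train, y_train):
        summary = X_temp.iloc[train_ix].groupby(col)[target].mean()
        enc_train.iloc[val_ix] = X_temp.iloc[val_ix][col].map(summary)
    X_train[col] = enc_train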
22,892,834 | col = 'home_ownership'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<split> | duplicates = train_data.duplicated().sum()
print('Duplicates in train data: {0}'.format(duplicates)) | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'purpose'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<split> | categorical = train_data.nunique().sort_values(ascending=True)
print('Categorical variables in train data:\n{0}'.format(categorical)) | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'title'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<categorify> | for data in [train_data, test_data]:
data.drop(['Cabin'], axis=1, inplace=True)
data.drop(['Ticket', 'Fare'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'zip_code'
target = 'loan_condition'
X_train[col] = X_train[col].str[0:2]
X_test[col] = X_test[col].str[0:2]
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
max_threshold = 0.3
min_threshold = 0.1
X_train[col] = capping(X_train[col], min_threshold , max_threshold)
X_test[col]= capping(X_test[col], min_threshold , max_threshold)
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()
<split> | for data in [train_data, test_data]:
data['Title'] = data.Name.str.split(',' ).str[1].str.split('.' ).str[0].str.strip()
data['Woman_Or_Boy'] =(data.Title == 'Master')|(data.Sex == 'female')
data.drop('Title', axis=1, inplace=True)
data.drop('Name', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
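A worked example of the Title extraction chain above: 'Braund, Mr. Owen Harris' -> split(',')[1] gives ' Mr. Owen Harris' -> split('.')[0] gives ' Mr' -> strip() leaves 'Mr'.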
22,892,834 | col = 'addr_state'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
max_threshold = 0.25
min_threshold = 0.12
X_train[col] = capping(X_train[col], min_threshold , max_threshold)
X_test[col]= capping(X_test[col], min_threshold , max_threshold)
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<split> | label_encoder = LabelEncoder()
for data in [train_data, test_data]:
data['Sex'] = label_encoder.fit_transform(data['Sex'])
data['Woman_Or_Boy'] = label_encoder.fit_transform(data['Woman_Or_Boy'] ) | Titanic - Machine Learning from Disaster |
22,892,834 | col = 'emp_title'
col4 = 'emp_title4'
target = 'loan_condition'
X_train[col4] = X_train[col].str[0:4]
X_test[col4] = X_test[col].str[0:4]
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col4])[target].mean()
X_test[col4] = X_test[col4].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col4])[target].mean()
enc_train.iloc[val_ix] = X_val[col4].map(summary)
X_train[col4] = enc_train
max_threshold = 0.5
min_threshold = 0
X_train[col4] = capping(X_train[col4],min_threshold, max_threshold)
X_test[col4]= capping(X_test[col4],min_threshold, max_threshold)
plt.figure(figsize=[7,7])
X_train[col4].hist(density=True, alpha=0.5, bins=20)
X_test[col4].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col4)
plt.ylabel('density')
plt.show()
col = 'emp_title'
target = 'loan_condition'
X_temp = pd.concat([X_train, y_train], axis=1)
summary = X_temp.groupby([col])[target].mean()
X_test[col] = X_test[col].map(summary)
enc_train = Series(np.zeros(len(X_train)) , index=X_train.index)
skf = StratifiedKFold(n_splits=5, random_state=71, shuffle=True)
for i,(train_ix, val_ix)in enumerate(( skf.split(X_train, y_train))):
X_train_, _ = X_temp.iloc[train_ix], y_train.iloc[train_ix]
X_val, _ = X_temp.iloc[val_ix], y_train.iloc[val_ix]
summary = X_train_.groupby([col])[target].mean()
enc_train.iloc[val_ix] = X_val[col].map(summary)
X_train[col] = enc_train
max_threshold = 0.5
min_threshold = 0
X_train[col] = capping(X_train[col],min_threshold, max_threshold)
X_test[col]= capping(X_test[col],min_threshold, max_threshold)
plt.figure(figsize=[7,7])
X_train[col].hist(density=True, alpha=0.5, bins=20)
X_test[col].hist(density=True, alpha=0.5, bins=20)
plt.xlabel(col)
plt.ylabel('density')
plt.show()<drop_column> | most_common = all_data.Embarked.mode()
print("Most common Embarked value: {0}".format(most_common[0]))
for data in [train_data, test_data]:
data.fillna(value={'Embarked': most_common[0]}, inplace=True ) | Titanic - Machine Learning from Disaster |
22,892,834 | drop_col = ['title','acc_now_delinq','grade','installment']
X_train = X_train.drop(columns=drop_col)
X_test = X_test.drop(columns=drop_col )<define_variables> | for data in [train_data, test_data]:
data['TravelAlone'] = np.where(data["SibSp"] + data["Parch"] > 0, 0, 1)
data.drop('SibSp', axis=1, inplace=True)
data.drop('Parch', axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |