column      type           min    max
kernel_id   int64          24.2k  23.3M
prompt      stringlengths  8      1.85M
completion  stringlengths  1      182k
comp_name   stringlengths  5      57
11,317,713
models = []
for i in range(k):
    print('i =', i)
    model = build_model()
    for j in range(k):
        if i == j:
            continue
        # train on every fold except the held-out fold i
        current_pos_indexes = folds_indexes[j]
        model.fit_generator(image_generator(current_pos_indexes),
                            steps_per_epoch=len(current_pos_indexes) / 16,
                            epochs=20)
    models.append(model)
<find_best_params>
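The cell above leans on three helpers that this row does not define (build_model, image_generator, folds_indexes). A minimal sketch of how folds_indexes could be built with scikit-learn's KFold; the names and fold count here are assumptions, not the kernel's own code:

import numpy as np
from sklearn.model_selection import KFold

# Hypothetical: split training-image indices into k folds; each entry of
# folds_indexes holds the row indices belonging to one fold.
k = 5
all_indexes = np.arange(n_train_images)  # n_train_images is assumed to exist
folds_indexes = [fold_idx for _, fold_idx in
                 KFold(n_splits=k, shuffle=True, random_state=0).split(all_indexes)]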
train['Parch'].value_counts()
Titanic - Machine Learning from Disaster
11,317,713
for model in models:
    for j in range(k):
        current_pos_indexes = folds_indexes[j]
        print(model.evaluate_generator(image_generator(indexes=current_pos_indexes),
                                       steps=len(current_pos_indexes) / 16))
<define_variables>
train['Survived'].groupby(train['Parch']).mean()
Titanic - Machine Learning from Disaster
11,317,713
test_files = os.listdir('../input/test/test/')
<load_pretrained>
train['Ticket_Len'] = train['Ticket'].apply(lambda x: len(x))
train['Ticket_Len'].value_counts()
Titanic - Machine Learning from Disaster
11,317,713
preds = []
for _ in range(len(models)):
    preds.append([])
batch = 40
for i in range(int(4000 / batch)):
    images = []
    for j in range(batch):
        img = mpimg.imread('../input/test/test/' + test_files[i * batch + j])
        images += [img]
    for k2 in range(len(models)):
        model = models[k2]
        out = model.predict(np.array(images))
        preds[k2] += [out]
<define_variables>
train['Ticket_Lett'] = train['Ticket'].apply(lambda x: str(x)[0])
train['Ticket_Lett'].value_counts()
Titanic - Machine Learning from Disaster
11,317,713
all_out = np.array(list(map(lambda x: np.array(x).reshape(-1, 1), preds)))
<feature_engineering>
pd.qcut(train['Fare'], 3).value_counts()
Titanic - Machine Learning from Disaster
11,317,713
all_out = np.mean(all_out, axis=0)
<create_dataframe>
train['Survived'].groupby(pd.qcut(train['Fare'], 3)).mean()
Titanic - Machine Learning from Disaster
11,317,713
sub_file = pd.DataFrame(data={'id': test_files, 'has_cactus': all_out.reshape(-1).tolist()})
<save_to_csv>
train['Cabin_Lett'] = train['Cabin'].apply(lambda x: str(x)[0])
train['Cabin_Lett'].value_counts()
Titanic - Machine Learning from Disaster
11,317,713
sub_file.to_csv('sample_submission.csv', index=False)
<import_modules>
train['Survived'].groupby(train['Cabin_Lett']).mean()
Titanic - Machine Learning from Disaster
11,317,713
print(fastai.__version__)
<define_variables>
train['Cabin_num'] = train['Cabin'].apply(lambda x: str(x).split(' ')[-1][1:])
train['Cabin_num'].replace('an', np.NaN, inplace=True)
train['Cabin_num'] = train['Cabin_num'].apply(lambda x: int(x) if not pd.isnull(x) and x != '' else np.NaN)
Titanic - Machine Learning from Disaster
11,317,713
PATH = Path('.')
!ls {PATH}
<load_from_csv>
pd.qcut(train['Cabin_num'], 3).value_counts()
Titanic - Machine Learning from Disaster
11,317,713
df = pd.read_csv(PATH/'../input/train.csv'); df.head()
<define_variables>
train['Survived'].groupby(pd.qcut(train['Cabin_num'], 3)).mean()
Titanic - Machine Learning from Disaster
11,317,713
bs = 128<feature_engineering>
def names(train, test):
    for i in [train, test]:
        i['Name_Len'] = i['Name'].apply(lambda x: len(x))
        i['Name_Title'] = i['Name'].apply(lambda x: x.split(',')[1]).apply(lambda x: x.split()[0])
        del i['Name']
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
tfms = get_transforms(do_flip=True, flip_vert=True)
<load_from_csv>
def age_impute(train, test):
    for i in [train, test]:
        i['Age_Null_Flag'] = i['Age'].apply(lambda x: 1 if pd.isnull(x) else 0)
        data = train.groupby(['Name_Title', 'Pclass'])['Age']
        i['Age'] = data.transform(lambda x: x.fillna(x.mean()))
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
data = (ImageList.from_csv(csv_name='train.csv', path=PATH/'../input', folder='train/train')
        .split_by_rand_pct()
        .label_from_df(cols='has_cactus')
        .transform(tfms)
        .add_test_folder('test/test')
        .databunch(bs=bs)
        .normalize(imagenet_stats))
<define_variables>
def fam_size(train, test):
    for i in [train, test]:
        i['Fam_Size'] = np.where((i['SibSp'] + i['Parch']) == 0, 'Alone',
                                 np.where((i['SibSp'] + i['Parch']) <= 3, 'Small', 'Big'))
        del i['SibSp']
        del i['Parch']
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
data.show_batch(rows=3, figsize=(7, 7))<find_best_params>
def cabin(train, test):
    for i in [train, test]:
        i['Cabin_Lett'] = i['Cabin'].apply(lambda x: str(x)[0])
        del i['Cabin']
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
data.classes, data.c<choose_model_class>
def cabin_num(train, test):
    for i in [train, test]:
        i['Cabin_num1'] = i['Cabin'].apply(lambda x: str(x).split(' ')[-1][1:])
        i['Cabin_num1'].replace('an', np.NaN, inplace=True)
        i['Cabin_num1'] = i['Cabin_num1'].apply(lambda x: int(x) if not pd.isnull(x) and x != '' else np.NaN)
        i['Cabin_num'] = pd.qcut(train['Cabin_num1'], 3)
    train = pd.concat((train, pd.get_dummies(train['Cabin_num'], prefix='Cabin_num')), axis=1)
    test = pd.concat((test, pd.get_dummies(test['Cabin_num'], prefix='Cabin_num')), axis=1)
    del train['Cabin_num']
    del test['Cabin_num']
    del train['Cabin_num1']
    del test['Cabin_num1']
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
learn = cnn_learner(data, models.resnet34, metrics=accuracy, path=PATH)
<train_model>
train.Embarked.value_counts()
Titanic - Machine Learning from Disaster
11,317,713
learn.fit_one_cycle(4)
<train_model>
train['Embarked'].fillna('S', inplace=True)
Titanic - Machine Learning from Disaster
11,317,713
learn.fit_one_cycle(1)
<predict_on_test>
test[test.Fare.isnull()]
Titanic - Machine Learning from Disaster
11,317,713
probs, preds = learn.get_preds(ds_type=DatasetType.Test)
<prepare_x_and_y>
train[(train.Pclass==3)&(train.Age>50)].Fare.median()
Titanic - Machine Learning from Disaster
11,317,713
ilst = data.test_ds.x<define_variables>
test['Fare'].fillna(7.75, inplace=True)
Titanic - Machine Learning from Disaster
11,317,713
fnames = [item.name for item in ilst.items]; fnames[:10]<create_dataframe>
def dummies(train, test, columns=['Pclass', 'Sex', 'Embarked', 'Ticket_Lett', 'Cabin_Lett', 'Name_Title', 'Fam_Size']):
    for column in columns:
        train[column] = train[column].apply(lambda x: str(x))
        test[column] = test[column].apply(lambda x: str(x))
        good_cols = [column + '_' + i for i in train[column].unique() if i in test[column].unique()]
        train = pd.concat((train, pd.get_dummies(train[column], prefix=column)[good_cols]), axis=1)
        test = pd.concat((test, pd.get_dummies(test[column], prefix=column)[good_cols]), axis=1)
        del train[column]
        del test[column]
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
test_df = pd.DataFrame({'id': fnames, 'has_cactus': probs[:, 1]}); test_df<save_to_csv>
def drop(train, test, bye=['PassengerId']):
    for i in [train, test]:
        for z in bye:
            del i[z]
    return train, test
Titanic - Machine Learning from Disaster
11,317,713
test_df.to_csv('submission.csv', index=None)
<set_options>
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train, test = names(train, test)
train, test = age_impute(train, test)
train, test = cabin_num(train, test)
train, test = cabin(train, test)
train, test = fam_size(train, test)
train, test = ticket_grouped(train, test)
train['Embarked'].fillna('S', inplace=True)
test['Fare'].fillna(7.75, inplace=True)
train, test = dummies(train, test, columns=['Pclass', 'Sex', 'Embarked', 'Ticket_Lett', 'Cabin_Lett', 'Name_Title', 'Fam_Size'])
train, test = drop(train, test)
Titanic - Machine Learning from Disaster
11,317,713
%matplotlib inline
train_on_gpu = True
<install_modules>
rf = RandomForestClassifier(criterion='gini',
                            n_estimators=700,
                            min_samples_split=10,
                            min_samples_leaf=1,
                            max_features='auto',
                            oob_score=True,
                            random_state=1,
                            n_jobs=-1)
rf.fit(train.iloc[:, 1:], train.iloc[:, 0])
print("%.4f" % rf.oob_score_)
Titanic - Machine Learning from Disaster
11,317,713
!pip install albumentations > /dev/null 2>&1
!pip install pretrainedmodels > /dev/null 2>&1
!pip install kekas > /dev/null 2>&1
!pip install adabound > /dev/null 2>&1
<import_modules>
pd.concat((pd.DataFrame(train.iloc[:, 1:].columns, columns=['variable']),
           pd.DataFrame(rf.feature_importances_, columns=['importance'])),
          axis=1).sort_values(by='importance', ascending=False)[:20]
Titanic - Machine Learning from Disaster
11,317,713
import albumentations
from albumentations import torch as AT
import pretrainedmodels
import adabound
from kekas import Keker, DataOwner, DataKek
from kekas.transformations import Transformer, to_torch, normalize
from kekas.metrics import accuracy
from kekas.modules import Flatten, AdaptiveConcatPool2d
from kekas.callbacks import Callback, Callbacks, DebuggerCallback
from kekas.utils import DotDict
<load_from_csv>
predictions = rf.predict(test)
predictions = pd.DataFrame(predictions, columns=['Survived'])
test = pd.read_csv("../input/titanic/test.csv")
predictions = pd.concat((test.iloc[:, 0], predictions), axis=1)
predictions.to_csv('submission.csv', sep=",", index=False)
Titanic - Machine Learning from Disaster
4,286,398
labels = pd.read_csv('../input/train.csv')
fig = plt.figure(figsize=(25, 8))
train_imgs = os.listdir("../input/train/train")
for idx, img in enumerate(np.random.choice(train_imgs, 20)):
    ax = fig.add_subplot(4, 20//4, idx+1, xticks=[], yticks=[])
    im = Image.open("../input/train/train/" + img)
    plt.imshow(im)
    lab = labels.loc[labels['id'] == img, 'has_cactus'].values[0]
    ax.set_title(f'Label: {lab}')
<create_dataframe>
sns.set_palette('viridis')
sns.set_style('whitegrid')
%matplotlib inline
np.random.seed(42)
Titanic - Machine Learning from Disaster
4,286,398
test_img = os.listdir('../input/test/test')
test_df = pd.DataFrame(test_img, columns=['id'])
test_df['has_cactus'] = -1
test_df['data_type'] = 'test'
labels['has_cactus'] = labels['has_cactus'].astype(int)
labels['data_type'] = 'train'
labels.head()
<count_values>
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
4,286,398
labels.loc[labels['data_type'] == 'train', 'has_cactus'].value_counts()<split>
def get_df_by_group(df, group):
    df_groupedby = df.groupby(group).agg({'PassengerId': 'count', 'Survived': 'sum'}).rename(columns={'PassengerId': 'NumPassengers'})
    df_groupedby['Rate'] = df_groupedby['Survived'] / df_groupedby['NumPassengers']
    return df_groupedby

df = train_df.copy()
Titanic - Machine Learning from Disaster
4,286,398
train, valid = train_test_split(labels, stratify=labels.has_cactus, test_size=0.2)
<load_from_csv>
train_groupby_sex = get_df_by_group(df, ['Sex'])
train_groupby_sex
Titanic - Machine Learning from Disaster
4,286,398
def reader_fn(i, row):
    # cv2 loads BGR; [:, :, ::-1] flips the channel order to RGB
    image = cv2.imread(f"../input/{row['data_type']}/{row['data_type']}/{row['id']}")[:, :, ::-1]
    label = torch.Tensor([row["has_cactus"]])
    return {"image": image, "label": label}
<concatenate>
train_groupby_pclass = get_df_by_group(df, ['Pclass'])
train_groupby_pclass
Titanic - Machine Learning from Disaster
4,286,398
def augs(p=0.5):
    return albumentations.Compose([
        albumentations.HorizontalFlip(),
        albumentations.VerticalFlip(),
        albumentations.RandomBrightness(),
    ], p=p)
<categorify>
df['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
4,286,398
def get_transforms(dataset_key, size, p):
    PRE_TFMS = Transformer(dataset_key, lambda x: cv2.resize(x, (size, size)))
    AUGS = Transformer(dataset_key, lambda x: augs()(image=x)["image"])
    NRM_TFMS = transforms.Compose([
        Transformer(dataset_key, to_torch()),
        Transformer(dataset_key, normalize())
    ])
    train_tfms = transforms.Compose([PRE_TFMS, AUGS, NRM_TFMS])
    val_tfms = transforms.Compose([PRE_TFMS, NRM_TFMS])
    return train_tfms, val_tfms
<feature_engineering>
def fill_embarked_nan(df):
    return df['Embarked'].fillna(value='S')

df['Embarked'] = fill_embarked_nan(df)
df['Embarked'].isnull().any()
Titanic - Machine Learning from Disaster
4,286,398
train_tfms, val_tfms = get_transforms("image", 32, 0.5)
<load_pretrained>
class_median_age_series = df.groupby(['Pclass'])['Age'].median()
class_median_age_series
Titanic - Machine Learning from Disaster
4,286,398
train_dk = DataKek(df=train, reader_fn=reader_fn, transforms=train_tfms)
val_dk = DataKek(df=valid, reader_fn=reader_fn, transforms=val_tfms)
batch_size = 64
workers = 0
train_dl = DataLoader(train_dk, batch_size=batch_size, num_workers=workers, shuffle=True, drop_last=True)
val_dl = DataLoader(val_dk, batch_size=batch_size, num_workers=workers, shuffle=False)
<load_pretrained>
def fill_age_nan(df):
    return df[['Age', 'Pclass']].apply(
        lambda x: class_median_age_series.get(x['Pclass']) if pd.isnull(x['Age']) else x['Age'], axis=1)

df['Age'] = fill_age_nan(df)
Titanic - Machine Learning from Disaster
4,286,398
test_dk = DataKek(df=test_df, reader_fn=reader_fn, transforms=val_tfms)
test_dl = DataLoader(test_dk, batch_size=batch_size, num_workers=workers, shuffle=False)
<init_hyperparams>
def ageclass_by_age(age):
    if age < 10:
        return '< 10'
    elif age >= 10 and age < 20:
        return '>= 10 and < 20'
    elif age >= 20 and age < 35:
        return '>= 20 and < 35'
    elif age >= 35 and age < 50:
        return '>= 35 and < 50'
    elif age >= 50 and age < 65:
        return '>= 50 and < 65'
    else:
        return '> 65'

def convert_age_to_ageclass(df):
    return df['Age'].apply(ageclass_by_age)

df['Age'] = convert_age_to_ageclass(df)
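The same bucketing can also be written with pandas' own pd.cut; a minimal equivalent sketch (bin edges copied from ageclass_by_age; it assumes ages are non-negative):

import numpy as np
import pandas as pd

# right=False makes each bin left-inclusive, matching the >= comparisons above.
bins = [0, 10, 20, 35, 50, 65, np.inf]
labels = ['< 10', '>= 10 and < 20', '>= 20 and < 35',
          '>= 35 and < 50', '>= 50 and < 65', '> 65']
df['Age'] = pd.cut(df['Age'], bins=bins, labels=labels, right=False)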
Titanic - Machine Learning from Disaster
4,286,398
class Net(nn.Module):
    def __init__(self,
                 num_classes: int,
                 p: float = 0.2,
                 pooling_size: int = 2,
                 last_conv_size: int = 1664,
                 arch: str = "densenet169",
                 pretrained: str = "imagenet") -> None:
        super().__init__()
        net = pretrainedmodels.__dict__[arch](pretrained=pretrained)
        modules = list(net.children())[:-1]
        modules += [nn.Sequential(
            Flatten(),
            nn.BatchNorm1d(1664),
            nn.Dropout(p),
            nn.Linear(1664, num_classes)
        )]
        self.net = nn.Sequential(*modules)

    def forward(self, x):
        logits = self.net(x)
        return logits
<create_dataframe>
train_groupby_age = get_df_by_group(df, ['Age'])
train_groupby_age
Titanic - Machine Learning from Disaster
4,286,398
dataowner = DataOwner(train_dl, val_dl, None)
model = Net(num_classes=1)
criterion = nn.BCEWithLogitsLoss()
<categorify>
def extract_title_from_name(df):
    return df['Name'].apply(lambda x: (x[x.index(',') + 1:x.index('.')]).strip())

df['Title'] = extract_title_from_name(df)
df['Title'].value_counts()
Titanic - Machine Learning from Disaster
4,286,398
def step_fn(model: torch.nn.Module, batch: torch.Tensor) -> torch.Tensor:
    inp = batch["image"]
    return model(inp)
<compute_test_metric>
def reduce_list_titles(title):
    if title in ['Mrs', 'Miss', 'Master', 'Mr', 'Dr']:
        title = title
    elif title in ['Ms', 'Mme']:
        title = 'Mrs'
    elif title in ['Mlle', 'Lady']:
        title = 'Miss'
    elif title in ['Don']:
        title = 'Mr'
    else:
        title = 'Other'
    return title

def convert_title_to_reduced_list(df):
    return df['Title'].apply(reduce_list_titles)

df['Title'] = convert_title_to_reduced_list(df)
Titanic - Machine Learning from Disaster
4,286,398
def bce_accuracy(target: torch.Tensor, preds: torch.Tensor, thresh: float = 0.5) -> float:
    target = target.cpu().detach().numpy()
    preds = (torch.sigmoid(preds).cpu().detach().numpy() > thresh).astype(int)
    return accuracy_score(target, preds)

def roc_auc(target: torch.Tensor, preds: torch.Tensor) -> float:
    target = target.cpu().detach().numpy()
    preds = torch.sigmoid(preds).cpu().detach().numpy()
    return roc_auc_score(target, preds)
<choose_model_class>
train_groupby_title = get_df_by_group(df, ['Title'])
train_groupby_title.sort_values(by='Rate', ascending=False)
Titanic - Machine Learning from Disaster
4,286,398
keker = Keker(model=model,
              dataowner=dataowner,
              criterion=criterion,
              step_fn=step_fn,
              target_key="label",
              metrics={"acc": bce_accuracy, 'auc': roc_auc},
              opt=torch.optim.SGD,
              opt_params={"momentum": 0.99})
<load_pretrained>
def convert_family_size_cat(df):
    family_members = df['Parch'] + df['SibSp']
    if family_members == 0:
        return 'NO_FAMILY'
    if family_members <= 3 and family_members >= 0:
        return 'SMALL_FAMILY'
    else:
        return 'BIG_FAMILY'

def convert_parch_and_sibsp_to_family_cat(df):
    return df.apply(convert_family_size_cat, axis=1)

df['Family'] = convert_parch_and_sibsp_to_family_cat(df)
Titanic - Machine Learning from Disaster
4,286,398
keker.unfreeze(model_attr="net")
layer_num = -1
keker.freeze_to(layer_num, model_attr="net")
<find_best_params>
train_groupby_family = get_df_by_group(df, ['Family'])
train_groupby_family.sort_values(by='Family', ascending=False)
Titanic - Machine Learning from Disaster
4,286,398
keker.kek_one_cycle(max_lr=1e-2,
                    cycle_len=5,
                    momentum_range=(0.95, 0.85),
                    div_factor=25,
                    increase_fraction=0.3,
                    logdir='train_logs')
keker.plot_kek('train_logs')
<find_best_params>
def fill_cabin_nan(df):
    return df['Cabin'].apply(lambda x: str(x)[0] if pd.notnull(x) else str('O'))

df['Cabin'] = fill_cabin_nan(df)
Titanic - Machine Learning from Disaster
4,286,398
keker.kek_one_cycle(max_lr=1e-3,
                    cycle_len=5,
                    momentum_range=(0.95, 0.85),
                    div_factor=25,
                    increase_fraction=0.2,
                    logdir='train_logs1')
keker.plot_kek('train_logs1')
<predict_on_test>
train_groupby_cabin = get_df_by_group(df, ['Cabin'])
train_groupby_cabin.sort_values(by='Rate', ascending=False)
Titanic - Machine Learning from Disaster
4,286,398
preds = keker.predict_loader(loader=test_dl)
<categorify>
grouped_df = df.groupby(['Sex', 'Age', 'Pclass', 'Family', 'Title']).agg({'PassengerId': 'count', 'Survived': 'sum'}).rename(columns={'PassengerId': 'NumPassengers'})
grouped_df['Rate'] = grouped_df['Survived'] / grouped_df['NumPassengers']
grouped_df = grouped_df.reset_index()
grouped_df.head(10)
Titanic - Machine Learning from Disaster
4,286,398
<save_to_csv>
def convert_to_categories_and_fill_nan(X):
    df = pd.DataFrame(X, columns=cat_attributes)
    df['Title'] = extract_title_from_name(df)
    df['Title'] = convert_title_to_reduced_list(df)
    df['Embarked'] = fill_embarked_nan(df)
    df['Age'] = fill_age_nan(df)
    df['Age'] = convert_age_to_ageclass(df)
    df['Family'] = convert_parch_and_sibsp_to_family_cat(df)
    df['Cabin'] = fill_cabin_nan(df)
    df.drop(['Name', 'SibSp', 'Parch'], axis=1, inplace=True)
    return df.values
Titanic - Machine Learning from Disaster
4,286,398
test_preds = pd.DataFrame({'imgs': test_df.id.values, 'preds': preds.reshape(-1,)})
test_preds.columns = ['id', 'has_cactus']
test_preds.to_csv('sub.csv', index=False)
test_preds.head()
<load_from_csv>
cat_attributes = ['Pclass', 'Age', 'Sex', 'Embarked', 'Cabin', 'Name', 'SibSp', 'Parch']
num_attributes = ['Fare']
Titanic - Machine Learning from Disaster
4,286,398
data_path = '../input/'
df_train = pd.read_csv(data_path + 'train.csv')
df_test = pd.read_csv(data_path + 'test.csv')
df_train = df_train.sample(n=5000000)
print(df_train.shape)
print(df_test.shape)
<count_missing_values>
num_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('std_scaler', StandardScaler())
])
cat_pipeline = Pipeline([
    ('convert_to_categories_and_fill_nan', FunctionTransformer(convert_to_categories_and_fill_nan, validate=False)),
    ('cat_encoder', OneHotEncoder())
])
Titanic - Machine Learning from Disaster
4,286,398
df_train.isnull().sum()<count_missing_values>
full_pipeline = ColumnTransformer([
    ('num', num_pipeline, num_attributes),
    ('cat', cat_pipeline, cat_attributes)
])
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train.dropna()
df_train.isnull().sum()
<count_missing_values>
X_train = train_df[['Fare', 'Sex', 'Pclass', 'Age', 'SibSp', 'Parch', 'Embarked', 'Cabin', 'Name']].copy()
X_test = test_df[['Fare', 'Sex', 'Pclass', 'Age', 'SibSp', 'Parch', 'Embarked', 'Cabin', 'Name']].copy()
y_train = train_df['Survived']
X_train.info()
Titanic - Machine Learning from Disaster
4,286,398
df_test.isnull().sum()<filter>
X_concat = pd.concat([X_train, X_test])
X_concat_prepared = full_pipeline.fit_transform(X_concat)
X_train_prepared = X_concat_prepared[:891]  # the Titanic train set has 891 rows
X_test_prepared = X_concat_prepared[891:]
X_train_prepared[:10]
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train.loc[(df_train['fare_amount'] > 0) & (df_train['fare_amount'] < 200)]
df_train = df_train.loc[(df_train['pickup_longitude'] > -300) & (df_train['pickup_longitude'] < 300)]
df_train = df_train.loc[(df_train['pickup_latitude'] > -300) & (df_train['pickup_latitude'] < 300)]
df_train = df_train.loc[(df_train['dropoff_longitude'] > -300) & (df_train['dropoff_longitude'] < 300)]
df_train = df_train.loc[(df_train['dropoff_latitude'] > -300) & (df_train['dropoff_latitude'] < 300)]
df_train = df_train.loc[df_train['passenger_count'] <= 8]
df_train.head()
<filter>
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import precision_score, recall_score, confusion_matrix, f1_score, classification_report
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score, accuracy_score
Titanic - Machine Learning from Disaster
4,286,398
df_test = df_test.loc[(df_test['pickup_longitude'] > -300) & (df_test['pickup_longitude'] < 300)]
df_test = df_test.loc[(df_test['pickup_latitude'] > -300) & (df_test['pickup_latitude'] < 300)]
df_test = df_test.loc[(df_test['dropoff_longitude'] > -300) & (df_test['dropoff_longitude'] < 300)]
df_test = df_test.loc[(df_test['dropoff_latitude'] > -300) & (df_test['dropoff_latitude'] < 300)]
df_test = df_test.loc[df_test['passenger_count'] <= 8]
df_test.head()
<drop_column>
logistic_reg = LogisticRegression(solver='lbfgs', max_iter=1000, random_state=101)
y_logistic_score = cross_val_score(logistic_reg, X_train_prepared, y_train, cv=10, scoring='accuracy')
y_logistic_score.mean()
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train.drop(['index'], axis=1)
df_train.head()
<prepare_x_and_y>
y_logistic_pred = cross_val_predict(logistic_reg, X_train_prepared, y_train, cv=10)
Titanic - Machine Learning from Disaster
4,286,398
ids = df_test['key']
train_Y = df_train['fare_amount']
df_train = df_train.drop(['fare_amount'], axis=1)
print(df_train.shape)
print(df_test.shape)
<data_type_conversions>
confusion_matrix(y_train, y_logistic_pred)
Titanic - Machine Learning from Disaster
4,286,398
df_train['pickup_datetime'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S UTC'))
df_train['pickup_year'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%Y')).astype('int64')
df_train['pickup_month'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%m')).astype('int64')
df_train['pickup_day'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%d')).astype('int64')
df_train['pickup_hour'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%H')).astype('int64')
df_train['pickup_minute'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%M')).astype('int64')
df_train['pickup_second'] = df_train['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%S')).astype('int64')
df_test['pickup_datetime'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S UTC'))
df_test['pickup_year'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%Y')).astype('int64')
df_test['pickup_month'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%m')).astype('int64')
df_test['pickup_day'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%d')).astype('int64')
df_test['pickup_hour'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%H')).astype('int64')
df_test['pickup_minute'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%M')).astype('int64')
df_test['pickup_second'] = df_test['pickup_datetime'].apply(lambda x: datetime.datetime.strftime(x, '%S')).astype('int64')
<drop_column>
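The per-row strptime/strftime calls above work, but pandas can do the same extraction vectorized; a minimal sketch of the equivalent using the .dt accessor (a rewrite of mine, not the kernel's code):

import pandas as pd

for frame in (df_train, df_test):
    # Parse once, then read each component off the datetime column.
    dt = pd.to_datetime(frame['pickup_datetime'], format='%Y-%m-%d %H:%M:%S UTC')
    frame['pickup_datetime'] = dt
    frame['pickup_year'] = dt.dt.year
    frame['pickup_month'] = dt.dt.month
    frame['pickup_day'] = dt.dt.day
    frame['pickup_hour'] = dt.dt.hour
    frame['pickup_minute'] = dt.dt.minute
    frame['pickup_second'] = dt.dt.second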
precision_score(y_train, y_logistic_pred)
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train.drop(['pickup_datetime'], axis=1)
df_test = df_test.drop(['pickup_datetime'], axis=1)
<feature_engineering>
recall_score(y_train, y_logistic_pred)
Titanic - Machine Learning from Disaster
4,286,398
df_train['longitude_diff'] = df_train['dropoff_longitude'] - df_train['pickup_longitude']
df_train['latitude_diff'] = df_train['dropoff_latitude'] - df_train['pickup_latitude']
df_train['distance'] = ((df_train['longitude_diff']**2) + (df_train['latitude_diff']**2))**0.5
df_test['longitude_diff'] = df_test['dropoff_longitude'] - df_test['pickup_longitude']
df_test['latitude_diff'] = df_test['dropoff_latitude'] - df_test['pickup_latitude']
df_test['distance'] = ((df_test['longitude_diff']**2) + (df_test['latitude_diff']**2))**0.5
<drop_column>
f1_score(y_train, y_logistic_pred)
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train.drop(['key'], axis=1)
df_test = df_test.drop(['key'], axis=1)
<train_model>
print(classification_report(y_train, y_logistic_pred))
Titanic - Machine Learning from Disaster
4,286,398
x_train, x_test, y_train, y_test = train_test_split(df_train, train_Y, test_size=0.2, random_state=0)

def xgbmodel(x_train, x_test, y_train, y_test):
    matrix_train = xgb.DMatrix(x_train, label=y_train)
    matrix_test = xgb.DMatrix(x_test, label=y_test)
    model = xgb.train(params={'objective': 'reg:linear', 'eval_metric': 'rmse'},
                      dtrain=matrix_train,
                      num_boost_round=200,
                      early_stopping_rounds=20,
                      evals=[(matrix_test, 'test')])
    return model

myxgbmodel = xgbmodel(x_train, x_test, y_train, y_test)
pred = myxgbmodel.predict(xgb.DMatrix(df_test), ntree_limit=myxgbmodel.best_ntree_limit)
<save_to_csv>
y_decision_function = cross_val_predict(logistic_reg, X_train_prepared, y_train, cv=10, method='decision_function')
Titanic - Machine Learning from Disaster
4,286,398
submission = pd.DataFrame({'key': ids, 'fare_amount': pred})
submission.to_csv('submission.csv', index=False)
<load_from_csv>
threshold = 2
print(confusion_matrix(y_train, y_decision_function > threshold))
print(precision_score(y_train, y_decision_function > threshold))
print(recall_score(y_train, y_decision_function > threshold))
Titanic - Machine Learning from Disaster
4,286,398
nyc_weather = pd.read_csv('../input/nyc-weather/nyc_weather.csv')
weather_cols = ['DATE', 'AWND', 'PRCP', 'SNOW', 'TMAX', 'TMIN']
nyc_weather = nyc_weather[weather_cols].copy()
nyc_weather['DATE'] = pd.to_datetime(nyc_weather['DATE'], utc=True, format='%m/%d/%Y')
nyc_weather.head()
<load_from_csv>
threshold = -2
print(confusion_matrix(y_train, y_decision_function > threshold))
print(precision_score(y_train, y_decision_function > threshold))
print(recall_score(y_train, y_decision_function > threshold))
Titanic - Machine Learning from Disaster
4,286,398
holidays = pd.read_csv('../input/us-bank-holidays-20092018/US Bank Holidays 2012-2018.csv')
holidays['Date'] = pd.to_datetime(holidays['Date'], utc=True, format='%m/%d/%y')
holidays.head(12)
<load_from_csv>
precisions, recalls, thresholds = precision_recall_curve(y_train, y_decision_function)
Titanic - Machine Learning from Disaster
4,286,398
%%time
n = sum(1 for line in open('../input/new-york-city-taxi-fare-prediction/train.csv')) - 1
s = 10000000
skip = sorted(random.sample(range(1, n + 1), n - s))
train_full = pd.read_csv('../input/new-york-city-taxi-fare-prediction/train.csv', skiprows=skip)
test = pd.read_csv('../input/new-york-city-taxi-fare-prediction/test.csv')
test_id = test.key.values
<randomize_order>
roc_auc_score(y_train, y_decision_function)
Titanic - Machine Learning from Disaster
4,286,398
train_one = train_full.iloc[:2500000, :]
train_two = train_full.iloc[2500000:5000000, :]
train_three = train_full.iloc[5000000:7500000, :]
train_four = train_full.iloc[7500000:, :]
train_one.head()
<data_type_conversions>
param_grid = {
    'penalty': ['l1', 'l2'],
    'C': [0.01, 0.1, 0.9, 0.95]
}
logistic_reg = LogisticRegression()
grid_search = GridSearchCV(logistic_reg, param_grid, cv=5, scoring='accuracy')
grid_search.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
%%time
train_sample = train_one.dropna()
train_sample['pickup_datetime'] = train_sample.pickup_datetime.str.slice(0, 16)
test['pickup_datetime'] = test.pickup_datetime.str.slice(0, 16)
train_sample['pickup_datetime'] = pd.to_datetime(train_sample.pickup_datetime, utc=True, format='%Y-%m-%d %H:%M')
test['pickup_datetime'] = pd.to_datetime(test.pickup_datetime, utc=True, format='%Y-%m-%d %H:%M')
train_sample.drop(labels='key', axis=1, inplace=True)
test.drop(labels='key', axis=1, inplace=True)
train_sample.loc[:, 'passenger_count'] = train_sample.passenger_count.astype(dtype='uint8')
train_sample['pickup_longitude'] = train_sample.pickup_longitude.astype(dtype='float32')
train_sample['pickup_latitude'] = train_sample.pickup_latitude.astype(dtype='float32')
train_sample['dropoff_longitude'] = train_sample.dropoff_longitude.astype(dtype='float32')
train_sample['dropoff_latitude'] = train_sample.dropoff_latitude.astype(dtype='float32')
train_sample['fare_amount'] = train_sample.fare_amount.astype(dtype='float32')
test['pickup_longitude'] = test.pickup_longitude.astype(dtype='float32')
test['pickup_latitude'] = test.pickup_latitude.astype(dtype='float32')
test['dropoff_longitude'] = test.dropoff_longitude.astype(dtype='float32')
test['dropoff_latitude'] = test.dropoff_latitude.astype(dtype='float32')
# keep only train rows inside the test set's bounding box
train_sample = train_sample.loc[train_sample.pickup_longitude.between(test.pickup_longitude.min(), test.pickup_longitude.max())]
train_sample = train_sample.loc[train_sample.pickup_latitude.between(test.pickup_latitude.min(), test.pickup_latitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_longitude.between(test.dropoff_longitude.min(), test.dropoff_longitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_latitude.between(test.dropoff_latitude.min(), test.dropoff_latitude.max())]
train_sample['hour'] = train_sample['pickup_datetime'].apply(lambda time: time.hour)
train_sample['month'] = train_sample['pickup_datetime'].apply(lambda time: time.month)
train_sample['day_of_week'] = train_sample['pickup_datetime'].apply(lambda time: time.dayofweek)
train_sample['year'] = train_sample['pickup_datetime'].apply(lambda t: t.year)
test['hour'] = test['pickup_datetime'].apply(lambda time: time.hour)
test['month'] = test['pickup_datetime'].apply(lambda time: time.month)
test['day_of_week'] = test['pickup_datetime'].apply(lambda time: time.dayofweek)
test['year'] = test['pickup_datetime'].apply(lambda t: t.year)
train_sample['hour'] = train_sample.hour.astype(dtype='uint8')
train_sample['month'] = train_sample.month.astype(dtype='uint8')
train_sample['day_of_week'] = train_sample.day_of_week.astype(dtype='uint8')
train_sample['year'] = train_sample.year.astype(dtype='uint16')
test['hour'] = test.hour.astype(dtype='uint8')
test['month'] = test.month.astype(dtype='uint8')
test['day_of_week'] = test.day_of_week.astype(dtype='uint8')
test['year'] = test.year.astype(dtype='uint16')
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(nyc_weather, how='left', left_on='pickup_day', right_on='DATE')
train_sample.drop(columns=['pickup_day', 'DATE'], inplace=True)
test['pickup_day'] = test.pickup_datetime.dt.floor('d')
test = test.merge(nyc_weather, how='left', left_on='pickup_day', right_on='DATE')
test.drop(columns=['pickup_day', 'DATE'], inplace=True)
train_sample['AWND'] = train_sample.AWND.astype(dtype='float16')
train_sample['PRCP'] = train_sample.PRCP.astype(dtype='float16')
train_sample['SNOW'] = train_sample.SNOW.astype(dtype='float16')
train_sample['TMAX'] = train_sample.TMAX.astype(dtype='float16')
train_sample['TMIN'] = train_sample.TMIN.astype(dtype='float16')
test['AWND'] = test.AWND.astype(dtype='float16')
test['PRCP'] = test.PRCP.astype(dtype='float16')
test['SNOW'] = test.SNOW.astype(dtype='float16')
test['TMAX'] = test.TMAX.astype(dtype='float16')
test['TMIN'] = test.TMIN.astype(dtype='float16')
train_sample['hot_day'] = np.where(train_sample.TMAX >= 30, 1, 0)
train_sample['cold_day'] = np.where(train_sample.TMIN <= 0, 1, 0)
test['hot_day'] = np.where(test.TMAX >= 30, 1, 0)
test['cold_day'] = np.where(test.TMIN <= 0, 1, 0)
train_sample['hot_day'] = train_sample.hot_day.astype(dtype='uint8')
train_sample['cold_day'] = train_sample.cold_day.astype(dtype='uint8')
test['hot_day'] = test.hot_day.astype(dtype='uint8')
test['cold_day'] = test.cold_day.astype(dtype='uint8')
train_sample['rainy_day'] = np.where(train_sample.PRCP > 0, 1, 0)
train_sample['snowy_day'] = np.where(train_sample.SNOW > 0, 1, 0)
test['rainy_day'] = np.where(test.PRCP > 0, 1, 0)
test['snowy_day'] = np.where(test.SNOW > 0, 1, 0)
train_sample['rainy_day'] = train_sample.rainy_day.astype(dtype='uint8')
train_sample['snowy_day'] = train_sample.snowy_day.astype(dtype='uint8')
test['rainy_day'] = test.rainy_day.astype(dtype='uint8')
test['snowy_day'] = test.snowy_day.astype(dtype='uint8')
train_sample['windy_day'] = np.where(train_sample.AWND >= 0, 1, 0)
test['windy_day'] = np.where(test.AWND >= 0, 1, 0)
train_sample['windy_day'] = train_sample.windy_day.astype(dtype='uint8')
test['windy_day'] = test.windy_day.astype(dtype='uint8')

def degree_to_radion(degree):
    return degree * (np.pi / 180)

def calculate_distance(pickup_latitude, pickup_longitude, dropoff_latitude, dropoff_longitude):
    # haversine great-circle distance in kilometres
    from_lat = degree_to_radion(pickup_latitude)
    from_long = degree_to_radion(pickup_longitude)
    to_lat = degree_to_radion(dropoff_latitude)
    to_long = degree_to_radion(dropoff_longitude)
    radius = 6371.01
    lat_diff = to_lat - from_lat
    long_diff = to_long - from_long
    a = np.sin(lat_diff / 2)**2 + np.cos(from_lat) * np.cos(to_lat) * np.sin(long_diff / 2)**2
    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
    return radius * c

train_sample['distance'] = calculate_distance(train_sample.pickup_latitude, train_sample.pickup_longitude, train_sample.dropoff_latitude, train_sample.dropoff_longitude)
test['distance'] = calculate_distance(test.pickup_latitude, test.pickup_longitude, test.dropoff_latitude, test.dropoff_longitude)
train_sample['distance'] = train_sample.distance.astype(dtype='float32')
test['distance'] = test.distance.astype(dtype='float32')
train_sample['day_hour'] = train_sample.day_of_week.astype(str) + "_" + train_sample.hour.astype(str)
train_sample['day_hour'] = train_sample['day_hour'].astype('category')
test['day_hour'] = test.day_of_week.astype(str) + "_" + test.hour.astype(str)
test['day_hour'] = test['day_hour'].astype('category')
train_sample = train_sample[train_sample.fare_amount > 0]
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(holidays, left_on='pickup_day', right_on='Date', how='left')
train_sample['Holiday'] = train_sample.Holiday.fillna('None')
le = LabelEncoder()
train_sample['holiday'] = le.fit_transform(train_sample.Holiday.values)
train_sample.drop(['Holiday', 'Date', 'pickup_day'], axis=1, inplace=True)
test['pickup_day'] = test.pickup_datetime.dt.floor('d')
test = test.merge(holidays, left_on='pickup_day', right_on='Date', how='left')
test['Holiday'] = test.Holiday.fillna('None')
test['holiday'] = le.fit_transform(test.Holiday.values)
test.drop(['Holiday', 'Date', 'pickup_day'], axis=1, inplace=True)
train_sample['holiday'] = train_sample.holiday.astype(dtype='uint8')
test['holiday'] = test.holiday.astype(dtype='uint8')
train_one = train_sample.copy()
del train_sample
train_one.head()
<data_type_conversions>
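As a quick sanity check on calculate_distance, one hand-picked pair of points; the coordinates and the expected figure are mine, not the kernel's:

# Great-circle distance from lower Manhattan (40.7128, -74.0060)
# to JFK (40.6413, -73.7781); should come out near 21 km.
d = calculate_distance(40.7128, -74.0060, 40.6413, -73.7781)
print(round(float(d), 1))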
print(grid_search.best_params_)
print(grid_search.best_score_)
Titanic - Machine Learning from Disaster
4,286,398
%%time
train_sample = train_two.dropna()
train_sample['pickup_datetime'] = train_sample.pickup_datetime.str.slice(0, 16)
train_sample['pickup_datetime'] = pd.to_datetime(train_sample.pickup_datetime, utc=True, format='%Y-%m-%d %H:%M')
train_sample.drop(labels='key', axis=1, inplace=True)
train_sample.loc[:, 'passenger_count'] = train_sample.passenger_count.astype(dtype='uint8')
train_sample['pickup_longitude'] = train_sample.pickup_longitude.astype(dtype='float32')
train_sample['pickup_latitude'] = train_sample.pickup_latitude.astype(dtype='float32')
train_sample['dropoff_longitude'] = train_sample.dropoff_longitude.astype(dtype='float32')
train_sample['dropoff_latitude'] = train_sample.dropoff_latitude.astype(dtype='float32')
train_sample['fare_amount'] = train_sample.fare_amount.astype(dtype='float32')
train_sample = train_sample.loc[train_sample.pickup_longitude.between(test.pickup_longitude.min(), test.pickup_longitude.max())]
train_sample = train_sample.loc[train_sample.pickup_latitude.between(test.pickup_latitude.min(), test.pickup_latitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_longitude.between(test.dropoff_longitude.min(), test.dropoff_longitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_latitude.between(test.dropoff_latitude.min(), test.dropoff_latitude.max())]
train_sample['hour'] = train_sample['pickup_datetime'].apply(lambda time: time.hour)
train_sample['month'] = train_sample['pickup_datetime'].apply(lambda time: time.month)
train_sample['day_of_week'] = train_sample['pickup_datetime'].apply(lambda time: time.dayofweek)
train_sample['year'] = train_sample['pickup_datetime'].apply(lambda t: t.year)
train_sample['hour'] = train_sample.hour.astype(dtype='uint8')
train_sample['month'] = train_sample.month.astype(dtype='uint8')
train_sample['day_of_week'] = train_sample.day_of_week.astype(dtype='uint8')
train_sample['year'] = train_sample.year.astype(dtype='uint16')
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(nyc_weather, how='left', left_on='pickup_day', right_on='DATE')
train_sample.drop(columns=['pickup_day', 'DATE'], inplace=True)
train_sample['AWND'] = train_sample.AWND.astype(dtype='float16')
train_sample['PRCP'] = train_sample.PRCP.astype(dtype='float16')
train_sample['SNOW'] = train_sample.SNOW.astype(dtype='float16')
train_sample['TMAX'] = train_sample.TMAX.astype(dtype='float16')
train_sample['TMIN'] = train_sample.TMIN.astype(dtype='float16')
train_sample['hot_day'] = np.where(train_sample.TMAX >= 30, 1, 0)
train_sample['cold_day'] = np.where(train_sample.TMIN <= 0, 1, 0)
train_sample['hot_day'] = train_sample.hot_day.astype(dtype='uint8')
train_sample['cold_day'] = train_sample.cold_day.astype(dtype='uint8')
train_sample['rainy_day'] = np.where(train_sample.PRCP > 0, 1, 0)
train_sample['snowy_day'] = np.where(train_sample.SNOW > 0, 1, 0)
train_sample['rainy_day'] = train_sample.rainy_day.astype(dtype='uint8')
train_sample['snowy_day'] = train_sample.snowy_day.astype(dtype='uint8')
train_sample['windy_day'] = np.where(train_sample.AWND >= 0, 1, 0)
train_sample['windy_day'] = train_sample.windy_day.astype(dtype='uint8')
train_sample['distance'] = calculate_distance(train_sample.pickup_latitude, train_sample.pickup_longitude, train_sample.dropoff_latitude, train_sample.dropoff_longitude)
train_sample['distance'] = train_sample.distance.astype(dtype='float32')
train_sample['day_hour'] = train_sample.day_of_week.astype(str) + "_" + train_sample.hour.astype(str)
train_sample['day_hour'] = train_sample['day_hour'].astype('category')
train_sample = train_sample[train_sample.fare_amount > 0]
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(holidays, left_on='pickup_day', right_on='Date', how='left')
train_sample['Holiday'] = train_sample.Holiday.fillna('None')
le = LabelEncoder()
train_sample['holiday'] = le.fit_transform(train_sample.Holiday.values)
train_sample.drop(['Holiday', 'Date', 'pickup_day'], axis=1, inplace=True)
train_sample['holiday'] = train_sample.holiday.astype(dtype='uint8')
train_two = train_sample.copy()
del train_sample
train_two.head()
<data_type_conversions>
means = grid_search.cv_results_['mean_test_score']
stds = grid_search.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid_search.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
%%time
train_sample = train_three.dropna()
train_sample['pickup_datetime'] = train_sample.pickup_datetime.str.slice(0, 16)
train_sample['pickup_datetime'] = pd.to_datetime(train_sample.pickup_datetime, utc=True, format='%Y-%m-%d %H:%M')
train_sample.drop(labels='key', axis=1, inplace=True)
train_sample.loc[:, 'passenger_count'] = train_sample.passenger_count.astype(dtype='uint8')
train_sample['pickup_longitude'] = train_sample.pickup_longitude.astype(dtype='float32')
train_sample['pickup_latitude'] = train_sample.pickup_latitude.astype(dtype='float32')
train_sample['dropoff_longitude'] = train_sample.dropoff_longitude.astype(dtype='float32')
train_sample['dropoff_latitude'] = train_sample.dropoff_latitude.astype(dtype='float32')
train_sample['fare_amount'] = train_sample.fare_amount.astype(dtype='float32')
train_sample = train_sample.loc[train_sample.pickup_longitude.between(test.pickup_longitude.min(), test.pickup_longitude.max())]
train_sample = train_sample.loc[train_sample.pickup_latitude.between(test.pickup_latitude.min(), test.pickup_latitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_longitude.between(test.dropoff_longitude.min(), test.dropoff_longitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_latitude.between(test.dropoff_latitude.min(), test.dropoff_latitude.max())]
train_sample['hour'] = train_sample['pickup_datetime'].apply(lambda time: time.hour)
train_sample['month'] = train_sample['pickup_datetime'].apply(lambda time: time.month)
train_sample['day_of_week'] = train_sample['pickup_datetime'].apply(lambda time: time.dayofweek)
train_sample['year'] = train_sample['pickup_datetime'].apply(lambda t: t.year)
train_sample['hour'] = train_sample.hour.astype(dtype='uint8')
train_sample['month'] = train_sample.month.astype(dtype='uint8')
train_sample['day_of_week'] = train_sample.day_of_week.astype(dtype='uint8')
train_sample['year'] = train_sample.year.astype(dtype='uint16')
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(nyc_weather, how='left', left_on='pickup_day', right_on='DATE')
train_sample.drop(columns=['pickup_day', 'DATE'], inplace=True)
train_sample['AWND'] = train_sample.AWND.astype(dtype='float16')
train_sample['PRCP'] = train_sample.PRCP.astype(dtype='float16')
train_sample['SNOW'] = train_sample.SNOW.astype(dtype='float16')
train_sample['TMAX'] = train_sample.TMAX.astype(dtype='float16')
train_sample['TMIN'] = train_sample.TMIN.astype(dtype='float16')
train_sample['hot_day'] = np.where(train_sample.TMAX >= 30, 1, 0)
train_sample['cold_day'] = np.where(train_sample.TMIN <= 0, 1, 0)
train_sample['hot_day'] = train_sample.hot_day.astype(dtype='uint8')
train_sample['cold_day'] = train_sample.cold_day.astype(dtype='uint8')
train_sample['rainy_day'] = np.where(train_sample.PRCP > 0, 1, 0)
train_sample['snowy_day'] = np.where(train_sample.SNOW > 0, 1, 0)
train_sample['rainy_day'] = train_sample.rainy_day.astype(dtype='uint8')
train_sample['snowy_day'] = train_sample.snowy_day.astype(dtype='uint8')
train_sample['windy_day'] = np.where(train_sample.AWND >= 0, 1, 0)
train_sample['windy_day'] = train_sample.windy_day.astype(dtype='uint8')
train_sample['distance'] = calculate_distance(train_sample.pickup_latitude, train_sample.pickup_longitude, train_sample.dropoff_latitude, train_sample.dropoff_longitude)
train_sample['distance'] = train_sample.distance.astype(dtype='float32')
train_sample['day_hour'] = train_sample.day_of_week.astype(str) + "_" + train_sample.hour.astype(str)
train_sample['day_hour'] = train_sample['day_hour'].astype('category')
train_sample = train_sample[train_sample.fare_amount > 0]
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(holidays, left_on='pickup_day', right_on='Date', how='left')
train_sample['Holiday'] = train_sample.Holiday.fillna('None')
le = LabelEncoder()
train_sample['holiday'] = le.fit_transform(train_sample.Holiday.values)
train_sample.drop(['Holiday', 'Date', 'pickup_day'], axis=1, inplace=True)
train_sample['holiday'] = train_sample.holiday.astype(dtype='uint8')
train_three = train_sample.copy()
del train_sample
train_three.head()
<data_type_conversions>
param_grid = {
    'n_neighbors': [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
}
grid_search_knn = GridSearchCV(KNeighborsClassifier(), param_grid, cv=5, scoring='accuracy')
grid_search_knn.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
%%time
train_sample = train_four.dropna()
train_sample['pickup_datetime'] = train_sample.pickup_datetime.str.slice(0, 16)
train_sample['pickup_datetime'] = pd.to_datetime(train_sample.pickup_datetime, utc=True, format='%Y-%m-%d %H:%M')
train_sample.drop(labels='key', axis=1, inplace=True)
train_sample.loc[:, 'passenger_count'] = train_sample.passenger_count.astype(dtype='uint8')
train_sample['pickup_longitude'] = train_sample.pickup_longitude.astype(dtype='float32')
train_sample['pickup_latitude'] = train_sample.pickup_latitude.astype(dtype='float32')
train_sample['dropoff_longitude'] = train_sample.dropoff_longitude.astype(dtype='float32')
train_sample['dropoff_latitude'] = train_sample.dropoff_latitude.astype(dtype='float32')
train_sample['fare_amount'] = train_sample.fare_amount.astype(dtype='float32')
train_sample = train_sample.loc[train_sample.pickup_longitude.between(test.pickup_longitude.min(), test.pickup_longitude.max())]
train_sample = train_sample.loc[train_sample.pickup_latitude.between(test.pickup_latitude.min(), test.pickup_latitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_longitude.between(test.dropoff_longitude.min(), test.dropoff_longitude.max())]
train_sample = train_sample.loc[train_sample.dropoff_latitude.between(test.dropoff_latitude.min(), test.dropoff_latitude.max())]
train_sample['hour'] = train_sample['pickup_datetime'].apply(lambda time: time.hour)
train_sample['month'] = train_sample['pickup_datetime'].apply(lambda time: time.month)
train_sample['day_of_week'] = train_sample['pickup_datetime'].apply(lambda time: time.dayofweek)
train_sample['year'] = train_sample['pickup_datetime'].apply(lambda t: t.year)
train_sample['hour'] = train_sample.hour.astype(dtype='uint8')
train_sample['month'] = train_sample.month.astype(dtype='uint8')
train_sample['day_of_week'] = train_sample.day_of_week.astype(dtype='uint8')
train_sample['year'] = train_sample.year.astype(dtype='uint16')
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(nyc_weather, how='left', left_on='pickup_day', right_on='DATE')
train_sample.drop(columns=['pickup_day', 'DATE'], inplace=True)
train_sample['AWND'] = train_sample.AWND.astype(dtype='float16')
train_sample['PRCP'] = train_sample.PRCP.astype(dtype='float16')
train_sample['SNOW'] = train_sample.SNOW.astype(dtype='float16')
train_sample['TMAX'] = train_sample.TMAX.astype(dtype='float16')
train_sample['TMIN'] = train_sample.TMIN.astype(dtype='float16')
train_sample['hot_day'] = np.where(train_sample.TMAX >= 30, 1, 0)
train_sample['cold_day'] = np.where(train_sample.TMIN <= 0, 1, 0)
train_sample['hot_day'] = train_sample.hot_day.astype(dtype='uint8')
train_sample['cold_day'] = train_sample.cold_day.astype(dtype='uint8')
train_sample['rainy_day'] = np.where(train_sample.PRCP > 0, 1, 0)
train_sample['snowy_day'] = np.where(train_sample.SNOW > 0, 1, 0)
train_sample['rainy_day'] = train_sample.rainy_day.astype(dtype='uint8')
train_sample['snowy_day'] = train_sample.snowy_day.astype(dtype='uint8')
train_sample['windy_day'] = np.where(train_sample.AWND >= 0, 1, 0)
train_sample['windy_day'] = train_sample.windy_day.astype(dtype='uint8')
train_sample['distance'] = calculate_distance(train_sample.pickup_latitude, train_sample.pickup_longitude, train_sample.dropoff_latitude, train_sample.dropoff_longitude)
train_sample['distance'] = train_sample.distance.astype(dtype='float32')
train_sample['day_hour'] = train_sample.day_of_week.astype(str) + "_" + train_sample.hour.astype(str)
train_sample['day_hour'] = train_sample['day_hour'].astype('category')
train_sample = train_sample[train_sample.fare_amount > 0]
train_sample['pickup_day'] = train_sample.pickup_datetime.dt.floor('d')
train_sample = train_sample.merge(holidays, left_on='pickup_day', right_on='Date', how='left')
train_sample['Holiday'] = train_sample.Holiday.fillna('None')
le = LabelEncoder()
train_sample['holiday'] = le.fit_transform(train_sample.Holiday.values)
train_sample.drop(['Holiday', 'Date', 'pickup_day'], axis=1, inplace=True)
train_sample['holiday'] = train_sample.holiday.astype(dtype='uint8')
train_four = train_sample.copy()
del train_sample
train_four.head()
<concatenate>
print(grid_search_knn.best_params_)
print(grid_search_knn.best_score_)
Titanic - Machine Learning from Disaster
4,286,398
train_full_pre = pd.concat([train_one, train_two, train_three, train_four])
<drop_column>
means = grid_search_knn.cv_results_['mean_test_score']
stds = grid_search_knn.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid_search_knn.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
del train_full<concatenate>
param_grid = [
    {'kernel': ['rbf'], 'C': [0.7, 0.8, 1], 'gamma': [0.07, 0.08, 0.09]},
    {'kernel': ['poly'], 'C': [0.1, 1, 10, 100], 'gamma': ['auto']}
]
grid_search_svc = GridSearchCV(SVC(), param_grid, cv=5, scoring='accuracy')
grid_search_svc.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
train_sample = train_full_pre.copy()
full_pickups = pd.concat([train_sample[['pickup_longitude', 'pickup_latitude']], test[['pickup_longitude', 'pickup_latitude']]], axis=0)
full_pickups.columns = ['x', 'y']
full_dropoffs = pd.concat([train_sample[['dropoff_longitude', 'dropoff_latitude']], test[['dropoff_longitude', 'dropoff_latitude']]], axis=0)
full_dropoffs.columns = ['x', 'y']
full_locs = pd.concat([full_pickups, full_dropoffs], axis=0)
full_locs = full_locs.groupby(['x', 'y']).count().reset_index()
del full_pickups, full_dropoffs, train_sample, train_full_pre
full_locs.info()
<prepare_x_and_y>
print(grid_search_svc.best_params_)
print(grid_search_svc.best_score_)
Titanic - Machine Learning from Disaster
4,286,398
%%time
X_df = full_locs.copy()
X_kmeans = full_locs.values
del full_locs
num_clusters = 200
with open('../input/taxi-weather-holidays-kmeans-neighborhoods/kmeans_200_round4.pkl', 'rb') as fid:
    kmeans = pickle.load(fid)
z = kmeans.predict(X_kmeans)
centers = kmeans.cluster_centers_
x_centers = [pair[0] for pair in centers]
y_centers = [pair[1] for pair in centers]
z_centers = np.arange(num_clusters)
plt.subplot(1, 2, 1)
plt.scatter(X_df['x'], X_df['y'], c=z)
plt.gray()
plt.xlabel('Pickup/Dropoff Longitude')
plt.ylabel('Pickup/Dropoff Latitude')
plt.title('Clusters of NYC locations')
plt.subplot(1, 2, 2)
plt.scatter(x_centers, y_centers, c=z_centers)
plt.gray()
plt.xlabel('Pickup/Dropoff Longitude')
plt.ylabel('Pickup/Dropoff Latitude')
plt.title('Cluster Centers of NYC locations')
plt.show()
<predict_on_test>
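The cell unpickles a clustering model fitted elsewhere; a minimal sketch of how a comparable model could be fitted (hypothetical, the actual training code is not in this row):

from sklearn.cluster import MiniBatchKMeans

# Fit a 200-cluster "neighborhood" model on the (lon, lat) pairs.
kmeans = MiniBatchKMeans(n_clusters=200, batch_size=10000, random_state=0)
kmeans.fit(X_kmeans)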
means = grid_search_svc.cv_results_['mean_test_score']
stds = grid_search_svc.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid_search_svc.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
del X_kmeans, X_df
train_one['pickup_neighborhood'] = kmeans.predict(np.column_stack([train_one.pickup_longitude.values, train_one.pickup_latitude.values]))
train_one['dropoff_neighborhood'] = kmeans.predict(np.column_stack([train_one.dropoff_longitude.values, train_one.dropoff_latitude.values]))
train_one['pickup_neighborhood'] = train_one.pickup_neighborhood.astype(dtype='uint8')
train_one['dropoff_neighborhood'] = train_one.dropoff_neighborhood.astype(dtype='uint8')
train_two['pickup_neighborhood'] = kmeans.predict(np.column_stack([train_two.pickup_longitude.values, train_two.pickup_latitude.values]))
train_two['dropoff_neighborhood'] = kmeans.predict(np.column_stack([train_two.dropoff_longitude.values, train_two.dropoff_latitude.values]))
train_two['pickup_neighborhood'] = train_two.pickup_neighborhood.astype(dtype='uint8')
train_two['dropoff_neighborhood'] = train_two.dropoff_neighborhood.astype(dtype='uint8')
train_three['pickup_neighborhood'] = kmeans.predict(np.column_stack([train_three.pickup_longitude.values, train_three.pickup_latitude.values]))
train_three['dropoff_neighborhood'] = kmeans.predict(np.column_stack([train_three.dropoff_longitude.values, train_three.dropoff_latitude.values]))
train_three['pickup_neighborhood'] = train_three.pickup_neighborhood.astype(dtype='uint8')
train_three['dropoff_neighborhood'] = train_three.dropoff_neighborhood.astype(dtype='uint8')
train_four['pickup_neighborhood'] = kmeans.predict(np.column_stack([train_four.pickup_longitude.values, train_four.pickup_latitude.values]))
train_four['dropoff_neighborhood'] = kmeans.predict(np.column_stack([train_four.dropoff_longitude.values, train_four.dropoff_latitude.values]))
train_four['pickup_neighborhood'] = train_four.pickup_neighborhood.astype(dtype='uint8')
train_four['dropoff_neighborhood'] = train_four.dropoff_neighborhood.astype(dtype='uint8')
test['pickup_neighborhood'] = kmeans.predict(np.column_stack([test.pickup_longitude.values, test.pickup_latitude.values]))
test['dropoff_neighborhood'] = kmeans.predict(np.column_stack([test.dropoff_longitude.values, test.dropoff_latitude.values]))
test['pickup_neighborhood'] = test.pickup_neighborhood.astype(dtype='uint8')
test['dropoff_neighborhood'] = test.dropoff_neighborhood.astype(dtype='uint8')
<load_pretrained>
param_grid = {
    'max_features': [8, 9, 10, 11, 12, 13],
    'max_leaf_nodes': [7, 8, 9, 10, 11],
    'min_samples_split': [3, 4, 5]
}
grid_search_tree = GridSearchCV(DecisionTreeClassifier(), param_grid, cv=5, scoring='accuracy')
grid_search_tree.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
with open('kmeans_minibatch_200_round4_v2.pkl', 'wb') as fid:
    pickle.dump(kmeans, fid)
<drop_column>
print(grid_search_tree.best_params_)
print(grid_search_tree.best_score_)
Titanic - Machine Learning from Disaster
4,286,398
del KMeans, le_dict, MiniBatchKMeans, StandardScaler, calculate_distance, centers, degree_to_radion, holidays, kmeans, nyc_weather, pickle, z, z_centers, x_centers, y_centers
<set_options>
means = grid_search_tree.cv_results_['mean_test_score']
stds = grid_search_tree.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid_search_tree.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
gc.collect()
dir()
<concatenate>
feature_importance_list = grid_search_tree.best_estimator_.feature_importances_
list_categories = full_pipeline.named_transformers_['cat']['cat_encoder'].categories_
flat_list = [item for sublist in list_categories for item in sublist]
attribute_list = num_attributes + flat_list
df_attribute_importance = pd.DataFrame({'attribute_name': attribute_list, 'importance': feature_importance_list})
df_attribute_importance.sort_values('importance', ascending=False)
Titanic - Machine Learning from Disaster
4,286,398
train_full = pd.concat([train_one, train_two, train_three, train_four])
<categorify>
ada_boost_clf = AdaBoostClassifier(DecisionTreeClassifier(), random_state=42)
param_grid = {"base_estimator__criterion": ["gini", "entropy"],
              "base_estimator__splitter": ["best", "random"],
              "algorithm": ["SAMME", "SAMME.R"],
              "n_estimators": [1, 2, 3],
              "learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3, 1.5]}
grid_search_ada = GridSearchCV(ada_boost_clf, param_grid, cv=5, scoring='accuracy', verbose=1)
grid_search_ada.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
categorical_cols = ['pickup_neighborhood', 'dropoff_neighborhood', 'day_hour', 'month', 'year', 'passenger_count', 'holiday', 'hot_day', 'cold_day', 'rainy_day', 'snowy_day', 'windy_day']
numerical_cols = ['distance']
df_cats = train_full[categorical_cols].copy()
le_dict = {}
for col in categorical_cols:
    le_dict[col] = LabelEncoder().fit(df_cats[col])
    df_cats[col] = le_dict[col].transform(df_cats[col])
X_cats_full = df_cats.values
ohe = OneHotEncoder(categories='auto', drop='first')
X_onehot = ohe.fit_transform(X_cats_full)
X_nums_full = train_full[numerical_cols].values
X_nums_sparse = csr_matrix(X_nums_full)
X_full = hstack([X_onehot, X_nums_sparse])
X_full = X_full.tocsr()
si = SimpleImputer()
X = si.fit_transform(X_full)
y = train_full.fare_amount.values
del X_onehot, X_nums_sparse, X_full, train_full
gc.collect()
<categorify>
print(grid_search_ada.best_params_)
print(grid_search_ada.best_score_)
Titanic - Machine Learning from Disaster
4,286,398
df = test.copy()
df_cats = df[categorical_cols].copy()
# reuse the encoders fitted on the training data; refitting a fresh LabelEncoder
# on the test set would silently remap category codes and scramble the one-hot
# columns (this assumes the test set contains no unseen categories)
for col in categorical_cols:
    df_cats[col] = le_dict[col].transform(df_cats[col])
X_cats_full = df_cats.values
X_onehot = ohe.transform(X_cats_full)
del df_cats, X_cats_full
X_nums_full = df[numerical_cols].values
X_nums_sparse = csr_matrix(X_nums_full)
del df, X_nums_full
X_full = hstack([X_onehot, X_nums_sparse])
X_full = X_full.tocsr()
# likewise reuse the imputer fitted on the training data rather than refitting
X_public = si.transform(X_full)
del X_onehot, X_nums_sparse, X_full
<split>
means = grid_search_ada.cv_results_['mean_test_score']
stds = grid_search_ada.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid_search_ada.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)<init_hyperparams>
feature_importance_list = grid_search_ada.best_estimator_.feature_importances_
list_categories = full_pipeline.named_transformers_['cat']['cat_encoder'].categories_
flat_list = [item for sublist in list_categories for item in sublist]
attribute_list = num_attributes + flat_list
df_attribute_importance = pd.DataFrame({'attribute_name': attribute_list, 'importance': feature_importance_list})
df_attribute_importance.sort_values('importance', ascending=False)
Titanic - Machine Learning from Disaster
4,286,398
%%time
params = {'objective': 'regression', 'boosting': 'gbdt', 'metric': 'rmse',
          'num_leaves': 50, 'max_depth': 8, 'learning_rate': 0.5,
          'bagging_fraction': 0.8, 'feature_fraction': 0.8,
          'min_split_gain': 0.02, 'min_child_samples': 10, 'min_child_weight': 0.02,
          'lambda_l2': 0.0475, 'verbosity': -1, 'data_random_seed': 17,
          'early_stop': 100, 'verbose_eval': 100, 'num_rounds': 100}
d_train = lgb.Dataset(X_train, label=y_train)
d_test = lgb.Dataset(X_test, label=y_test)
watchlist = [d_train, d_test]
num_rounds = 100
verbose_eval = 100
early_stop = 100
model_lgb = lgb.train(params, train_set=d_train, num_boost_round=num_rounds,
                      valid_sets=watchlist, verbose_eval=verbose_eval,
                      early_stopping_rounds=early_stop)
pred_test_y_lgb = model_lgb.predict(X_test, num_iteration=model_lgb.best_iteration)
print("LGB Loss = " + str(sqrt(mean_squared_error(y_test, pred_test_y_lgb))))
<save_to_csv>
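One hedged aside: with `early_stopping_rounds` equal to `num_boost_round`, early stopping can never trigger here. If the round count were being tuned, LightGBM's built-in cross-validation is one option; a sketch assuming `d_train` and `params` from the cell above (result-key names vary across LightGBM versions):

# 3-fold CV over the same params; the per-round mean validation RMSE shows
# where extra boosting rounds stop helping
cv_results = lgb.cv(params, d_train, num_boost_round=200,
                    nfold=3, stratified=False, seed=17)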
# GradientBoostingClassifier grows its own regression trees internally and does
# not accept a base estimator, so no DecisionTreeClassifier is passed here
gradient_boost_clf = GradientBoostingClassifier(random_state=42)
param_grid = {'loss': ["deviance"],
              'n_estimators': [300, 400, 500],
              'learning_rate': [0.2, 0.15, 0.1],
              'max_depth': [3, 4, 6, 8],
              'min_samples_leaf': [60, 80, 100],
              'max_features': [0.5, 0.3, 0.1]}
grid_search_gradient_boost = GridSearchCV(gradient_boost_clf, param_grid, cv=5, scoring='accuracy', verbose=1)
grid_search_gradient_boost.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
lgb_public = model_lgb.predict(X_public, num_iteration=model_lgb.best_iteration)
final_pred_public = lgb_public.flatten()
# np.asscalar is deprecated; plain float() performs the same conversion
test_predictions_lgb = [float(x) for x in final_pred_public]
test_predictions_lgb = [x if x > 0 else 0 for x in test_predictions_lgb]  # fares cannot be negative
sample = pd.DataFrame({'key': test_id, 'fare_amount': test_predictions_lgb})
sample = sample.reindex(['key', 'fare_amount'], axis=1)
sample.to_csv('submission_lgb.csv', index=False)
sample.head()<set_options>
print(grid_search_gradient_boost.best_params_)
print(grid_search_gradient_boost.best_score_)
Titanic - Machine Learning from Disaster
4,286,398
plt.style.use('dark_background')
sns.set_style("darkgrid")<define_variables>
means = grid_search_gradient_boost.cv_results_['mean_test_score']
stds = grid_search_gradient_boost.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, grid_search_gradient_boost.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
%%time
train_path = '../input/train.csv'
traintypes = {'fare_amount': 'float32',
              'pickup_datetime': 'str',
              'pickup_longitude': 'float32',
              'pickup_latitude': 'float32',
              'dropoff_longitude': 'float32',
              'dropoff_latitude': 'float32',
              'passenger_count': 'uint8'}
cols = list(traintypes.keys())
train_df = pd.read_csv(train_path, usecols=cols, dtype=traintypes, nrows=2_000_000)<data_type_conversions>
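Passing explicit float32/uint8 dtypes to read_csv roughly halves the frame's footprint versus pandas' float64/int64 defaults; an optional check, assuming `train_df` from above:

# total in-memory size of the typed frame, in megabytes
print(train_df.memory_usage(deep=True).sum() / 1024**2, "MB")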
param_grid = {'bootstrap': [True],
              'max_depth': [80, 120, 140],
              'max_features': [4, 5, 6, 7, 8],
              'min_samples_leaf': [2, 3, 4],
              'min_samples_split': [8, 10, 12, 14, 16],
              'n_estimators': [100, 200, 300, 500, 1000]}
forest2_clf = RandomForestClassifier()
rnd_search = RandomizedSearchCV(forest2_clf, param_distributions=param_grid, n_iter=500,
                                cv=10, scoring='accuracy', random_state=101)
rnd_search.fit(X_train_prepared, y_train)
Titanic - Machine Learning from Disaster
4,286,398
%%time
train_df.to_feather('nyc_taxi_data_raw.feather')<load_from_csv>
rnd_search.best_params_
Titanic - Machine Learning from Disaster
4,286,398
df_train = pd.read_feather('nyc_taxi_data_raw.feather') <filter>
rnd_search.best_score_
Titanic - Machine Learning from Disaster
4,286,398
len(df_train[df_train.fare_amount > 0])<drop_column>
means = rnd_search.cv_results_['mean_test_score']
stds = rnd_search.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, rnd_search.cv_results_['params']):
    print("%0.3f (+/-%0.03f) for %r" % (mean, std * 2, params))
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train[df_train.fare_amount >= 0]<count_missing_values>
feature_importance_list = rnd_search.best_estimator_.feature_importances_
Titanic - Machine Learning from Disaster
4,286,398
df_train.isnull().sum()<drop_column>
list_categories = full_pipeline.named_transformers_['cat']['cat_encoder'].categories_
flat_list = [item for sublist in list_categories for item in sublist]
attribute_list = num_attributes + flat_list
df_attribute_importance = pd.DataFrame({'attribute_name': attribute_list, 'importance': feature_importance_list})
df_attribute_importance.sort_values('importance', ascending=False)
Titanic - Machine Learning from Disaster
4,286,398
df_train = df_train.dropna(how='any', axis='rows')<load_from_csv>
best_estimator = rnd_search.best_estimator_
Titanic - Machine Learning from Disaster
4,286,398
df_test = pd.read_csv('../input/test.csv')
df_test.head(5)<data_type_conversions>
y_test_predictions = best_estimator.predict(X_test_prepared)
Titanic - Machine Learning from Disaster
4,286,398
df_train['pickup_datetime'] = pd.to_datetime(df_train['pickup_datetime'], format="%Y-%m-%d %H:%M:%S UTC")<data_type_conversions>
submission = pd.DataFrame({"PassengerId": test_df["PassengerId"],
                           "Survived": y_test_predictions})
submission.head()
Titanic - Machine Learning from Disaster
4,286,398
<feature_engineering><EOS>
submission.to_csv('submission5.csv', index=False)
Titanic - Machine Learning from Disaster
4,211,056
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering>
import numpy as np
import pandas as pd
Titanic - Machine Learning from Disaster
4,211,056
df_train = add_new_date_time_features(df_train)
df_test = add_new_date_time_features(df_test)<feature_engineering>
trainData = pd.read_csv('../input/train.csv')
testData = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
4,211,056
def calculate_abs_different(df):
    df['abs_diff_longitude'] = (df.dropoff_longitude - df.pickup_longitude).abs()
    df['abs_diff_latitude'] = (df.dropoff_latitude - df.pickup_latitude).abs()

calculate_abs_different(df_train)
calculate_abs_different(df_test)<feature_engineering>
print(trainData.dtypes.sort_values())
print(testData.dtypes.sort_values())
Titanic - Machine Learning from Disaster
4,211,056
def convert_different_miles(df):
    # rough degree-to-mile factors: ~69 miles per degree of latitude,
    # ~50 miles per degree of longitude at NYC's latitude
    df['abs_diff_longitude'] = df.abs_diff_longitude * 50
    df['abs_diff_latitude'] = df.abs_diff_latitude * 69

convert_different_miles(df_train)
convert_different_miles(df_test)<feature_engineering>
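The longitude factor shrinks with latitude (one degree of longitude spans about 69·cos(latitude) miles); a quick standalone check of the ~50 used above, added here for illustration:

import numpy as np

# ≈ 52.3 miles per degree of longitude at 40.7°N, so 50 is a serviceable round figure
print(69 * np.cos(np.radians(40.7)))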
trainData.isnull().sum()[trainData.isnull().sum() > 0]
Titanic - Machine Learning from Disaster
4,211,056
# Manhattan's street grid is rotated roughly 29 degrees (0.506 rad) from true
# north; projecting the straight-line displacement onto the rotated grid axes
# gives a closer proxy for street distance than plain Euclidean distance
meas_ang = 0.506

def add_distance(df):
    df['Euclidean'] = (df.abs_diff_latitude**2 + df.abs_diff_longitude**2)**0.5
    df['delta_manh_long'] = (df.Euclidean * np.sin(np.arctan(df.abs_diff_longitude / df.abs_diff_latitude) - meas_ang)).abs()
    df['delta_manh_lat'] = (df.Euclidean * np.cos(np.arctan(df.abs_diff_longitude / df.abs_diff_latitude) - meas_ang)).abs()
    df['distance'] = df.delta_manh_long + df.delta_manh_lat
    df.drop(['abs_diff_longitude', 'abs_diff_latitude', 'Euclidean', 'delta_manh_long', 'delta_manh_lat'], axis=1, inplace=True)

add_distance(df_train)
add_distance(df_test)<prepare_x_and_y>
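A worked check of the rotation (standalone, with a made-up displacement already converted to miles):

import numpy as np

# hypothetical trip displacement: 1 mile east, 2 miles north
dlon, dlat = 1.0, 2.0
euclid = np.hypot(dlon, dlat)           # straight-line distance
theta = np.arctan(dlon / dlat) - 0.506  # bearing relative to the rotated grid
street = abs(euclid * np.sin(theta)) + abs(euclid * np.cos(theta))
print(round(street, 3))                 # grid-aligned ("taxicab") distance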
testData.isnull().sum()[testData.isnull().sum() > 0]
Titanic - Machine Learning from Disaster