kernel_id (int64): 24.2k - 23.3M
prompt (stringlengths): 8 - 1.85M
completion (stringlengths): 1 - 182k
comp_name (stringlengths): 5 - 57
9,770,354
data = data.loc[data['pickup_latitude'].between(40, 42)] data = data.loc[data['pickup_longitude'].between(-75, -72)] data = data.loc[data['dropoff_latitude'].between(40, 42)] data = data.loc[data['dropoff_longitude'].between(-75, -72)] print(f'New number of observations: {data.shape[0]}')<define_search_space>
warnings.filterwarnings('ignore') train["CabinBool"] = train["Cabin"].notnull().astype('int') test["CabinBool"] = test["Cabin"].notnull().astype('int')
Titanic - Machine Learning from Disaster
9,770,354
BB_zoom = (-74.1, -73.7, 40.6, 40.85) nyc_map_zoom = plt.imread('https://github.com/WillKoehrsen/Machine-Learning-Projects/blob/master/images/nyc_-74.1_-73.7_40.6_40.85.PNG?raw=true')<define_variables>
main_features = ['Sex', 'FamilySurvival', 'FareBin', 'Pclass', 'Title'] X_test = test[main_features] X_train = train[main_features] y_train = train['Survived']
Titanic - Machine Learning from Disaster
9,770,354
color_mapping = {fare_bin: palette[i] for i, fare_bin in enumerate(data['fare-bin'].unique())} color_mapping<feature_engineering>
ensemble = [RandomForestClassifier(), svm.NuSVC(probability=True), neighbors.KNeighborsClassifier()] classifiers_with_names = [(clf.__class__.__name__, clf) for clf in ensemble] voting = VotingClassifier(classifiers_with_names, voting='hard') cv_results = cross_validate(voting, X_train, y_train, cv=5) print(cv_results['test_score'].mean()) voting.fit(X_train, y_train) predictions = voting.predict(X_test) output = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': predictions.astype(int)}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
9,770,354
data['abs_lat_diff'] = (data['dropoff_latitude'] - data['pickup_latitude']).abs() data['abs_lon_diff'] = (data['dropoff_longitude'] - data['pickup_longitude']).abs()<filter>
Titanic - Machine Learning from Disaster
9,174,426
no_diff = data[(data['abs_lat_diff'] == 0) & (data['abs_lon_diff'] == 0)] no_diff.shape<compute_test_metric>
%matplotlib inline sns.set_style('whitegrid')
Titanic - Machine Learning from Disaster
9,174,426
def minkowski_distance(x1, x2, y1, y2, p): return (abs(x2 - x1)**p + abs(y2 - y1)**p)**(1 / p)<compute_test_metric>
train = pd.read_csv('/kaggle/input/titanic/train.csv')
Titanic - Machine Learning from Disaster
9,174,426
minkowski_distance(0, 3, 0, 4, 2)<groupby>
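A quick sanity check on the 3-4-5 right triangle: p=2 reduces to the Euclidean distance (3**2 + 4**2)**0.5 = 5, and p=1 to the Manhattan distance 3 + 4 = 7.
assert minkowski_distance(0, 3, 0, 4, 2) == 5.0  # Euclidean: (9 + 16) ** 0.5
assert minkowski_distance(0, 3, 0, 4, 1) == 7.0  # Manhattan: |3| + |4|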
train['Ticket_First'] = train['Ticket'].apply(lambda x: x.replace('.', '').replace('/', '').split()[0][:2])
Titanic - Machine Learning from Disaster
9,174,426
data.groupby('fare-bin')['euclidean'].agg(['mean', 'count'])<groupby>
ticket_freq = train[['Ticket_First','Survived']].groupby(['Ticket_First']).agg([('Nos people', 'count'), ('Nos survived', 'sum')]) ticket_freq.columns = ticket_freq.columns.get_level_values(1) ticket_freq = ticket_freq.reset_index(level=[0]) ticket_freq['Survival %'] = round(ticket_freq['Nos survived'] * 100 / ticket_freq['Nos people']) ticket_freq.sort_values(by=['Nos people'], ascending=False)
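Since pandas 0.25 the same two-level aggregation can be written with named aggregation, which avoids the MultiIndex flattening step; a minimal sketch (the short column names here are illustrative):
ticket_freq = (train.groupby('Ticket_First')['Survived']
                    .agg(people='count', survived='sum')
                    .reset_index())
ticket_freq['Survival %'] = round(ticket_freq['survived'] * 100 / ticket_freq['people'])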
Titanic - Machine Learning from Disaster
9,174,426
data.groupby('passenger_count')['fare_amount'].agg(['mean', 'count'])<feature_engineering>
def Ticket_Grp(col): if col[0] in ticket_freq[ticket_freq['Nos people'] > 10]['Ticket_First'].to_list(): return col[0] else: return 'Others'
Titanic - Machine Learning from Disaster
9,174,426
test = pd.read_csv('../input/test.csv', parse_dates=['pickup_datetime']) test['abs_lat_diff'] = (test['dropoff_latitude'] - test['pickup_latitude']).abs() test['abs_lon_diff'] = (test['dropoff_longitude'] - test['pickup_longitude']).abs() test_id = list(test.pop('key')) test.describe()<feature_engineering>
train['Ticket_Grp'] = train[['Ticket_First']].apply(Ticket_Grp, axis=1) train['Ticket_Grp'].value_counts()
Titanic - Machine Learning from Disaster
9,174,426
test['manhattan'] = minkowski_distance(test['pickup_longitude'], test['dropoff_longitude'], test['pickup_latitude'], test['dropoff_latitude'], 1) test['euclidean'] = minkowski_distance(test['pickup_longitude'], test['dropoff_longitude'], test['pickup_latitude'], test['dropoff_latitude'], 2)<compute_test_metric>
train['Salute'] = train['Name'].apply(lambda x: x.split()[1])
Titanic - Machine Learning from Disaster
9,174,426
R = 6378 def haversine_np(lon1, lat1, lon2, lat2): lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2]) dlon = lon2 - lon1 dlat = lat2 - lat1 a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2 c = 2 * np.arcsin(np.sqrt(a)) km = R * c return km<compute_test_metric>
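A rough sanity check, using the JFK and LaGuardia reference coordinates that appear later in these kernels (note the lon-before-lat argument order):
jfk = (40.639722, -73.778889)
lga = (40.77725, -73.872611)
print(haversine_np(jfk[1], jfk[0], lga[1], lga[0]))  # ~17.2 km, plausible for JFK -> LGA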
def Salute_group(col): if col[0] in ['Mr.', 'Miss.', 'Mrs.', 'Master.']: return col[0] else: return 'Others'
Titanic - Machine Learning from Disaster
9,174,426
data['haversine'] = haversine_np(data['pickup_longitude'], data['pickup_latitude'], data['dropoff_longitude'], data['dropoff_latitude']) test['haversine'] = haversine_np(test['pickup_longitude'], test['pickup_latitude'], test['dropoff_longitude'], test['dropoff_latitude'])<groupby>
train['Salute_Grp'] = train[['Salute']].apply(Salute_group, axis=1)
Titanic - Machine Learning from Disaster
9,174,426
data.groupby('fare-bin')['haversine'].agg(['mean', 'count'])<choose_model_class>
PclassXSex_med = train[['Sex','Age','Pclass']].groupby(['Sex','Pclass']).median()
Titanic - Machine Learning from Disaster
9,174,426
lr = LinearRegression()<split>
train['Age_PclXSex'] = train[['Age', 'Pclass', 'Sex']].apply(age_PclassSex, axis=1)
Titanic - Machine Learning from Disaster
9,174,426
X_train, X_valid, y_train, y_valid = train_test_split(data, np.array(data['fare_amount']), stratify=data['fare-bin'], random_state=RSEED, test_size=1_000_000)<train_model>
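Stratifying on the binned fare keeps the fare distribution comparable across the two splits; a quick illustrative check:
# The normalized bin frequencies should be nearly identical in both splits.
print(X_train['fare-bin'].value_counts(normalize=True).head())
print(X_valid['fare-bin'].value_counts(normalize=True).head())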
train.drop(['Age', 'Cabin'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
9,174,426
lr.fit(X_train[['abs_lat_diff', 'abs_lon_diff', 'passenger_count']], y_train) print('Intercept', round(lr.intercept_, 4)) print('abs_lat_diff coef: ', round(lr.coef_[0], 4), '\tabs_lon_diff coef:', round(lr.coef_[1], 4), '\tpassenger_count coef:', round(lr.coef_[2], 4))<compute_test_metric>
train.dropna(inplace=True)
Titanic - Machine Learning from Disaster
9,174,426
warnings.filterwarnings('ignore', category=RuntimeWarning) def metrics(train_pred, valid_pred, y_train, y_valid): train_rmse = np.sqrt(mean_squared_error(y_train, train_pred)) valid_rmse = np.sqrt(mean_squared_error(y_valid, valid_pred)) train_ape = abs((y_train - train_pred) / y_train) valid_ape = abs((y_valid - valid_pred) / y_valid) train_ape[train_ape == np.inf] = 0 train_ape[train_ape == -np.inf] = 0 valid_ape[valid_ape == np.inf] = 0 valid_ape[valid_ape == -np.inf] = 0 train_mape = 100 * np.mean(train_ape) valid_mape = 100 * np.mean(valid_ape) return train_rmse, valid_rmse, train_mape, valid_mape def evaluate(model, features, X_train, X_valid, y_train, y_valid): train_pred = model.predict(X_train[features]) valid_pred = model.predict(X_valid[features]) train_rmse, valid_rmse, train_mape, valid_mape = metrics(train_pred, valid_pred, y_train, y_valid) print(f'Training: rmse = {round(train_rmse, 2)} \t mape = {round(train_mape, 2)}') print(f'Validation: rmse = {round(valid_rmse, 2)} \t mape = {round(valid_mape, 2)}')<compute_test_metric>
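A minimal toy illustration of why the inf guard exists, assuming numpy-array inputs: a zero target makes its absolute percentage error infinite, and metrics() zeroes those entries before averaging.
y_toy = np.array([10.0, 0.0, 5.0])
p_toy = np.array([8.0, 1.0, 5.0])
print(metrics(p_toy, p_toy, y_toy, y_toy))  # finite RMSE and MAPE despite the zero target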
Sex_Dumm = pd.get_dummies(train['Sex'], drop_first=True) Embarked_Dumm = pd.get_dummies(train['Embarked'], drop_first=True) Ticket_Grp = pd.get_dummies(train['Ticket_Grp'], drop_first=True, prefix='Ticket') Salute_Group = pd.get_dummies(train['Salute_Grp'], drop_first=True)
Titanic - Machine Learning from Disaster
9,174,426
evaluate(lr, ['abs_lat_diff', 'abs_lon_diff', 'passenger_count'], X_train, X_valid, y_train, y_valid)<compute_train_metric>
train = pd.concat([train, Sex_Dumm, Embarked_Dumm, Ticket_Grp, Salute_Group], axis = 1) train.head()
Titanic - Machine Learning from Disaster
9,174,426
train_mean = y_train.mean() train_preds = [train_mean for _ in range(len(y_train))] valid_preds = [train_mean for _ in range(len(y_valid))] tr, vr, tm, vm = metrics(train_preds, valid_preds, y_train, y_valid) print(f'Baseline Training: rmse = {round(tr, 2)} \t mape = {round(tm, 2)}') print(f'Baseline Validation: rmse = {round(vr, 2)} \t mape = {round(vm, 2)}')<save_to_csv>
y = train['Survived'] X_train, X_test, y_train, y_test = train_test_split(train[['Pclass', 'SibSp', 'Parch', 'Fare', 'Age_PclXSex', 'male', 'Q', 'S', 'Ticket_13', 'Ticket_17', 'Ticket_19', 'Ticket_23', 'Ticket_24', 'Ticket_25', 'Ticket_26', 'Ticket_28', 'Ticket_29', 'Ticket_31', 'Ticket_33', 'Ticket_34', 'Ticket_35', 'Ticket_36', 'Ticket_37', 'Ticket_A5', 'Ticket_CA', 'Ticket_Others', 'Ticket_PC', 'Ticket_SC', 'Ticket_SO', 'Ticket_ST', 'Miss.', 'Mr.', 'Mrs.', 'Others']], y, test_size = 0.3, random_state = 143)
Titanic - Machine Learning from Disaster
9,174,426
preds = lr.predict(test[['abs_lat_diff', 'abs_lon_diff', 'passenger_count']]) sub = pd.DataFrame({'key': test_id, 'fare_amount': preds}) sub.to_csv('sub_lr_simple.csv', index=False)<create_dataframe>
rf = RandomForestClassifier() rf.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
9,174,426
sns.distplot(sub['fare_amount']) plt.title('Distribution of Linear Regression Predictions');<filter>
pred = rf.predict(X_test)
Titanic - Machine Learning from Disaster
9,174,426
test.loc[sub[sub['fare_amount'] > 100].index]<filter>
print(accuracy_score(y_test, pred))
Titanic - Machine Learning from Disaster
9,174,426
sub[sub['fare_amount'] > 100]<train_model>
n_estimators = [int(x) for x in np.arange(200, 2200, 200)] max_features = ['auto','sqrt'] max_depth = [int(x) for x in np.arange(10, 110, 10)] max_depth.append(None) min_samples_leaf = [1,2,3,4,5] min_samples_split = [2,4,6,8,10] bootstrap = [True,False] param_grid = {'n_estimators': n_estimators, 'max_features': max_features, 'max_depth': max_depth, 'min_samples_leaf': min_samples_leaf, 'min_samples_split': min_samples_split, 'bootstrap': bootstrap} rf1 = RandomForestClassifier() rf1_random = RandomizedSearchCV(rf1, param_grid, n_iter=100, cv=3, verbose=2, random_state=143, n_jobs=-1)
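For scale: this grid spans 10 x 2 x 11 x 5 x 5 x 2 = 11,000 parameter combinations, of which n_iter=100 samples under 1%; a quick check:
from functools import reduce
print(reduce(lambda total, values: total * len(values), param_grid.values(), 1))  # 11000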
Titanic - Machine Learning from Disaster
9,174,426
lr.fit(X_train[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']], y_train) evaluate(lr, ['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count'], X_train, X_valid, y_train, y_valid)<train_model>
rf1_random.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
9,174,426
random_forest = RandomForestRegressor(n_estimators=20, max_depth=20, max_features=None, oob_score=True, bootstrap=True, verbose=1, n_jobs=-1) random_forest.fit(X_train[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']], y_train)<compute_test_metric>
rf1_random.best_params_
Titanic - Machine Learning from Disaster
9,174,426
evaluate(random_forest, ['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count'], X_train, X_valid, y_train, y_valid)<save_to_csv>
rf1_random.best_estimator_
Titanic - Machine Learning from Disaster
9,174,426
preds = random_forest.predict(test[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']]) sub = pd.DataFrame({'key': test_id, 'fare_amount': preds}) sub.to_csv('sub_rf_simple.csv', index = False) sns.distplot(sub['fare_amount']) plt.title('Distribution of Random Forest Predicted Fare Amount');<filter>
pred2 = rf1_random.best_estimator_.predict(X_test)
Titanic - Machine Learning from Disaster
9,174,426
sub.loc[simple_over_100]<predict_on_test>
print(accuracy_score(y_test, pred2))
Titanic - Machine Learning from Disaster
9,174,426
lr_tpred = lr.predict(X_train[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']]) rf_tpred = random_forest.predict(X_train[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']]) lr_pred = lr.predict(X_valid[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']]) rf_pred = random_forest.predict(X_valid[['haversine', 'abs_lat_diff', 'abs_lon_diff', 'passenger_count']]) train_pred = (lr_tpred + rf_tpred) / 2 valid_pred = (lr_pred + rf_pred) / 2 tr, vr, tm, vm = metrics(train_pred, valid_pred, y_train, y_valid) print(f'Combined Training: rmse = {round(tr, 2)} \t mape = {round(tm, 2)}') print(f'Combined Validation: rmse = {round(vr, 2)} \t mape = {round(vm, 2)}')<predict_on_test>
Titanic - Machine Learning from Disaster
9,174,426
def model_rf(X_train, X_valid, y_train, y_valid, test, features, model=RandomForestRegressor(n_estimators=20, max_depth=20, n_jobs=-1), return_model=False): model.fit(X_train[features], y_train) evaluate(model, features, X_train, X_valid, y_train, y_valid) preds = model.predict(test[features]) sub = pd.DataFrame({'key': test_id, 'fare_amount': preds}) feature_importances = pd.DataFrame({'feature': features, 'importance': model.feature_importances_}).sort_values('importance', ascending=False).set_index('feature') if return_model: return sub, feature_importances, model return sub, feature_importances<compute_test_metric>
test = pd.read_csv('/kaggle/input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
9,174,426
sub, fi = model_rf(X_train, X_valid, y_train, y_valid, test, features=['abs_lat_diff', 'abs_lon_diff', 'haversine', 'passenger_count', 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'])<save_to_csv>
test['Ticket_First'] = test['Ticket'].apply(lambda x:x.replace('.','' ).replace('/','' ).split() [0][:2] )
Titanic - Machine Learning from Disaster
9,174,426
sub.to_csv('sub_rf_8_features.csv', index=False) sub['fare_amount'].plot.hist();<create_dataframe>
test['Ticket_Grp'] = test[['Ticket_First']].apply(Ticket_Grp2, axis=1)
Titanic - Machine Learning from Disaster
9,174,426
def extract_dateinfo(df, date_col, drop=True, time=False, start_ref=pd.Timestamp(1900, 1, 1), extra_attr=False): df = df.copy() fld = df[date_col] fld_dtype = fld.dtype if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype): fld_dtype = np.datetime64 if not np.issubdtype(fld_dtype, np.datetime64): df[date_col] = fld = pd.to_datetime(fld, infer_datetime_format=True) pre = re.sub('[Dd]ate', '', date_col) pre = re.sub('[Tt]ime', '', pre) attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear', 'Days_in_month', 'is_leap_year'] if extra_attr: attr = attr + ['Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'] if time: attr = attr + ['Hour', 'Minute', 'Second'] for n in attr: df[pre + n] = getattr(fld.dt, n.lower()) df[pre + 'Days_in_year'] = df[pre + 'is_leap_year'] + 365 if time: df[pre + 'frac_day'] = (df[pre + 'Hour'] + df[pre + 'Minute'] / 60 + df[pre + 'Second'] / 60 / 60) / 24 df[pre + 'frac_week'] = (df[pre + 'Dayofweek'] + df[pre + 'frac_day']) / 7 df[pre + 'frac_month'] = (df[pre + 'Day'] + df[pre + 'frac_day']) / (df[pre + 'Days_in_month'] + 1) df[pre + 'frac_year'] = (df[pre + 'Dayofyear'] + df[pre + 'frac_day']) / (df[pre + 'Days_in_year'] + 1) df[pre + 'Elapsed'] = (fld - start_ref).dt.total_seconds() if drop: df = df.drop(date_col, axis=1) return df<drop_column>
test['Salute'] = test['Name'].apply(lambda x:x.split() [1] )
Titanic - Machine Learning from Disaster
9,174,426
test = extract_dateinfo(test, 'pickup_datetime', drop=False, time=True, start_ref=data['pickup_datetime'].min()) test.head()<groupby>
test['Salute'] = test['Name'].apply(lambda x: x.split()[1]) def Salute_group(col): if col[0] in ['Mr.', 'Miss.', 'Mrs.', 'Master.']: return col[0] else: return 'Others'
Titanic - Machine Learning from Disaster
9,174,426
fare_counts = data.groupby('fare_amount')['haversine'].agg(['count', pd.Series.nunique]).sort_values('count', ascending=False) fare_counts.head()<split>
test['Salute_Grp'] = test[['Salute']].apply(Salute_group, axis=1)
Titanic - Machine Learning from Disaster
9,174,426
X_train, X_valid, y_train, y_valid = train_test_split(data, np.array(data['fare_amount']), stratify=data['fare-bin'], random_state=RSEED, test_size=1_000_000)<define_variables>
PclassXSex_med = test[['Sex','Age','Pclass']].groupby(['Sex','Pclass']).median() PclassXSex_med
Titanic - Machine Learning from Disaster
9,174,426
time_features = ['pickup_frac_day', 'pickup_frac_week', 'pickup_frac_year', 'pickup_Elapsed'] features = ['abs_lat_diff', 'abs_lon_diff', 'haversine', 'passenger_count', 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'] + time_features<compute_test_metric>
test['Age_PclXSex'] = test[['Age', 'Pclass', 'Sex']].apply(age_PclassSex, axis=1)
Titanic - Machine Learning from Disaster
9,174,426
sub, fi = model_rf(X_train, X_valid, y_train, y_valid, test, features=features)<train_model>
test.drop(['Cabin', 'Age'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
9,174,426
lr = LinearRegression() lr.fit(X_train[features], y_train) evaluate(lr, features, X_train, X_valid, y_train, y_valid)<save_to_csv>
test[pd.isnull(test['Fare'])]
Titanic - Machine Learning from Disaster
9,174,426
sub.to_csv('sub_rf_frac_time.csv', index=False)<drop_column>
Fare_med = test[['Pclass','Fare','Sex', 'Embarked']].groupby(['Pclass','Sex', 'Embarked']).agg(['count', 'mean']) Fare_med
Titanic - Machine Learning from Disaster
9,174,426
features = list(data.columns) for f in ['pickup_datetime', 'fare_amount', 'fare-bin', 'color']: features.remove(f) len(features)<compute_train_metric>
test['Fare'].fillna(12.718, inplace=True)
Titanic - Machine Learning from Disaster
9,174,426
sub, fi, random_forest = model_rf(X_train, X_valid, y_train, y_valid, test, features=features, return_model=True)<save_to_csv>
Sex_Dumm = pd.get_dummies(test['Sex'], drop_first=True) Embarked_Dumm = pd.get_dummies(test['Embarked'], drop_first=True) Ticket_Grp = pd.get_dummies(test['Ticket_Grp'], drop_first=True, prefix='Ticket') Salute_Group = pd.get_dummies(test['Salute_Grp'], drop_first=True)
Titanic - Machine Learning from Disaster
9,174,426
sub.to_csv('sub_rf_all_features.csv', index=False)<train_on_grid>
test = pd.concat([test, Sex_Dumm, Embarked_Dumm, Ticket_Grp, Salute_Group], axis = 1) test.head()
Titanic - Machine Learning from Disaster
9,174,426
param_grid = { 'n_estimators': np.linspace(10, 100).astype(int), 'max_depth': [None] + list(np.linspace(5, 30).astype(int)), 'max_features': ['auto', 'sqrt', None] + list(np.arange(0.5, 1, 0.1)), 'max_leaf_nodes': [None] + list(np.linspace(10, 50, 500).astype(int)), 'min_samples_split': [2, 5, 10], 'bootstrap': [True, False] } estimator = RandomForestRegressor(random_state=RSEED) rs = RandomizedSearchCV(estimator, param_grid, n_jobs=-1, scoring='neg_mean_absolute_error', cv=3, n_iter=100, verbose=1, random_state=RSEED)<train_model>
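One subtlety worth knowing: np.linspace defaults to num=50 points, so the 'n_estimators' and 'max_depth' entries above are denser grids than they may look, while np.linspace(10, 50, 500) yields 500 mostly duplicated integer leaf-node counts:
print(len(np.linspace(10, 100)))  # 50 -- the default num for np.linspace
print(len(set(np.linspace(10, 50, 500).astype(int))))  # 41 distinct values out of 500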
X = train[['Pclass', 'SibSp', 'Parch', 'Fare', 'Age_PclXSex', 'male', 'Q', 'S', 'Ticket_13', 'Ticket_17', 'Ticket_19', 'Ticket_23', 'Ticket_24', 'Ticket_25', 'Ticket_26', 'Ticket_28', 'Ticket_29', 'Ticket_31', 'Ticket_33', 'Ticket_34', 'Ticket_35', 'Ticket_36', 'Ticket_37', 'Ticket_A5', 'Ticket_CA', 'Ticket_Others', 'Ticket_PC', 'Ticket_SC', 'Ticket_SO', 'Ticket_ST', 'Miss.', 'Mr.', 'Mrs.', 'Others']] y = train['Survived']
Titanic - Machine Learning from Disaster
9,174,426
tune_data = data.sample(100_000, random_state = RSEED) time_features = ['pickup_frac_day', 'pickup_frac_week', 'pickup_frac_year', 'pickup_Elapsed'] features = ['abs_lat_diff', 'abs_lon_diff', 'haversine', 'passenger_count', 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'] + time_features rs.fit(tune_data[features], np.array(tune_data['fare_amount']))<find_best_params>
rf_fin = rf1_random.best_estimator_ rf_fin.fit(X, y)
Titanic - Machine Learning from Disaster
9,174,426
model = rs.best_estimator_ print(f'The best parameters were {rs.best_params_} with a negative mae of {rs.best_score_}')<train_model>
test.set_index('PassengerId', inplace=True)
Titanic - Machine Learning from Disaster
9,174,426
model.n_jobs = -1 model.fit(X_train[features], y_train) evaluate(model, features, X_train, X_valid, y_train, y_valid)<save_to_csv>
test_fin = test[['Pclass', 'SibSp', 'Parch', 'Fare', 'Age_PclXSex', 'male', 'Q', 'S', 'Ticket_13', 'Ticket_17', 'Ticket_19', 'Ticket_23', 'Ticket_24', 'Ticket_25', 'Ticket_26', 'Ticket_28', 'Ticket_29', 'Ticket_31', 'Ticket_33', 'Ticket_34', 'Ticket_35', 'Ticket_36', 'Ticket_37', 'Ticket_A5', 'Ticket_CA', 'Ticket_Others', 'Ticket_PC', 'Ticket_SC', 'Ticket_SO', 'Ticket_ST', 'Miss.', 'Mr.', 'Mrs.', 'Others']] test_fin
Titanic - Machine Learning from Disaster
9,174,426
pred = np.array(model.predict(test[features])).reshape(-1) sub = pd.DataFrame({'key': test_id, 'fare_amount': pred}) sub.to_csv('sub_rf_tuned.csv', index=False) sub['fare_amount'].plot.hist(); plt.title('Predicted Test Fare Distribution');<set_options>
pred_fin = rf_fin.predict(test_fin) pred_df = pd.DataFrame(pred_fin, columns = ['Survived'],index = test_fin.index) pred_df
Titanic - Machine Learning from Disaster
9,174,426
plt.figure(figsize=(8, 5), dpi=80) sns.set_style("darkgrid")<load_from_csv>
pred_df['Survived'].to_csv('My_Titanic_Predictions.csv', index=True, header=True)
Titanic - Machine Learning from Disaster
9,174,426
test_dataset = pd.read_csv('../input/test.csv') train_dataset = pd.read_csv('../input/train.csv', nrows=2_000_000)<drop_column>
feat_imp = pd.DataFrame(importance, index = X.columns, columns = ['Importance']) feat_imp.sort_values(['Importance'], inplace = True)
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.drop(labels='key', axis=1, inplace=True) test_dataset.drop(labels='key', axis=1, inplace=True)<data_type_conversions>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head(10)
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.passenger_count = train_dataset.passenger_count.astype(dtype='uint8')<data_type_conversions>
test_data=pd.read_csv('/kaggle/input/titanic/test.csv') test_data.head()
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.pickup_longitude = train_dataset.pickup_longitude.astype(dtype='float32') train_dataset.pickup_latitude = train_dataset.pickup_latitude.astype(dtype='float32') train_dataset.dropoff_longitude = train_dataset.dropoff_longitude.astype(dtype='float32') train_dataset.dropoff_latitude = train_dataset.dropoff_latitude.astype(dtype='float32') train_dataset.fare_amount = train_dataset.fare_amount.astype(dtype='float32')<count_missing_values>
train_data = train_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1) test_data = test_data.drop(['PassengerId', 'Ticket', 'Cabin'], axis=1)
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.isnull().sum()<data_type_conversions>
test_data['Fare'].fillna(7, inplace=True) test_data.isnull().sum()
Titanic - Machine Learning from Disaster
9,730,497
train_dataset['pickup_datetime'] = pd.to_datetime(arg=train_dataset['pickup_datetime'], infer_datetime_format=True) test_dataset['pickup_datetime'] = pd.to_datetime(arg=test_dataset['pickup_datetime'], infer_datetime_format=True)<feature_engineering>
train_data['Embarked'].fillna('S', inplace=True)
Titanic - Machine Learning from Disaster
9,730,497
def add_new_date_time_features(dataset): dataset['hour'] = dataset.pickup_datetime.dt.hour dataset['day'] = dataset.pickup_datetime.dt.day dataset['month'] = dataset.pickup_datetime.dt.month dataset['year'] = dataset.pickup_datetime.dt.year dataset['day_of_week'] = dataset.pickup_datetime.dt.dayofweek return dataset train_dataset = add_new_date_time_features(train_dataset) test_dataset = add_new_date_time_features(test_dataset)<count_values>
train_data['Sex'] = train_data['Sex'].map({'male': 1, 'female': 0}) test_data['Sex'] = test_data['Sex'].map({'male': 1, 'female': 0})
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.fare_amount[(train_dataset.fare_amount <= 0)|(train_dataset.fare_amount >= 350)].count()<define_variables>
def title(x): if 'Mr.' in x.split(): return 'Mr' elif 'Master.' in x.split(): return 'Master' elif 'Miss.' in x.split(): return 'Miss' elif 'Mrs.' in x.split(): return 'Mrs' else: return 'X'
Titanic - Machine Learning from Disaster
9,730,497
print(f'Row count before elimination - {train_dataset.shape[0]}') train_dataset = train_dataset[train_dataset.fare_amount.between(0, 350, inclusive=False)] print(f'Row count after elimination - {train_dataset.shape[0]}')<count_values>
train_data['Title']=train_data['Name'].apply(lambda x:title(x)) test_data['Title']=test_data['Name'].apply(lambda x:title(x))
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.passenger_count[(train_dataset.passenger_count < 1)|(train_dataset.passenger_count > 8)].count()<define_variables>
train_data['Title'].value_counts()
Titanic - Machine Learning from Disaster
9,730,497
print(f'Row count before elimination - {train_dataset.shape[0]}') train_dataset = train_dataset[train_dataset.passenger_count.between(0, 8, inclusive=False)] print(f'Row count after elimination - {train_dataset.shape[0]}')<normalization>
train_data = train_data.drop('Name', axis=1) test_data = test_data.drop('Name', axis=1)
Titanic - Machine Learning from Disaster
9,730,497
def degree_to_radian(degree): return degree * (np.pi / 180) def calculate_distance(pickup_latitude, pickup_longitude, dropoff_latitude, dropoff_longitude): from_lat = degree_to_radian(pickup_latitude) from_long = degree_to_radian(pickup_longitude) to_lat = degree_to_radian(dropoff_latitude) to_long = degree_to_radian(dropoff_longitude) radius = 6371.01 lat_diff = to_lat - from_lat long_diff = to_long - from_long a = np.sin(lat_diff / 2)**2 + np.cos(from_lat) * np.cos(to_lat) * np.sin(long_diff / 2)**2 c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)) return radius * c<feature_engineering>
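Two quick invariant checks for this haversine helper, using arbitrary Manhattan coordinates: the distance from a point to itself is zero, and swapping pickup and dropoff leaves the result unchanged.
assert calculate_distance(40.75, -73.99, 40.75, -73.99) == 0.0  # identical points
d1 = calculate_distance(40.75, -73.99, 40.70, -74.01)
d2 = calculate_distance(40.70, -74.01, 40.75, -73.99)
assert abs(d1 - d2) < 1e-9  # symmetric under swapping endpoints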
train_data.groupby('Title')['Age'].mean()
Titanic - Machine Learning from Disaster
9,730,497
train_dataset['distance'] = calculate_distance(train_dataset.pickup_latitude, train_dataset.pickup_longitude, train_dataset.dropoff_latitude, train_dataset.dropoff_longitude) test_dataset['distance'] = calculate_distance(test_dataset.pickup_latitude, test_dataset.pickup_longitude, test_dataset.dropoff_latitude, test_dataset.dropoff_longitude)<sort_values>
train_data['age'] = train_data.groupby('Title')['Age'].transform(lambda x: x.fillna(x.median())) test_data['age'] = test_data.groupby('Title')['Age'].transform(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.sort_values(by='distance')<count_values>
train_data = train_data.drop('Age', axis=1) test_data = test_data.drop('Age', axis=1)
Titanic - Machine Learning from Disaster
9,730,497
train_dataset.distance[(train_dataset.distance == 0)].count()<filter>
train_data = pd.get_dummies(train_data, drop_first=True) test_data = pd.get_dummies(test_data, drop_first=True)
Titanic - Machine Learning from Disaster
9,730,497
train_dataset[(train_dataset.pickup_latitude != train_dataset.dropoff_latitude) & (train_dataset.pickup_longitude != train_dataset.dropoff_longitude) & (train_dataset.distance == 0)].count()<feature_engineering>
X = train_data.drop('Survived', axis=1).values y = train_data['Survived'].values test_data = test_data.values
Titanic - Machine Learning from Disaster
9,730,497
def add_distances_from_airport(dataset): jfk_coords = (40.639722, -73.778889) ewr_coords = (40.6925, -74.168611) lga_coords = (40.77725, -73.872611) dataset['pickup_jfk_distance'] = calculate_distance(jfk_coords[0], jfk_coords[1], dataset.pickup_latitude, dataset.pickup_longitude) dataset['dropof_jfk_distance'] = calculate_distance(jfk_coords[0], jfk_coords[1], dataset.dropoff_latitude, dataset.dropoff_longitude) dataset['pickup_ewr_distance'] = calculate_distance(ewr_coords[0], ewr_coords[1], dataset.pickup_latitude, dataset.pickup_longitude) dataset['dropof_ewr_distance'] = calculate_distance(ewr_coords[0], ewr_coords[1], dataset.dropoff_latitude, dataset.dropoff_longitude) dataset['pickup_lga_distance'] = calculate_distance(lga_coords[0], lga_coords[1], dataset.pickup_latitude, dataset.pickup_longitude) dataset['dropof_lga_distance'] = calculate_distance(lga_coords[0], lga_coords[1], dataset.dropoff_latitude, dataset.dropoff_longitude) return dataset train_dataset = add_distances_from_airport(train_dataset) test_dataset = add_distances_from_airport(test_dataset)<filter>
from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
9,730,497
train_dataset[(train_dataset.distance>90)&(train_dataset.fare_amount<70)]<prepare_x_and_y>
from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
9,730,497
selected_predictors = ['pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'pickup_jfk_distance', 'dropof_jfk_distance', 'pickup_ewr_distance', 'dropof_ewr_distance', 'pickup_lga_distance', 'dropof_lga_distance', 'hour', 'month', 'year', 'distance'] X = train_dataset.loc[:, selected_predictors].values y = train_dataset.iloc[:, 0].values X_test_dataset = test_dataset.loc[:, selected_predictors].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/20)<compute_train_metric>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=85)
Titanic - Machine Learning from Disaster
9,730,497
rand_forest_regressor = RandomForestRegressor() rand_forest_regressor.fit(X_train, y_train) y_rand_forest_predict = rand_forest_regressor.predict(X_test) random_forest_model_error = sqrt(mean_squared_error(np.expm1(y_test), np.expm1(y_rand_forest_predict))) print(f'Random Forest Root Mean Squared Error - {random_forest_model_error}')<choose_model_class>
scaler = MinMaxScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) test_data = scaler.transform(test_data)
Titanic - Machine Learning from Disaster
9,730,497
<train_model>
import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense,Dropout from tensorflow.keras.callbacks import EarlyStopping
Titanic - Machine Learning from Disaster
9,730,497
XGB_model = XGBRegressor(learning_rate=0.3, max_depth=6, n_estimators=500) XGB_model.fit(X_train, y_train) y_XGB_predict = XGB_model.predict(X_test) XGB_model_error = sqrt(mean_squared_error(np.expm1(y_test), np.expm1(y_XGB_predict))) print(f'XGBoost Root Mean Squared Error - {XGB_model_error}')<train_model>
max_features = X_train.shape[1]
Titanic - Machine Learning from Disaster
9,730,497
lgb_model = lgb.LGBMRegressor(objective='regression', num_leaves=35, n_estimators=300) lgb_model.fit(X_train, y_train) y_LGB_predict = lgb_model.predict(X_test) LGB_model_error = sqrt(mean_squared_error(np.expm1(y_test), np.expm1(y_LGB_predict))) print(f'LGBM Root Mean Squared Error - {LGB_model_error}')<compute_test_metric>
model = Sequential() model.add(Dense(50, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(25, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(units=1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer='adam')
Titanic - Machine Learning from Disaster
9,730,497
ensembled_prediction = (0.5 * np.expm1(y_XGB_predict)) + (0.5 * np.expm1(y_LGB_predict)) ensembled_prediction_error = sqrt(mean_squared_error(np.expm1(y_test), ensembled_prediction)) print(f'Ensembled Root Mean Squared Error - {ensembled_prediction_error}')<save_to_csv>
model.fit(x=X_train, y=y_train, epochs=80, validation_data=(X_test, y_test))
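EarlyStopping is imported above but never attached; a minimal sketch of how it could be wired into the fit call (the patience value is illustrative):
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
model.fit(x=X_train, y=y_train, epochs=80, validation_data=(X_test, y_test), callbacks=[early_stop])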
Titanic - Machine Learning from Disaster
9,730,497
y_XGB_predict = np.expm1(XGB_model.predict(X_test_dataset)) submission = pd.read_csv('../input/sample_submission.csv') submission['fare_amount'] = y_XGB_predict submission.to_csv('xgb_submission.csv', index=False) submission.head(10)<save_to_csv>
loss = pd.DataFrame(model.history.history)
Titanic - Machine Learning from Disaster
9,730,497
y_LGB_predict = np.expm1(lgb_model.predict(X_test_dataset)) submission = pd.read_csv('../input/sample_submission.csv') submission['fare_amount'] = y_LGB_predict submission.to_csv('lgbm_submission.csv', index=False) submission.head(10)<save_to_csv>
predictions = (model.predict(test_data) > 0.5).astype('int32')
Titanic - Machine Learning from Disaster
9,730,497
ensembled_prediction = (0.5 * y_XGB_predict) + (0.5 * y_LGB_predict) submission['fare_amount'] = ensembled_prediction submission.to_csv('ensembled_submission.csv', index=False) submission.head(10)<load_from_csv>
predictions=predictions.ravel() predictions
Titanic - Machine Learning from Disaster
9,730,497
<define_variables>
test_data1=pd.read_csv('/kaggle/input/titanic/test.csv') test_data1.head()
Titanic - Machine Learning from Disaster
9,730,497
traintypes = {'fare_amount': 'float32', 'pickup_datetime': 'str', 'pickup_longitude': 'float32', 'pickup_latitude': 'float32', 'dropoff_longitude': 'float32', 'dropoff_latitude': 'float32', 'passenger_count': 'uint8'} cols = list(traintypes.keys())<load_from_csv>
output = pd.DataFrame({'PassengerId': test_data1['PassengerId'], 'Survived': predictions}) output.to_csv('my_submission.csv', index=False)
Titanic - Machine Learning from Disaster
9,606,142
NROWS = 12000000 test_df = pd.read_csv(TEST_PATH) train_df = pd.read_csv(TRAIN_PATH, usecols=cols, nrows=NROWS, dtype=traintypes)<load_from_csv>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
9,606,142
<drop_column>
women = train_data.loc[train_data.Sex == "female"]["Survived"] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women)
Titanic - Machine Learning from Disaster
9,606,142
train_df = train_df.dropna(how='any', axis='rows')<drop_column>
men = train_data.loc[train_data.Sex == "male"]["Survived"] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men)
Titanic - Machine Learning from Disaster
9,606,142
def clean_df(df): return df[(df.fare_amount > 0) & (df.pickup_longitude > -80) & (df.pickup_longitude < -70) & (df.pickup_latitude > 35) & (df.pickup_latitude < 45) & (df.dropoff_longitude > -80) & (df.dropoff_longitude < -70) & (df.dropoff_latitude > 35) & (df.dropoff_latitude < 45) & (df.passenger_count > 0) & (df.passenger_count < 10)]<concatenate>
y = train_data["Survived"] features = ["Sex", "Pclass", "SibSp", "Parch"] X = train_data[features].values X_test = test_data[features].values labelEncoder_gender = LabelEncoder() X[:,0] = labelEncoder_gender.fit_transform(X[:,0]) X = np.vstack(X[:, :] ).astype(np.float) labelEncoder_gender_test = LabelEncoder() X_test[:,0] = labelEncoder_gender_test.fit_transform(X_test[:,0]) X_test = np.vstack(X_test[:, :] ).astype(np.float) model = RandomForestClassifier(n_estimators=300, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
9,379,738
def sphere_dist(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon): R_earth = 6371 pickup_lat, pickup_lon, dropoff_lat, dropoff_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon]) dlat = dropoff_lat - pickup_lat dlon = dropoff_lon - pickup_lon a = np.sin(dlat/2.0)**2 + np.cos(pickup_lat) * np.cos(dropoff_lat) * np.sin(dlon/2.0)**2 return 2 * R_earth * np.arcsin(np.sqrt(a)) def add_airport_dist(dataset): jfk_coord = (40.639722, -73.778889) ewr_coord = (40.6925, -74.168611) lga_coord = (40.77725, -73.872611) pickup_lat = dataset['pickup_latitude'] dropoff_lat = dataset['dropoff_latitude'] pickup_lon = dataset['pickup_longitude'] dropoff_lon = dataset['dropoff_longitude'] pickup_jfk = sphere_dist(pickup_lat, pickup_lon, jfk_coord[0], jfk_coord[1]) dropoff_jfk = sphere_dist(jfk_coord[0], jfk_coord[1], dropoff_lat, dropoff_lon) pickup_ewr = sphere_dist(pickup_lat, pickup_lon, ewr_coord[0], ewr_coord[1]) dropoff_ewr = sphere_dist(ewr_coord[0], ewr_coord[1], dropoff_lat, dropoff_lon) pickup_lga = sphere_dist(pickup_lat, pickup_lon, lga_coord[0], lga_coord[1]) dropoff_lga = sphere_dist(lga_coord[0], lga_coord[1], dropoff_lat, dropoff_lon) dataset['jfk_dist'] = pd.concat([pickup_jfk, dropoff_jfk], axis=1).min(axis=1) dataset['ewr_dist'] = pd.concat([pickup_ewr, dropoff_ewr], axis=1).min(axis=1) dataset['lga_dist'] = pd.concat([pickup_lga, dropoff_lga], axis=1).min(axis=1) return dataset def add_datetime_info(dataset): dataset['pickup_datetime'] = pd.to_datetime(dataset['pickup_datetime'], format="%Y-%m-%d %H:%M:%S UTC") dataset['hour'] = dataset.pickup_datetime.dt.hour dataset['day'] = dataset.pickup_datetime.dt.day dataset['month'] = dataset.pickup_datetime.dt.month dataset['weekday'] = dataset.pickup_datetime.dt.weekday dataset['year'] = dataset.pickup_datetime.dt.year return dataset<feature_engineering>
%matplotlib inline sns.set_style('whitegrid')
Titanic - Machine Learning from Disaster
9,379,738
def transform_features(df): df = add_datetime_info(df) df = add_airport_dist(df) df['distance'] = sphere_dist(df['pickup_latitude'], df['pickup_longitude'], df['dropoff_latitude'], df['dropoff_longitude']) df.drop(columns=['pickup_datetime'], inplace=True) return df<categorify>
train = pd.read_csv('/kaggle/input/titanic/train.csv')
Titanic - Machine Learning from Disaster
9,379,738
train_df = transform_features(train_df) test_df = transform_features(test_df) test_df.drop(columns=['key'], inplace=True)<prepare_x_and_y>
train['Salute'] = train['Name'].apply(lambda x: x.split()[1])
Titanic - Machine Learning from Disaster
9,379,738
train_df_y = train_df.fare_amount.copy() train_df_X = train_df[test_df.columns]<create_dataframe>
def Salute_group(col): if col[0] in ['Mr.', 'Miss.', 'Mrs.', 'Master.']: return col[0] else: return 'Others'
Titanic - Machine Learning from Disaster
9,379,738
dtrain = lgb.Dataset(train_df_X, label=train_df_y, free_raw_data=False)<init_hyperparams>
train['Salute_Grp'] = train[['Salute']].apply(Salute_group, axis=1)
Titanic - Machine Learning from Disaster
9,379,738
print("Light Gradient Boosting Regressor: ") lgbm_params = { 'task': 'train', 'boosting_type': 'gbdt', 'objective': 'regression', 'metric': 'rmse', 'learning_rate' :'0.03', 'num_leaves':'31', 'max_depth' : '-1', 'subsample' :'.8', 'colsample_bytree' : '0.6', 'min_split_gain' : '0.5', 'min_child_weight' : '1', 'min_child_samples' :'10', 'scale_pos_weight' : '1', 'num_threads' : '4', 'seed' : '0', 'eval_freq' : '50' } folds = KFold(n_splits=5, shuffle=True, random_state=1) fold_preds = np.zeros(testshape[0]) oof_preds = np.zeros(trainshape[0]) dtrain.construct() modelstart = time.time() for trn_idx, val_idx in folds.split(train_df_X): clf = lgb.train( params=lgbm_params, train_set=dtrain.subset(trn_idx), valid_sets=dtrain.subset(val_idx), num_boost_round=10000, early_stopping_rounds=125, verbose_eval=500 ) oof_preds[val_idx] = clf.predict(dtrain.data.iloc[val_idx]) fold_preds += clf.predict(test_df)/ folds.n_splits print(mean_squared_error(train_df_y.iloc[val_idx], oof_preds[val_idx])**.5) print("Model Runtime: %0.2f Minutes"%(( time.time() - modelstart)/60))<load_from_csv>
train['Ticket_First'] = train['Ticket'].apply(lambda x: x.replace('.', '').replace('/', '').split()[0][:2])
Titanic - Machine Learning from Disaster
9,379,738
test_df = pd.read_csv(TEST_PATH)<save_to_csv>
ticket_freq = train[['Ticket_First','Survived']].groupby(['Ticket_First']).agg([('Nos people', 'count'), ('Nos survived', 'sum')]) ticket_freq.columns = ticket_freq.columns.get_level_values(1) ticket_freq = ticket_freq.reset_index(level=[0]) ticket_freq['Survival %'] = round(ticket_freq['Nos survived'] * 100 / ticket_freq['Nos people']) ticket_freq.sort_values(by=['Nos people'], ascending=False)
Titanic - Machine Learning from Disaster
9,379,738
result = pd.DataFrame({'key': test_df['key'], 'fare_amount': fold_preds}) result.head() result.to_csv('taxi-fare-prediction.csv', index=False)<load_from_csv>
def Ticket_Grp(col): if col[0] in ticket_freq[ticket_freq['Nos people'] > 10]['Ticket_First'].to_list(): return col[0] else: return 'Others'
Titanic - Machine Learning from Disaster
9,379,738
train_df = pd.read_csv('../input/train.csv', nrows=6_000_000) train_df.dtypes<count_missing_values>
train['Ticket_Grp'] = train[['Ticket_First']].apply(Ticket_Grp, axis=1)
Titanic - Machine Learning from Disaster
9,379,738
print(train_df.isnull().sum())<drop_column>
train['Ticket_Grp'].value_counts()
Titanic - Machine Learning from Disaster
9,379,738
train_df = train_df.dropna(how='any', axis='rows')<drop_column>
PclassXSex_med = train[['Sex','Age','Pclass']].groupby(['Sex','Pclass']).median()
Titanic - Machine Learning from Disaster
9,379,738
def clean_df(df): return df[(df.fare_amount > 0) & (df.pickup_longitude > -80) & (df.pickup_longitude < -70) & (df.pickup_latitude > 35) & (df.pickup_latitude < 45) & (df.dropoff_longitude > -80) & (df.dropoff_longitude < -70) & (df.dropoff_latitude > 35) & (df.dropoff_latitude < 45) & (df.passenger_count > 0) & (df.passenger_count < 10)] train_df = clean_df(train_df) print(len(train_df))<concatenate>
train['Age_PclXSex'] = train[['Age', 'Pclass', 'Sex']].apply(age_PclassSex, axis=1)
Titanic - Machine Learning from Disaster
9,379,738
def sphere_dist(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon): R_earth = 6371 pickup_lat, pickup_lon, dropoff_lat, dropoff_lon = map(np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon]) dlat = dropoff_lat - pickup_lat dlon = dropoff_lon - pickup_lon a = np.sin(dlat/2.0)**2 + np.cos(pickup_lat) * np.cos(dropoff_lat) * np.sin(dlon/2.0)**2 return 2 * R_earth * np.arcsin(np.sqrt(a)) def add_airport_dist(dataset): jfk_coord = (40.639722, -73.778889) ewr_coord = (40.6925, -74.168611) lga_coord = (40.77725, -73.872611) pickup_lat = dataset['pickup_latitude'] dropoff_lat = dataset['dropoff_latitude'] pickup_lon = dataset['pickup_longitude'] dropoff_lon = dataset['dropoff_longitude'] pickup_jfk = sphere_dist(pickup_lat, pickup_lon, jfk_coord[0], jfk_coord[1]) dropoff_jfk = sphere_dist(jfk_coord[0], jfk_coord[1], dropoff_lat, dropoff_lon) pickup_ewr = sphere_dist(pickup_lat, pickup_lon, ewr_coord[0], ewr_coord[1]) dropoff_ewr = sphere_dist(ewr_coord[0], ewr_coord[1], dropoff_lat, dropoff_lon) pickup_lga = sphere_dist(pickup_lat, pickup_lon, lga_coord[0], lga_coord[1]) dropoff_lga = sphere_dist(lga_coord[0], lga_coord[1], dropoff_lat, dropoff_lon) dataset['jfk_dist'] = pd.concat([pickup_jfk, dropoff_jfk], axis=1).min(axis=1) dataset['ewr_dist'] = pd.concat([pickup_ewr, dropoff_ewr], axis=1).min(axis=1) dataset['lga_dist'] = pd.concat([pickup_lga, dropoff_lga], axis=1).min(axis=1) return dataset def add_datetime_info(dataset): dataset['pickup_datetime'] = pd.to_datetime(dataset['pickup_datetime'], format="%Y-%m-%d %H:%M:%S UTC") dataset['hour'] = dataset.pickup_datetime.dt.hour dataset['day'] = dataset.pickup_datetime.dt.day dataset['month'] = dataset.pickup_datetime.dt.month dataset['weekday'] = dataset.pickup_datetime.dt.weekday dataset['year'] = dataset.pickup_datetime.dt.year return dataset train_df = add_datetime_info(train_df) train_df = add_airport_dist(train_df) train_df['distance'] = sphere_dist(train_df['pickup_latitude'], train_df['pickup_longitude'], train_df['dropoff_latitude'], train_df['dropoff_longitude']) train_df.head()<drop_column>
train.drop(['Age', 'Cabin'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
9,379,738
train_df.drop(columns=['key', 'pickup_datetime'], inplace=True) train_df.head()<split>
train.dropna(inplace=True)
Titanic - Machine Learning from Disaster
9,379,738
y = train_df['fare_amount'] train = train_df.drop(columns=['fare_amount']) x_train, x_test, y_train, y_test = train_test_split(train, y, random_state=0, test_size=0.01)<train_on_grid>
Sex_Dumm = pd.get_dummies(train['Sex'], drop_first=True) Embarked_Dumm = pd.get_dummies(train['Embarked'], drop_first=True) Ticket_Grp = pd.get_dummies(train['Ticket_Grp'], drop_first=True, prefix='Ticket') Salute_Group = pd.get_dummies(train['Salute_Grp'], drop_first=True)
Titanic - Machine Learning from Disaster
9,379,738
params = { 'max_depth': 8, 'eta': .03, 'subsample': 1, 'colsample_bytree': 0.8, 'objective': 'reg:linear', 'eval_metric': 'rmse', 'silent': 1 } CV = False if CV: dtrain = xgb.DMatrix(train, label=y) gridsearch_params = [eta for eta in np.arange(.04, 0.12, .02)] min_rmse = float("Inf") best_params = None for eta in gridsearch_params: print("CV with eta={} ".format(eta)) params['eta'] = eta cv_results = xgb.cv( params, dtrain, num_boost_round=1000, nfold=3, metrics={'rmse'}, early_stopping_rounds=10 ) mean_rmse = cv_results['test-rmse-mean'].min() boost_rounds = cv_results['test-rmse-mean'].argmin() print("\tRMSE {} for {} rounds".format(mean_rmse, boost_rounds)) if mean_rmse < min_rmse: min_rmse = mean_rmse best_params = eta print("Best params: {}, RMSE: {}".format(best_params, min_rmse)) else: params['silent'] = 0 print(params)<train_model>
train = pd.concat([train, Sex_Dumm, Embarked_Dumm, Ticket_Grp, Salute_Group], axis = 1) train.head()
Titanic - Machine Learning from Disaster
9,379,738
def XGBmodel(x_train, x_test, y_train, y_test, params): matrix_train = xgb.DMatrix(x_train, label=y_train) matrix_test = xgb.DMatrix(x_test, label=y_test) model = xgb.train(params=params, dtrain=matrix_train, num_boost_round=5000, early_stopping_rounds=10, evals=[(matrix_test, 'test')]) return model model = XGBmodel(x_train, x_test, y_train, y_test, params)<load_from_csv>
y = train['Survived'] X_train, X_test, y_train, y_test = train_test_split(train[['Pclass', 'SibSp', 'Parch', 'Fare', 'Age_PclXSex', 'male', 'Q', 'S', 'Ticket_13', 'Ticket_17', 'Ticket_19', 'Ticket_23', 'Ticket_24', 'Ticket_25', 'Ticket_26', 'Ticket_28', 'Ticket_29', 'Ticket_31', 'Ticket_33', 'Ticket_34', 'Ticket_35', 'Ticket_36', 'Ticket_37', 'Ticket_A5', 'Ticket_CA', 'Ticket_Others', 'Ticket_PC', 'Ticket_SC', 'Ticket_SO', 'Ticket_ST', 'Miss.', 'Mr.', 'Mrs.', 'Others']], y, test_size = 0.3, random_state = 143)
Titanic - Machine Learning from Disaster