kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
9,379,738
test_df = pd.read_csv('.. /input/test.csv') test_df = add_datetime_info(test_df) test_df = add_airport_dist(test_df) test_df['distance'] = sphere_dist(test_df['pickup_latitude'], test_df['pickup_longitude'], test_df['dropoff_latitude'] , test_df['dropoff_longitude']) test_key = test_df['key'] x_pred = test_df.drop(columns=['key', 'pickup_datetime']) prediction = model.predict(xgb.DMatrix(x_pred), ntree_limit = model.best_ntree_limit )<save_to_csv>
from xgboost import XGBClassifier
Titanic - Machine Learning from Disaster
9,379,738
submission = pd.DataFrame({ "key": test_key, "fare_amount": prediction.round(2) }) submission.to_csv('taxi_fare_submission.csv',index=False) submission.head()<import_modules>
xgb_base = XGBClassifier(random_state = 105) xgb_base.fit(X_train, y_train) xgb_base
Titanic - Machine Learning from Disaster
9,379,738
from geopy.distance import great_circle<set_options>
pred = xgb_base.predict(X_test) print(accuracy_score(y_test, pred))
Titanic - Machine Learning from Disaster
9,379,738
warnings.filterwarnings('ignore' )<load_from_csv>
param_grid = {"n_est" : [10, 20, 25, 30, 40, 50, 100,150, 200], "learning_rate" : [0.01, 0.02, 0.03, 0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] , "max_depth" : [ 2, 3, 4, 5, 6, 8, 10, 12, 15], "min_child_weight" : [ 1, 3, 5, 7 ], "gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ], "colsample_bytree" : [ 0.1, 0.2, 0.3, 0.4, 0.5 , 0.7 ] } xgb_cl = XGBClassifier(random_state = 105) xgb_random = RandomizedSearchCV(xgb_cl, param_grid, n_iter = 500, cv = 5, verbose =2 , scoring = 'accuracy', random_state = 143, n_jobs = -1) xgb_random.fit(X_train, y_train )
Titanic - Machine Learning from Disaster
9,379,738
train = pd.read_csv('.. /input/train.csv',nrows=10_000_000 )<filter>
xgb_random.best_params_
Titanic - Machine Learning from Disaster
9,379,738
train = train.loc[(train['fare_amount'] > 0)&(train['fare_amount'] < 200)] train = train.loc[(train['pickup_longitude'] > -150)&(train['pickup_longitude'] < 0)] train = train.loc[(train['pickup_latitude'] > 0)&(train['pickup_latitude'] < 80)] train = train.loc[(train['dropoff_longitude'] > -150)&(train['dropoff_longitude'] < 0)] train = train.loc[(train['dropoff_latitude'] > 0)&(train['dropoff_longitude'] < 80)] train = train.loc[train['passenger_count'] <= 8]<load_from_csv>
pred2 = xgb_random.best_estimator_.predict(X_test )
Titanic - Machine Learning from Disaster
9,379,738
test = pd.read_csv('.. /input/test.csv' )<compute_test_metric>
print(accuracy_score(y_test, pred2))
Titanic - Machine Learning from Disaster
9,379,738
def haversine(lon1, lat1, lon2, lat2): lon1 = np.radians(lon1) lat1 = np.radians(lat1) lon2 = np.radians(lon2) lat2 = np.radians(lat2) dlon = lon2 - lon1 dlat = lat2 - lat1 a = np.sin(dlat/2)**2 + np.cos(lat1)* np.cos(lat2)* np.sin(dlon/2)**2 c = 2 * np.arcsin(np.sqrt(a)) r = 6371 return c * r<feature_engineering>
test = pd.read_csv('/kaggle/input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
9,379,738
def add_travel_distance_vector_features(df): df['distance'] = haversine(df.dropoff_longitude,df.dropoff_latitude,df.pickup_longitude,df.pickup_latitude) add_travel_distance_vector_features(train) add_travel_distance_vector_features(test )<drop_column>
test['Salute'] = test['Name'].apply(lambda x:x.split() [1] )
Titanic - Machine Learning from Disaster
9,379,738
<count_missing_values>
def Salute_group(col): if col[0] in ['Mr.', 'Miss.', 'Mrs.', 'Master.']: return col[0] else: return 'Others'
Titanic - Machine Learning from Disaster
9,379,738
train.isnull().sum()<count_missing_values>
test['Salute_Grp'] = test[['Salute']].apply(Salute_group, axis =1) test['Salute_Grp'].value_counts()
Titanic - Machine Learning from Disaster
9,379,738
train.isnull().sum()<drop_column>
test['Ticket_First'] = test['Ticket'].apply(lambda x:x.replace('.','' ).replace('/','' ).split() [0][:2])
Titanic - Machine Learning from Disaster
9,379,738
train.dropna(how = 'any', axis = 'rows', inplace=True )<data_type_conversions>
test['Ticket_Grp'] = test[['Ticket_First']].apply(Ticket_Grp2, axis =1 )
Titanic - Machine Learning from Disaster
9,379,738
train['key'] = pd.to_datetime(train.key) test['key'] = pd.to_datetime(test.key )<feature_engineering>
PclassXSex_med = test[['Sex','Age','Pclass']].groupby(['Sex','Pclass'] ).median() PclassXSex_med
Titanic - Machine Learning from Disaster
9,379,738
train['hour'] = train['key'].dt.hour test['hour'] = test['key'].dt.hour train['day'] = train['key'].dt.day test['day'] = test['key'].dt.day train['month'] = train['key'].dt.month test['month'] = test['key'].dt.month train['year'] = train['key'].dt.year test['year'] = test['key'].dt.year<feature_engineering>
test['Age_PclXSex'] = test[['Age', 'Pclass', 'Sex']].apply(age_PclassSex, axis = 1 )
Titanic - Machine Learning from Disaster
9,379,738
train['daysinmonth'] = train['key'].dt.daysinmonth test['daysinmonth'] = test['key'].dt.daysinmonth train['weekofyear'] = train['key'].dt.weekofyear test['weekofyear'] = test['key'].dt.weekofyear train['dayofweek'] = train['key'].dt.dayofweek test['dayofweek'] = test['key'].dt.dayofweek train['dayofyear'] = train['key'].dt.dayofyear test['dayofyear'] = test['key'].dt.dayofyear train['quarter'] = train['key'].dt.quarter test['quarter'] = test['key'].dt.quarter<data_type_conversions>
test.drop(['Cabin', 'Age'], axis =1 , inplace = True )
Titanic - Machine Learning from Disaster
9,379,738
train.key = train.key.values.astype(np.int64) test.key = test.key.values.astype(np.int64 )<drop_column>
Fare_med = test[['Pclass','Fare','Sex', 'Embarked']].groupby(['Pclass','Sex', 'Embarked'] ).agg(['count', 'mean']) Fare_med
Titanic - Machine Learning from Disaster
9,379,738
train.pop('pickup_datetime') test.pop('pickup_datetime') train.pop('key') test.pop('key' )<feature_engineering>
test['Fare'].fillna(12.718, inplace = True )
Titanic - Machine Learning from Disaster
9,379,738
train['longitude_distance'] = abs(train['pickup_longitude'] - train['dropoff_longitude']) train['latitude_distance'] = abs(train['pickup_latitude'] - train['dropoff_latitude']) test['longitude_distance'] = abs(test['pickup_longitude'] - test['dropoff_longitude']) test['latitude_distance'] = abs(test['pickup_latitude'] - test['dropoff_latitude']) train['distance_travelled_sin'] = np.sin(( train['longitude_distance'] ** 2 * train['latitude_distance'] ** 2)**.5) test['distance_travelled_sin'] = np.sin(( test['longitude_distance'] ** 2 * test['latitude_distance'] ** 2)**.5 )<prepare_x_and_y>
Sex_Dumm = pd.get_dummies(test['Sex'], drop_first = True) Embarked_Dumm = pd.get_dummies(test['Embarked'], drop_first = True) Ticket_Grp = pd.get_dummies(test['Ticket_Grp'], drop_first = True, prefix = 'Ticket') Salute_Group = pd.get_dummies(test['Salute_Grp'], drop_first = True )
Titanic - Machine Learning from Disaster
9,379,738
y = train.pop('fare_amount') X = train<import_modules>
test = pd.concat([test, Sex_Dumm, Embarked_Dumm, Ticket_Grp, Salute_Group], axis = 1) test.columns
Titanic - Machine Learning from Disaster
9,379,738
from sklearn.preprocessing import StandardScaler<normalization>
X = train[['Pclass', 'SibSp', 'Parch', 'Fare', 'Age_PclXSex', 'male', 'Q', 'S', 'Ticket_13', 'Ticket_17', 'Ticket_19', 'Ticket_23', 'Ticket_24', 'Ticket_25', 'Ticket_26', 'Ticket_28', 'Ticket_29', 'Ticket_31', 'Ticket_33', 'Ticket_34', 'Ticket_35', 'Ticket_36', 'Ticket_37', 'Ticket_A5', 'Ticket_CA', 'Ticket_Others', 'Ticket_PC', 'Ticket_SC', 'Ticket_SO', 'Ticket_ST', 'Miss.', 'Mr.', 'Mrs.', 'Others']] y = train['Survived']
Titanic - Machine Learning from Disaster
9,379,738
scaler = StandardScaler()<normalization>
xgb_fin_base = XGBClassifier() xgb_fin_base.fit(X,y )
Titanic - Machine Learning from Disaster
9,379,738
X = scaler.fit_transform(X) test = scaler.transform(test )<split>
test.set_index('PassengerId', inplace = True )
Titanic - Machine Learning from Disaster
9,379,738
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.001, random_state=124 )<import_modules>
test_fin =test[['Pclass', 'SibSp', 'Parch', 'Fare', 'Age_PclXSex', 'male', 'Q', 'S', 'Ticket_13', 'Ticket_17', 'Ticket_19', 'Ticket_23', 'Ticket_24', 'Ticket_25', 'Ticket_26', 'Ticket_28', 'Ticket_29', 'Ticket_31', 'Ticket_33', 'Ticket_34', 'Ticket_35', 'Ticket_36', 'Ticket_37', 'Ticket_A5', 'Ticket_CA', 'Ticket_Others', 'Ticket_PC', 'Ticket_SC', 'Ticket_SO', 'Ticket_ST', 'Miss.', 'Mr.', 'Mrs.', 'Others']] test_fin
Titanic - Machine Learning from Disaster
9,379,738
from keras.layers import Dense, Input,Dropout,LeakyReLU,LSTM,BatchNormalization from keras.models import Model from keras import backend as Backend import keras.activations as act<compute_test_metric>
pred_fin_base = xgb_fin_base.predict(test_fin) pred_base_df = pd.DataFrame(pred_fin_base, columns = ['Survived'],index = test_fin.index) pred_base_df
Titanic - Machine Learning from Disaster
9,379,738
def rmse(y_true, y_pred): return Backend.sqrt(Backend.mean(Backend.square(y_pred - y_true), axis=-1))<choose_model_class>
pred_base_df['Survived'].to_csv('My_Titanic_Predictions.csv', index = True, header = True )
Titanic - Machine Learning from Disaster
9,379,738
<choose_model_class>
xgb_tuned = xgb_random.best_estimator_
Titanic - Machine Learning from Disaster
9,379,738
def lstm(n): model_in = Input(shape=(1,n)) model = LSTM(100 )(model_in) model = Dropout(0.2 )(model) model = Dense(1,activation=act.selu )(model) model = Model(model_in,model) model.compile(loss='mse', optimizer='nadam',metrics=[rmse]) return model<train_model>
xgb_tuned.fit(X,y )
Titanic - Machine Learning from Disaster
9,379,738
history = model.fit(X_train,y_train,batch_size=10000,epochs=100,verbose=1,validation_data=(X_test,y_test))<compute_test_metric>
pred_fin_tuned = xgb_tuned.predict(test_fin) pred_tuned = pd.DataFrame(pred_fin_tuned, columns = ['Survived'],index = test_fin.index) pred_tuned
Titanic - Machine Learning from Disaster
9,379,738
<predict_on_test><EOS>
pred_tuned['Survived'].to_csv('My_Titanic_Predictions2.csv', index = True, header = True )
Titanic - Machine Learning from Disaster
8,988,266
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
sns.set(style="darkgrid") warnings.filterwarnings('ignore') SEED = 42
Titanic - Machine Learning from Disaster
8,988,266
test = pd.read_csv('.. /input/test.csv' )<create_dataframe>
def concat_df(train_data, test_data): return pd.concat([train_data, test_data], sort=True ).reset_index(drop=True) def divide_df(all_data): return all_data.loc[:890], all_data.loc[891:].drop(['Survived'], axis=1) df_train = pd.read_csv('.. /input/train.csv') df_test = pd.read_csv('.. /input/test.csv') df_all = concat_df(df_train, df_test) df_train.name = 'Training Set' df_test.name = 'Test Set' df_all.name = 'All Set' dfs = [df_train, df_test] print('Number of Training Examples = {}'.format(df_train.shape[0])) print('Number of Test Examples = {} '.format(df_test.shape[0])) print('Training X Shape = {}'.format(df_train.shape)) print('Training y Shape = {} '.format(df_train['Survived'].shape[0])) print('Test X Shape = {}'.format(df_test.shape)) print('Test y Shape = {} '.format(df_test.shape[0])) print(df_train.columns) print(df_test.columns )
Titanic - Machine Learning from Disaster
8,988,266
submission = pd.DataFrame( {'key': test.key, 'fare_amount': pres.reshape(9914)}, columns = ['key', 'fare_amount'] )<save_to_csv>
def display_missing(df): for col in df.columns.tolist() : print('{} column missing values: {}'.format(col, df[col].isnull().sum())) print(' ') for df in dfs: print('{}'.format(df.name)) display_missing(df )
Titanic - Machine Learning from Disaster
8,988,266
submission.to_csv('submission.csv', index = False )<set_options>
age_by_pclass_sex = df_all.groupby(['Sex', 'Pclass'] ).median() ['Age'] for pclass in range(1, 4): for sex in ['female', 'male']: print('Median age of Pclass {} {}s: {}'.format(pclass, sex, age_by_pclass_sex[sex][pclass])) print('Median age of all passengers: {}'.format(df_all['Age'].median())) df_all['Age'] = df_all.groupby(['Sex', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))
Titanic - Machine Learning from Disaster
8,988,266
%matplotlib inline sns.set_style("white") warnings.filterwarnings('ignore') <load_from_csv>
df_all[df_all['Embarked'].isnull() ]
Titanic - Machine Learning from Disaster
8,988,266
data = pd.read_csv(".. /input/new-york-city-taxi-fare-prediction/train.csv", nrows = 7000000 )<define_variables>
df_all['Embarked'] = df_all['Embarked'].fillna('S' )
Titanic - Machine Learning from Disaster
8,988,266
q1 = data['fare_amount'].quantile(0.25) q3 = data['fare_amount'].quantile(0.75) iqr = q3 - q1 print("Fare Amount lower bound : ", q1 -(1.5 * iqr), "Fare Amount upper bound : ", q3 +(1.5 * iqr))<count_missing_values>
df_all[df_all['Fare'].isnull() ]
Titanic - Machine Learning from Disaster
8,988,266
print("Total null values: ", data.isnull().sum()) print("Percentage of null values: ", data[["dropoff_longitude", "dropoff_latitude"]].isnull().sum() / data.shape[0] )<drop_column>
med_fare = df_all.groupby(['Pclass', 'Parch', 'SibSp'] ).Fare.median() [3][0][0] df_all['Fare'] = df_all['Fare'].fillna(med_fare )
Titanic - Machine Learning from Disaster
8,988,266
data.dropna(how='any', axis='rows', inplace=True )<categorify>
idx = df_all[df_all['Deck'] == 'T'].index df_all.loc[idx, 'Deck'] = 'A'
Titanic - Machine Learning from Disaster
8,988,266
def get_cleaned(df): return df[(df.fare_amount > 0)& (df.pickup_latitude > 35)&(df.pickup_latitude < 45)& (df.pickup_longitude > -80)&(df.pickup_longitude < -68)& (df.dropoff_latitude > 35)&(df.dropoff_latitude < 45)& (df.pickup_longitude > -80)&(df.dropoff_longitude < -68)& (df.passenger_count > 0)&(df.passenger_count < 8)] data = get_cleaned(data) print(len(data)) print("Data lost after the cleaning process: ", 7000000 - len(data))<compute_test_metric>
df_all_decks_survived = df_all.groupby(['Deck', 'Survived'] ).count().drop(columns=['Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Pclass', 'Cabin', 'PassengerId', 'Ticket'] ).rename(columns={'Name':'Count'} ).transpose() def get_survived_dist(df): surv_counts = {'A':{}, 'B':{}, 'C':{}, 'D':{}, 'E':{}, 'F':{}, 'G':{}, 'M':{}} decks = df.columns.levels[0] for deck in decks: for survive in range(0, 2): surv_counts[deck][survive] = df[deck][survive][0] df_surv = pd.DataFrame(surv_counts) surv_percentages = {} for col in df_surv.columns: surv_percentages[col] = [(count / df_surv[col].sum())* 100 for count in df_surv[col]] return surv_counts, surv_percentages def display_surv_dist(percentages): df_survived_percentages = pd.DataFrame(percentages ).transpose() deck_names =('A', 'B', 'C', 'D', 'E', 'F', 'G', 'M') bar_count = np.arange(len(deck_names)) bar_width = 0.85 not_survived = df_survived_percentages[0] survived = df_survived_percentages[1] plt.figure(figsize=(20, 10)) plt.bar(bar_count, not_survived, color=' plt.bar(bar_count, survived, bottom=not_survived, color=' plt.xlabel('Deck', size=15, labelpad=20) plt.ylabel('Survival Percentage', size=15, labelpad=20) plt.xticks(bar_count, deck_names) plt.tick_params(axis='x', labelsize=15) plt.tick_params(axis='y', labelsize=15) plt.legend(loc='upper left', bbox_to_anchor=(1, 1), prop={'size': 15}) plt.title('Survival Percentage in Decks', size=18, y=1.05) plt.show() all_surv_count, all_surv_per = get_survived_dist(df_all_decks_survived) display_surv_dist(all_surv_per )
Titanic - Machine Learning from Disaster
8,988,266
def sphere_dist(pick_lat, pick_lon, drop_lat, drop_lon): R_earth = 6371 pick_lat, pick_lon, drop_lat, drop_lon = map(np.radians, [pick_lat, pick_lon, drop_lat, drop_lon]) dlat = drop_lat - pick_lat dlon = drop_lon - pick_lon a = np.sin(dlat/2.0)**2 + np.cos(pick_lat)* np.cos(drop_lat)* np.sin(dlon/2.0)**2 return 2 * R_earth * np.arcsin(np.sqrt(a))<feature_engineering>
df_all['Deck'] = df_all['Deck'].replace(['A', 'B', 'C'], 'ABC') df_all['Deck'] = df_all['Deck'].replace(['D', 'E'], 'DE') df_all['Deck'] = df_all['Deck'].replace(['F', 'G'], 'FG') df_all['Deck'].value_counts()
Titanic - Machine Learning from Disaster
8,988,266
def datetime_info(df): df['pickup_datetime'] = pd.to_datetime(df['pickup_datetime'],format="%Y-%m-%d %H:%M:%S UTC") df['hour'] = df.pickup_datetime.dt.hour df['day'] = df.pickup_datetime.dt.day df['month'] = df.pickup_datetime.dt.month df['weekday'] = df.pickup_datetime.dt.weekday df['year'] = df.pickup_datetime.dt.year return df data = datetime_info(data) data = airport_dist(data) data['distance'] = sphere_dist(data['pickup_latitude'], data['pickup_longitude'], data['dropoff_latitude'], data['dropoff_longitude']) data.head()<drop_column>
df_all.drop(['Cabin'], inplace=True, axis=1) df_train, df_test = divide_df(df_all) dfs = [df_train, df_test] for df in dfs: display_missing(df )
Titanic - Machine Learning from Disaster
8,988,266
data.drop(columns=["key", "pickup_datetime"], inplace=True) data.head()<split>
corr = df_train_corr_nd['Correlation Coefficient'] > 0.1 df_train_corr_nd[corr]
Titanic - Machine Learning from Disaster
8,988,266
y = data["fare_amount"] train = data.drop(columns=["fare_amount"]) x_train, x_test, y_train, y_test = train_test_split(train, y, random_state=2666, test_size=0.05 )<init_hyperparams>
corr = df_test_corr_nd['Correlation Coefficient'] > 0.1 df_test_corr_nd[corr]
Titanic - Machine Learning from Disaster
8,988,266
params = { "max_depth": 7, "subsample": 0.9, "eta": 0.03, "colsample_bytree": 0.9, "random_state": 2666, "objective": "reg:linear", "eval_metric": "rmse", "silent": 1 }<train_model>
df_all = concat_df(df_train, df_test) df_all.head()
Titanic - Machine Learning from Disaster
8,988,266
def XGBmodel(x_train, x_test, y_train, y_test, params): matrix_train = xgb.DMatrix(x_train, label=y_train) matrix_test = xgb.DMatrix(x_test, label=y_test) model = xgb.train(params=params, dtrain=matrix_train,num_boost_round=5000, early_stopping_rounds=10,evals=[(matrix_test,'test')]) return model start_time = time.time() model = XGBmodel(x_train, x_test, y_train, y_test, params )<define_variables>
df_all['Fare'] = pd.qcut(df_all['Fare'], 13 )
Titanic - Machine Learning from Disaster
8,988,266
time_taken = time.time() - start_time time_taken<load_from_csv>
df_all['Age'] = pd.qcut(df_all['Age'], 10 )
Titanic - Machine Learning from Disaster
8,988,266
test = pd.read_csv('.. /input/new-york-city-taxi-fare-prediction/test.csv') test = datetime_info(test) test = airport_dist(test) test['distance'] = sphere_dist(test['pickup_latitude'], test['pickup_longitude'], test['dropoff_latitude'] , test['dropoff_longitude']) test_key = test['key'] x_pred = test.drop(columns=['key', 'pickup_datetime']) prediction = model.predict(xgb.DMatrix(x_pred), ntree_limit=model.best_ntree_limit )<save_to_csv>
df_all['Ticket_Frequency'] = df_all.groupby('Ticket')['Ticket'].transform('count' )
Titanic - Machine Learning from Disaster
8,988,266
submission = pd.DataFrame({ "key": test_key, "fare_amount": prediction.round(2) }) submission.to_csv('taxi_fare_submission.csv',index=False) submission.head()<set_options>
df_all['Title'] = df_all['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0] df_all['Is_Married'] = 0 df_all['Is_Married'].loc[df_all['Title'] == 'Mrs'] = 1
Titanic - Machine Learning from Disaster
8,988,266
warnings.filterwarnings('ignore') pd.set_option('display.float_format', lambda x: '%.4f' % x) TRAIN_PATH = '.. /input/new-york-city-taxi-fare-prediction/train.csv' TEST_PATH = '.. /input/new-york-city-taxi-fare-prediction/test.csv'<load_from_csv>
def extract_surname(data): families = [] for i in range(len(data)) : name = data.iloc[i] if '(' in name: name_no_bracket = name.split('(')[0] else: name_no_bracket = name family = name_no_bracket.split(',')[0] title = name_no_bracket.split(',')[1].strip().split(' ')[0] for c in string.punctuation: family = family.replace(c, '' ).strip() families.append(family) return families df_all['Family'] = extract_surname(df_all['Name']) df_train = df_all.loc[:890] df_test = df_all.loc[891:] dfs = [df_train, df_test]
Titanic - Machine Learning from Disaster
8,988,266
test = pd.read_csv(TEST_PATH) print('Null values:',test.isnull().sum().sum()) test.head()<load_from_csv>
mean_survival_rate = np.mean(df_train['Survived']) train_family_survival_rate = [] train_family_survival_rate_NA = [] test_family_survival_rate = [] test_family_survival_rate_NA = [] for i in range(len(df_train)) : if df_train['Family'][i] in family_rates: train_family_survival_rate.append(family_rates[df_train['Family'][i]]) train_family_survival_rate_NA.append(1) else: train_family_survival_rate.append(mean_survival_rate) train_family_survival_rate_NA.append(0) for i in range(len(df_test)) : if df_test['Family'].iloc[i] in family_rates: test_family_survival_rate.append(family_rates[df_test['Family'].iloc[i]]) test_family_survival_rate_NA.append(1) else: test_family_survival_rate.append(mean_survival_rate) test_family_survival_rate_NA.append(0) df_train['Family_Survival_Rate'] = train_family_survival_rate df_train['Family_Survival_Rate_NA'] = train_family_survival_rate_NA df_test['Family_Survival_Rate'] = test_family_survival_rate df_test['Family_Survival_Rate_NA'] = test_family_survival_rate_NA train_ticket_survival_rate = [] train_ticket_survival_rate_NA = [] test_ticket_survival_rate = [] test_ticket_survival_rate_NA = [] for i in range(len(df_train)) : if df_train['Ticket'][i] in ticket_rates: train_ticket_survival_rate.append(ticket_rates[df_train['Ticket'][i]]) train_ticket_survival_rate_NA.append(1) else: train_ticket_survival_rate.append(mean_survival_rate) train_ticket_survival_rate_NA.append(0) for i in range(len(df_test)) : if df_test['Ticket'].iloc[i] in ticket_rates: test_ticket_survival_rate.append(ticket_rates[df_test['Ticket'].iloc[i]]) test_ticket_survival_rate_NA.append(1) else: test_ticket_survival_rate.append(mean_survival_rate) test_ticket_survival_rate_NA.append(0) df_train['Ticket_Survival_Rate'] = train_ticket_survival_rate df_train['Ticket_Survival_Rate_NA'] = train_ticket_survival_rate_NA df_test['Ticket_Survival_Rate'] = test_ticket_survival_rate df_test['Ticket_Survival_Rate_NA'] = test_ticket_survival_rate_NA
Titanic - Machine Learning from Disaster
8,988,266
df_temp = pd.read_csv(TRAIN_PATH, nrows=100000) profile = pandas_profiling.ProfileReport(df_temp, title="Profile Report", minimal=True, progress_bar=False) profile.to_notebook_iframe()<data_type_conversions>
for df in [df_train, df_test]: df['Survival_Rate'] =(df['Ticket_Survival_Rate'] + df['Family_Survival_Rate'])/ 2 df['Survival_Rate_NA'] =(df['Ticket_Survival_Rate_NA'] + df['Family_Survival_Rate_NA'])/ 2
Titanic - Machine Learning from Disaster
8,988,266
def clean_df(df): df['pickup_datetime'] = df['pickup_datetime'].str.slice(0, 15) df['pickup_datetime'] = pd.to_datetime(df['pickup_datetime'], utc=True, format='%Y-%m-%d %H:%M') df = df.assign(rev=df.dropoff_latitude<df.dropoff_longitude) idx =(df['rev'] == 1) df.loc[idx,['dropoff_longitude','dropoff_latitude']] = df.loc[idx,['dropoff_latitude','dropoff_longitude']].values df.loc[idx,['pickup_longitude','pickup_latitude']] = df.loc[idx,['pickup_latitude','pickup_longitude']].values criteria =( " 0 < fare_amount <= 500" " and 0 < passenger_count <= 6 " " and -75 <= pickup_longitude <= -72 " " and -75 <= dropoff_longitude <= -72 " " and 40 <= pickup_latitude <= 42 " " and 40 <= dropoff_latitude <= 42 " ) df =(df .dropna() .query(criteria) .reset_index() .drop(columns=['rev', 'index']) ) return df def load_df(nrows=None, features=None): cols = [ 'fare_amount', 'pickup_datetime','pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'passenger_count' ] df_as_list = [] for df_chunk in pd.read_csv(TRAIN_PATH, usecols=cols, nrows=nrows, chunksize=5000000): df_chunk = clean_df(df_chunk) if features == 'explore': df_chunk = exploration_features(df_chunk) elif features == 'model': df_chunk = modelling_features(df_chunk) else: df_chunk = df_chunk.drop(columns='pickup_datetime') df_as_list.append(df_chunk) df = pd.concat(df_as_list) return df<split>
non_numeric_features = ['Embarked', 'Sex', 'Deck', 'Title', 'Family_Size_Grouped', 'Age', 'Fare'] for df in dfs: for feature in non_numeric_features: df[feature] = LabelEncoder().fit_transform(df[feature] )
Titanic - Machine Learning from Disaster
8,988,266
def get_split_sets(train): x = train.drop(columns=['fare_amount']) y = train['fare_amount'].values x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.1, random_state=0) return x_train, x_val, y_train, y_val def lin_model(x_train, x_val, y_train, y_val): model = LinearRegression() model.fit(x_train, y_train) pred = model.predict(x_val) rmse = np.sqrt(mean_squared_error(y_val, pred)) return model, rmse, pred def knn_model(x_train, x_val, y_train, y_val, neighbors): min_rmse = 1000 for n in neighbors: knn = KNeighborsRegressor(n_neighbors=n) knn.fit(x_train, y_train) pred = knn.predict(x_val) rmse = np.sqrt(mean_squared_error(y_val, pred)) if rmse < min_rmse: min_rmse = rmse model = knn best_pred = pred print('Neighbours', n, 'RMSE', rmse) return model, min_rmse, best_pred def lgbm_model(params,x_train, x_val, y_train, y_val): lgbm_train = lgbm.Dataset(x_train, y_train, silent=True) lgbm_val = lgbm.Dataset(x_val, y_val, silent=True) model = lgbm.train(params=params, train_set=lgbm_train, valid_sets=lgbm_val, verbose_eval=100) pred = model.predict(x_val, num_iteration=model.best_iteration) rmse = np.sqrt(mean_squared_error(y_val, pred)) return model, rmse, pred<load_from_csv>
cat_features = ['Pclass', 'Sex', 'Deck', 'Embarked', 'Title', 'Family_Size_Grouped'] encoded_features = [] for df in dfs: for feature in cat_features: encoded_feat = OneHotEncoder().fit_transform(df[feature].values.reshape(-1, 1)).toarray() n = df[feature].nunique() cols = ['{}_{}'.format(feature, n)for n in range(1, n + 1)] encoded_df = pd.DataFrame(encoded_feat, columns=cols) encoded_df.index = df.index encoded_features.append(encoded_df) df_train = pd.concat([df_train, *encoded_features[:6]], axis=1) df_test = pd.concat([df_test, *encoded_features[6:]], axis=1 )
Titanic - Machine Learning from Disaster
8,988,266
train = load_df(1000000) x_train, x_val, y_train, y_val= get_split_sets(train) test = pd.read_csv(TEST_PATH) x_test = test.drop(columns=['key'] )<compute_test_metric>
df_all = concat_df(df_train, df_test) drop_cols = ['Deck', 'Embarked', 'Family', 'Family_Size', 'Family_Size_Grouped', 'Survived', 'Name', 'Parch', 'PassengerId', 'Pclass', 'Sex', 'SibSp', 'Ticket', 'Title', 'Ticket_Survival_Rate', 'Family_Survival_Rate', 'Ticket_Survival_Rate_NA', 'Family_Survival_Rate_NA'] df_all.drop(columns=drop_cols, inplace=True) df_all.head()
Titanic - Machine Learning from Disaster
8,988,266
lin_init_model, lin_init_rmse, lin_init_pred = lin_model(x_train, x_val, y_train, y_val )<find_best_params>
X_train = StandardScaler().fit_transform(df_train.drop(columns=drop_cols)) y_train = df_train['Survived'].values X_test = StandardScaler().fit_transform(df_test.drop(columns=drop_cols)) print('X_train shape: {}'.format(X_train.shape)) print('y_train shape: {}'.format(y_train.shape)) print('X_test shape: {}'.format(X_test.shape))
Titanic - Machine Learning from Disaster
8,988,266
k_choices = [10,20,30,40,50,60] knn_cols = ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude'] knn_init, knn_init_rmse, knn_init_pred = knn_model(x_train[knn_cols], x_val[knn_cols], y_train, y_val, k_choices )<train_model>
single_best_model = RandomForestClassifier(criterion='gini', n_estimators=1100, max_depth=5, min_samples_split=4, min_samples_leaf=5, max_features='auto', oob_score=True, random_state=SEED, n_jobs=-1, verbose=1) leaderboard_model = RandomForestClassifier(criterion='gini', n_estimators=1750, max_depth=7, min_samples_split=6, min_samples_leaf=6, max_features='auto', oob_score=True, random_state=SEED, n_jobs=-1, verbose=1 )
Titanic - Machine Learning from Disaster
8,988,266
lgbm_params = { 'objective': 'regression', 'boosting': 'gbdt', 'num_leaves': 400, 'learning_rate': 0.1, 'max_bin': 3000, 'num_rounds': 5000, 'early_stopping_rounds': 100, 'metric' : 'rmse' } lgbm_init_model, lgbm_init_rmse, lgbm_init_pred = lgbm_model(lgbm_params, x_train, x_val, y_train, y_val )<train_model>
N = 5 oob = 0 probs = pd.DataFrame(np.zeros(( len(X_test), N * 2)) , columns=['Fold_{}_Prob_{}'.format(i, j)for i in range(1, N + 1)for j in range(2)]) importances = pd.DataFrame(np.zeros(( X_train.shape[1], N)) , columns=['Fold_{}'.format(i)for i in range(1, N + 1)], index=df_all.columns) fprs, tprs, scores = [], [], [] skf = StratifiedKFold(n_splits=N, random_state=N, shuffle=True) for fold,(trn_idx, val_idx)in enumerate(skf.split(X_train, y_train), 1): print('Fold {} '.format(fold)) leaderboard_model.fit(X_train[trn_idx], y_train[trn_idx]) trn_fpr, trn_tpr, trn_thresholds = roc_curve(y_train[trn_idx], leaderboard_model.predict_proba(X_train[trn_idx])[:, 1]) trn_auc_score = auc(trn_fpr, trn_tpr) val_fpr, val_tpr, val_thresholds = roc_curve(y_train[val_idx], leaderboard_model.predict_proba(X_train[val_idx])[:, 1]) val_auc_score = auc(val_fpr, val_tpr) scores.append(( trn_auc_score, val_auc_score)) fprs.append(val_fpr) tprs.append(val_tpr) probs.loc[:, 'Fold_{}_Prob_0'.format(fold)] = leaderboard_model.predict_proba(X_test)[:, 0] probs.loc[:, 'Fold_{}_Prob_1'.format(fold)] = leaderboard_model.predict_proba(X_test)[:, 1] importances.iloc[:, fold - 1] = leaderboard_model.feature_importances_ oob += leaderboard_model.oob_score_ / N print('Fold {} OOB Score: {} '.format(fold, leaderboard_model.oob_score_)) print('Average OOB Score: {}'.format(oob))
Titanic - Machine Learning from Disaster
8,988,266
print('Linear Regression RMSE', lin_init_rmse) print('KNN RMSE', knn_init_rmse) print('LightGBM RMSE', lgbm_init_rmse )<compute_test_metric>
class_survived = [col for col in probs.columns if col.endswith('Prob_1')] probs['1'] = probs[class_survived].sum(axis=1)/ N probs['0'] = probs.drop(columns=class_survived ).sum(axis=1)/ N probs['pred'] = 0 pos = probs[probs['1'] >= 0.5].index probs.loc[pos, 'pred'] = 1 y_pred = probs['pred'].astype(int) submission_df = pd.DataFrame(columns=['PassengerId', 'Survived']) submission_df['PassengerId'] = df_test['PassengerId'] submission_df['Survived'] = y_pred.values submission_df.to_csv('submissions.csv', header=True, index=False) submission_df.head(10 )
Titanic - Machine Learning from Disaster
9,623,254
init_preds_ave =(lgbm_init_pred+knn_init_pred)/2 rmse = np.sqrt(mean_squared_error(y_val, init_preds_ave)) print('Combined RMSE: ', rmse )<prepare_x_and_y>
import matplotlib.pyplot as plt from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder
Titanic - Machine Learning from Disaster
9,623,254
def distance(lon1,lat1,lon2,lat2): az12,az21,dist = Geod(ellps='WGS84' ).inv(lon1,lat1,lon2,lat2) return dist def direction(lon1,lat1,lon2,lat2): az12,az21,dist = Geod(ellps='WGS84' ).inv(lon1,lat1,lon2,lat2) return az12 def shared_features(df): rows = len(df) nyc_long, nyc_lat = [-74.001541]*rows, [40.724944]*rows jfk_long, jfk_lat = [-73.785937]*rows, [40.645494]*rows lga_long, lga_lat = [-73.872067]*rows, [40.774071]*rows nla_long, nla_lat = [-74.177721]*rows, [40.690764]*rows chp_long, chp_lat = [-73.137393]*rows, [41.366138]*rows exp_long, exp_lat = [-74.0375]*rows, [40.736]*rows pickup_long = df.pickup_longitude.tolist() pickup_lat = df.pickup_latitude.tolist() dropoff_long = df.dropoff_longitude.tolist() dropoff_lat = df.dropoff_latitude.tolist() df = df.assign( year=df.pickup_datetime.dt.year, dayofyear=df.pickup_datetime.dt.dayofyear, weekday=df.pickup_datetime.dt.dayofweek, time=(df.pickup_datetime.dt.hour+df.pickup_datetime.dt.minute/5), distance=distance(pickup_long, pickup_lat, dropoff_long, dropoff_lat), direction=direction(pickup_long, pickup_lat, dropoff_long, dropoff_lat), pickup_dist_nyc=pd.Series(distance(pickup_long, pickup_lat, nyc_long, nyc_lat)) , dropoff_dist_nyc=pd.Series(distance(dropoff_long, dropoff_lat, nyc_long, nyc_lat)) , pickup_dist_jfk=pd.Series(distance(pickup_long, pickup_lat, jfk_long, jfk_lat)) , dropoff_dist_jfk=pd.Series(distance(dropoff_long, dropoff_lat, jfk_long, jfk_lat)) , pickup_dist_lga=pd.Series(distance(pickup_long, pickup_lat, lga_long, lga_lat)) , dropoff_dist_lga=pd.Series(distance(dropoff_long, dropoff_lat, lga_long, lga_lat)) , pickup_dist_nla=pd.Series(distance(pickup_long, pickup_lat, nla_long, nla_lat)) , dropoff_dist_nla=pd.Series(distance(dropoff_long, dropoff_lat, nla_long, nla_lat)) , pickup_dist_chp=pd.Series(distance(pickup_long, pickup_lat, chp_long, chp_lat)) , dropoff_dist_chp=pd.Series(distance(dropoff_long, dropoff_lat, chp_long, chp_lat)) , pickup_dist_exp=pd.Series(distance(pickup_long, 
pickup_lat, exp_long, exp_lat)) , dropoff_dist_exp=pd.Series(distance(dropoff_long, dropoff_lat, exp_long, exp_lat)) ) return df def exploration_features(df): df = shared_features(df) df =( df .assign( hour=df.pickup_datetime.dt.hour, close_to_airport='No', fare_per_km=df.fare_amount*1000/df.distance, direction_bucket = pd.cut(df.direction, np.linspace(-180, 180, 37)) , pickup_long_bucket=pd.cut(df.pickup_longitude, bins=2550, labels=False), pickup_lat_bucket=pd.cut(df.pickup_latitude, bins=2200, labels=False), dropoff_long_bucket=pd.cut(df.dropoff_longitude, bins=2550, labels=False), dropoff_lat_bucket=pd.cut(df.dropoff_latitude, bins=2200, labels=False), pickup_long_bucket_big=pd.cut(df.pickup_longitude, bins=255, labels=False), pickup_lat_bucket_big=pd.cut(df.pickup_latitude, bins=220, labels=False), dropoff_long_bucket_big=pd.cut(df.dropoff_longitude, bins=255, labels=False), dropoff_lat_bucket_big=pd.cut(df.dropoff_latitude, bins=220, labels=False) ) .drop(columns='pickup_datetime') .query("0 < distance") ) df.loc[(( df['pickup_dist_jfk']<1500)|(df['dropoff_dist_jfk']<1500)) , 'close_to_airport'] = 'JFK' df.loc[(( df['pickup_dist_lga']<1500)|(df['dropoff_dist_lga']<1500)) , 'close_to_airport'] = 'LaGuardia' df.loc[(( df['pickup_dist_nla']<1500)|(df['dropoff_dist_nla']<1500)) , 'close_to_airport'] = 'Newark' return df<load_pretrained>
df_train = pd.read_csv('/kaggle/input/titanic/train.csv') df_train.head(10 )
Titanic - Machine Learning from Disaster
9,623,254
train = load_df(5000000, features='explore' )<statistical_test>
df_test = pd.read_csv('/kaggle/input/titanic/test.csv') df_test.head(10 )
Titanic - Machine Learning from Disaster
9,623,254
ks = scipy.stats.ks_2samp( train.where(train.distance > 50 ).dropna() ['fare_amount'], train.where(train.distance <= 50 ).dropna() ['fare_amount'] ) print('p-value:', ks[1] )<count_values>
# Quick exploratory summary of the training set.
# NOTE(review): relies on df_train loaded in an earlier cell.
print('shape of the training data {}'.format(df_train.shape))
print('The number of passengers who survived {}'.format(np.array(df_train['Survived'].value_counts()[1])))
# fixed: the original reused value_counts()[1] (the survivor count) here,
# and the literal apostrophe in "didn't" broke the single-quoted string
print("The number of passengers who didn't survive {}".format(np.array(df_train['Survived'].value_counts()[0])))
# NOTE(review): integer [] lookups on the string-indexed Sex/groupby
# Series rely on pandas' positional fallback — confirm pandas version
print('There were {} men while {} women on Titanic'.format(df_train['Sex'].value_counts()[0], df_train['Sex'].value_counts()[1]))
print('Average age of passengers was {}'.format(df_train['Age'].mean()))
print('Average age of female passengers was {} while that of men was {}'.format(df_train.groupby('Sex')['Age'].mean()[0],
                                                                                df_train.groupby('Sex')['Age'].mean()[1]))
print()
print()
print("Checking for NULL values in each column")
print(df_train.isnull().sum())
df_train.groupby('Embarked')['Survived'].value_counts()
Titanic - Machine Learning from Disaster
9,623,254
long_trips = train[train.distance>75000].fare_amount.count() print(long_trips, 'trips over 75km.' )<feature_engineering>
# Impute missing values, label-encode the categoricals, and split the
# training frame into features / target.
df_train['Age'].fillna(value=df_train['Age'].mean(), inplace=True)
# NOTE(review): ffill copies the previous row's embarkation port —
# order-dependent; confirm this is intended over mode imputation
df_train['Embarked'].fillna(method='ffill', inplace=True)
df_test['Age'].fillna(value=df_test['Age'].mean(), inplace=True)
df_test['Fare'].fillna(value=df_test['Fare'].mean(), inplace=True)
sex = LabelEncoder()
embark = LabelEncoder()
df_train['Sex'] = sex.fit_transform(df_train['Sex'])
# NOTE(review): the encoders are re-fit on the test frame; consistent
# codes only because Sex/Embarked have the same category sets in both
df_test['Sex'] = sex.fit_transform(df_test['Sex'])
df_train['Embarked'] = embark.fit_transform(df_train['Embarked'])
df_test['Embarked'] = embark.fit_transform(df_test['Embarked'])
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
X_train = df_train[features]
y_train = df_train['Survived']
X_test = df_test[features]
Titanic - Machine Learning from Disaster
9,623,254
def modelling_features(df):
    """Cyclically encode the periodic columns produced by shared_features
    and drop the raw versions (plus the timestamp) afterwards.

    Each periodic column is mapped onto the unit circle as a (sin, cos)
    pair so that e.g. hour 23 and hour 0 end up close together.
    """
    df = shared_features(df)
    # (column, period) pairs driving the cyclic encoding
    periodic_cols = [('time', 24), ('direction', 360), ('dayofyear', 365), ('weekday', 6)]
    encoded = {}
    for col, period in periodic_cols:
        angle = 2 * np.pi * df[col] / period
        encoded['sin_' + col] = np.sin(angle)
        encoded['cos_' + col] = np.cos(angle)
    # coarse 37-bucket discretisation of the bearing, kept as an integer code
    encoded['direction_bucket'] = pd.cut(df['direction'], bins=37, labels=False)
    raw_cols = ['pickup_datetime', 'time', 'direction', 'weekday', 'dayofyear']
    return df.assign(**encoded).drop(columns=raw_cols)
# Fit a 125-tree random forest (depth capped at 8 to limit overfitting)
# on the engineered features and write the Kaggle submission file.
rfc = RandomForestClassifier(max_depth=8, n_estimators=125, random_state=4)
rfc = rfc.fit(X_train, y_train)
predict = rfc.predict(X_test)
output = pd.DataFrame({'PassengerId': df_test.PassengerId, 'Survived': predict})
output.to_csv('my_submission.csv', index=False)
Titanic - Machine Learning from Disaster
8,965,295
train = load_df(10000000, features='model') test['pickup_datetime'] = test['pickup_datetime'].str.slice(0, 15) test['pickup_datetime'] = pd.to_datetime(test['pickup_datetime'], utc=True, format='%Y-%m-%d %H:%M') test = modelling_features(test) train =(train .query(f'{test.pickup_longitude.min() -0.1} <= pickup_longitude <= {test.pickup_longitude.max() +0.1}') .query(f'{test.pickup_latitude.min() -0.1} <= pickup_latitude <= {test.pickup_latitude.max() +0.1}') .query(f'{test.dropoff_longitude.min() -0.1} <= dropoff_longitude <= {test.dropoff_longitude.max() +0.1}') .query(f'{test.dropoff_latitude.min() -0.1} <= dropoff_latitude <= {test.dropoff_latitude.max() +0.1}') ) x_train, x_val, y_train, y_val = get_split_sets(train) x_train['fare_per_km'] = y_train*1000/(x_train.distance+5) fares_by_direction = x_train.query('5 < distance' ).groupby('direction_bucket')['fare_per_km'].mean() x_train['adj_dist'] = [fares_by_direction[i] for i in x_train.direction_bucket]*x_train.distance/fares_by_direction.max() x_val['adj_dist'] = [fares_by_direction[i] for i in x_val.direction_bucket]*x_val.distance/fares_by_direction.max() test['adj_dist'] = [fares_by_direction[i] for i in test.direction_bucket]*test.distance/fares_by_direction.max() x_train = x_train.drop(columns=['fare_per_km', 'direction_bucket']) x_val = x_val.drop(columns=['direction_bucket']) x_test = test.drop(columns=['key', 'direction_bucket'] )<compute_test_metric>
from sklearn import preprocessing from tensorflow.python import keras from sklearn.impute import SimpleImputer import tensorflow as tf from tensorflow.python.keras.models import Sequential from tensorflow.python.keras.layers import Dense, Dropout
Titanic - Machine Learning from Disaster
8,965,295
lin_final_model, lin_final_rmse, lin_final_pred = lin_model(x_train, x_val, y_train, y_val )<find_best_params>
train_data = '/kaggle/input/titanic/train.csv' test_data = "/kaggle/input/titanic/test.csv"
Titanic - Machine Learning from Disaster
8,965,295
knn_cols = ['pickup_longitude','pickup_latitude','dropoff_longitude','dropoff_latitude'] k_choices = [18,24,30,40] knn_final_model, knn_final_rmse, knn_final_pred = knn_model(x_train[knn_cols], x_val[knn_cols], y_train, y_val, k_choices )<train_on_grid>
train_panda = pd.read_csv(train_data, index_col = "PassengerId") train_panda.head()
Titanic - Machine Learning from Disaster
8,965,295
# LightGBM hyper-parameters: gbdt boosting on a sqrt-transformed
# regression target, up to 5000 rounds with early stopping after 50
# stagnant rounds, scored by RMSE.
lgbm_params = {
    'objective': 'regression',
    'boosting': 'gbdt',
    'reg_sqrt': True,           # model sqrt(target); predictions are squared back
    'learning_rate': 0.03,
    'num_leaves': 1200,
    'max_depth': -1,            # no depth limit; complexity bounded by num_leaves
    'max_bin': 5000,
    'num_rounds': 5000,
    'early_stopping_round': 50,
    'metric': 'rmse'
}
lgbm_final_model, lgbm_final_rmse, lgbm_final_pred = lgbm_model(lgbm_params, x_train, x_val, y_train, y_val)
test_panda = pd.read_csv(test_data) test_panda.head()
Titanic - Machine Learning from Disaster
8,965,295
print('Linear Regression RMSE', lin_final_rmse) print('KNN RMSE', knn_final_rmse) print('LightGBM RMSE', lgbm_final_rmse )<compute_test_metric>
encoder = preprocessing.LabelEncoder()
Titanic - Machine Learning from Disaster
8,965,295
# Grid-search the KNN weight for the LightGBM/KNN blend on the
# validation set: try 101 weights in [0, 1] and keep the RMSE-minimising
# one in `alpha` (read by the submission cell below).
rmse_by_weight = {}
for weight in np.linspace(0, 1, 101):
    blended = lgbm_final_pred * (1 - weight) + knn_final_pred * weight
    rmse_by_weight[weight] = np.sqrt(mean_squared_error(y_val, blended))
alpha = min(rmse_by_weight, key=rmse_by_weight.get)
print('Best weight to give KNN: ', alpha)
cat_features = ["Sex", "Embarked"]
Titanic - Machine Learning from Disaster
8,965,295
# Blend the LightGBM and KNN test-set predictions using the weight
# `alpha` found on the validation grid search, then write the submission.
lgbm_test_pred = lgbm_final_model.predict(x_test, num_iteration=lgbm_final_model.best_iteration)
# the KNN model was trained on coordinates only, so restrict its input
knn_test_pred = knn_final_model.predict(x_test[['pickup_longitude', 'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude']])
submission_pred = (lgbm_test_pred * (1 - alpha) + knn_test_pred * alpha)
submission = pd.DataFrame({'key': test.key, 'fare_amount': submission_pred})
submission.to_csv('submission_10_10_20_comb.csv', index=False)
train_panda["Embarked"] = train_panda["Embarked"].astype(str )
Titanic - Machine Learning from Disaster
8,965,295
train = pd.read_csv(".. /input/new-york-city-taxi-fare-prediction/train.csv", nrows = 1000000) test = pd.read_csv(".. /input/new-york-city-taxi-fare-prediction/test.csv" )<sort_values>
encoded_train = train_panda[cat_features].apply(encoder.fit_transform) encoded_test = test_panda[cat_features].apply(encoder.fit_transform )
Titanic - Machine Learning from Disaster
8,965,295
train.isnull().sum().sort_values(ascending=False )<sort_values>
num_features = ["Survived","Pclass","Age","SibSp","Parch","Fare"] test_features= ["Pclass","Age","SibSp","Parch","Fare"]
Titanic - Machine Learning from Disaster
8,965,295
test.isnull().sum().sort_values(ascending=False )<drop_column>
training_data = train_panda[num_features].join(encoded_train) test_data = test_panda[test_features].join(encoded_test )
Titanic - Machine Learning from Disaster
8,965,295
train = train.drop(train[train.isnull().any(1)].index, axis = 0 )<drop_column>
training_data.isnull().sum()
Titanic - Machine Learning from Disaster
8,965,295
train = train.drop(train[train['fare_amount']<0].index, axis=0) train.shape<define_variables>
test_data.isnull().sum()
Titanic - Machine Learning from Disaster
8,965,295
train[train['passenger_count']>6]<drop_column>
my_imputer = SimpleImputer() imputed_train = pd.DataFrame(my_imputer.fit_transform(training_data)) imputed_test_data = pd.DataFrame(my_imputer.fit_transform(test_data))
Titanic - Machine Learning from Disaster
8,965,295
train = train.drop(train[train['passenger_count']==208].index, axis = 0 )<filter>
imputed_train.columns = training_data.columns imputed_test_data.columns = test_data.columns
Titanic - Machine Learning from Disaster
8,965,295
train[train['pickup_latitude']<-90]<filter>
X = imputed_train.drop("Survived", axis = 1) y = imputed_train["Survived"]
Titanic - Machine Learning from Disaster
8,965,295
train[train['pickup_latitude']>90]<drop_column>
tf.random.set_seed(42 )
Titanic - Machine Learning from Disaster
8,965,295
train = train.drop(((train[train['pickup_latitude']<-90])|(train[train['pickup_latitude']>90])).index, axis=0 )<filter>
model = Sequential()
Titanic - Machine Learning from Disaster
8,965,295
train[train['pickup_longitude']<-180]<filter>
model.add(Dense(7, activation = "relu", input_shape =(7,))) model.add(Dropout(0.2))
Titanic - Machine Learning from Disaster
8,965,295
train[train['pickup_longitude']>180]<drop_column>
model.add(Dense(50, activation = "relu")) model.add(Dropout(0.2)) model.add(Dense(50, activation = "relu"))
Titanic - Machine Learning from Disaster
8,965,295
train = train.drop(((train[train['pickup_longitude']<-180])|(train[train['pickup_longitude']>180])).index, axis=0 )<filter>
model.add(Dense(2, activation = "softmax"))
Titanic - Machine Learning from Disaster
8,965,295
train[train['dropoff_latitude']<-90]<filter>
model.compile(loss=keras.losses.sparse_categorical_crossentropy, optimizer = "adam", metrics = ['accuracy'] )
Titanic - Machine Learning from Disaster
8,965,295
train[train['dropoff_latitude']>90]<drop_column>
model.fit(X, y, batch_size=1, epochs=700, validation_split = 0.2, verbose = 0 )
Titanic - Machine Learning from Disaster
8,965,295
train = train.drop(((train[train['dropoff_latitude']<-90])|(train[train['dropoff_latitude']>90])).index, axis=0 )<filter>
preds = model.predict(imputed_test_data )
Titanic - Machine Learning from Disaster
8,965,295
train[train['dropoff_latitude']<-180]|train[train['dropoff_latitude']>180]<data_type_conversions>
predictions = np.array(model.predict(imputed_test_data)).argmax(axis=1 )
Titanic - Machine Learning from Disaster
8,965,295
train['key'] = pd.to_datetime(train['key']) train['pickup_datetime'] = pd.to_datetime(train['pickup_datetime'] )<data_type_conversions>
passenger_id=test_panda["PassengerId"] results=passenger_id.to_frame() results["Survived"]=predictions
Titanic - Machine Learning from Disaster
8,965,295
<install_modules><EOS>
results.to_csv("Titanic_ai_model.csv", index=False )
Titanic - Machine Learning from Disaster
9,301,232
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
warnings.simplefilter(action='ignore' )
Titanic - Machine Learning from Disaster
9,301,232
def haversine_distance(lat1, long1, lat2, long2):
    """Add an 'H_Distance' great-circle distance column (km) to the
    module-level `train` and `test` DataFrames.

    The parameters are COLUMN NAMES (strings), not coordinates.

    NOTE(review): mutates the global `train`/`test` frames as a side
    effect and returns only the distance Series of the last frame
    processed (`test`) — callers should rely on the added column, not
    the return value.
    """
    data = [train, test]
    for i in data:
        R = 6371  # mean Earth radius in km
        phi1 = np.radians(i[lat1])
        phi2 = np.radians(i[lat2])
        delta_phi = np.radians(i[lat2] - i[lat1])
        delta_lambda = np.radians(i[long2] - i[long1])
        # haversine formula
        a = np.sin(delta_phi / 2.0) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lambda / 2.0) ** 2
        c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
        d = (R * c)
        i['H_Distance'] = d
    return d
train_df = pd.read_csv('.. /input/titanic/train.csv') predict_df = pd.read_csv('.. /input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
9,301,232
haversine_distance('pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' )<feature_engineering>
df=pd.concat([train_df,predict_df], ignore_index=True, sort =False) df.head()
Titanic - Machine Learning from Disaster
9,301,232
# Derive calendar / time-of-day features from the parsed pickup
# timestamp for both the train and test frames (in place).
data = [train, test]
for i in data:
    i['Year'] = i['pickup_datetime'].dt.year
    i['Month'] = i['pickup_datetime'].dt.month
    i['Date'] = i['pickup_datetime'].dt.day
    i['Day of Week'] = i['pickup_datetime'].dt.dayofweek  # Monday == 0
    i['Hour'] = i['pickup_datetime'].dt.hour
df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
9,301,232
train.sort_values(['H_Distance','fare_amount'], ascending=False )<compute_test_metric>
df[['Sex', 'Survived']].groupby(['Sex'], as_index=False ).mean().sort_values(by='Survived', ascending=False )
Titanic - Machine Learning from Disaster
9,301,232
pearsonr(train['H_Distance'], train['fare_amount'] )<feature_engineering>
df['Sex'] = df['Sex'].map({'female': 1, 'male': 0} ).astype(int )
Titanic - Machine Learning from Disaster
9,301,232
# Bucket trip distances (H_Distance, km) into labelled ranges for
# exploratory counting. Built by boolean masking rather than pd.cut;
# the masks are mutually exclusive, but rows with NaN H_Distance fall
# into no bucket.
bins_0 = train.loc[(train['H_Distance'] == 0), ['H_Distance']]
bins_1 = train.loc[(train['H_Distance'] > 0) & (train['H_Distance'] <= 10), ['H_Distance']]
bins_2 = train.loc[(train['H_Distance'] > 10) & (train['H_Distance'] <= 50), ['H_Distance']]
bins_3 = train.loc[(train['H_Distance'] > 50) & (train['H_Distance'] <= 100), ['H_Distance']]
bins_4 = train.loc[(train['H_Distance'] > 100) & (train['H_Distance'] <= 200), ['H_Distance']]
bins_5 = train.loc[(train['H_Distance'] > 200) & (train['H_Distance'] <= 300), ['H_Distance']]
bins_6 = train.loc[(train['H_Distance'] > 300), ['H_Distance']]
# human-readable labels (cosmetic only; '51-100' / '100-200' overlap in
# wording, not in the underlying masks)
bins_0['bins'] = '0'
bins_1['bins'] = '0-10'
bins_2['bins'] = '11-50'
bins_3['bins'] = '51-100'
bins_4['bins'] = '100-200'
bins_5['bins'] = '201-300'
bins_6['bins'] = '>300'
dist_bins = pd.concat([bins_0, bins_1, bins_2, bins_3, bins_4, bins_5, bins_6])
dist_bins.columns
df['name_title'] = df['Name'].str.extract('([A-Za-z]+)\.', expand=False )
Titanic - Machine Learning from Disaster
9,301,232
Counter(dist_bins['bins'] )<filter>
df['name_title'].value_counts()
Titanic - Machine Learning from Disaster
9,301,232
train.loc[(( train['pickup_latitude']==0)&(train['pickup_longitude']==0)) &(( train['dropoff_latitude']!=0)&(train['dropoff_longitude']!=0)) &(train['fare_amount']==0)]<drop_column>
def clean_name_title(val):
    """Collapse honorifics extracted from passenger names into a small
    set of buckets: rare titles -> 'RARE', noble male/female titles ->
    'Mr'/'Mrs'; common titles pass through unchanged.
    """
    # fixed: 'Lady' and 'Dona' also appeared in this RARE list, which
    # shadowed the explicit Mrs mapping below and made it unreachable
    if val in ['Rev', 'Col', 'Mlle', 'Mme', 'Ms', 'Major', 'Jonkheer',
               'Countess', 'Capt', 'Dr']:
        return 'RARE'
    elif val in ['Sir', 'Don']:
        return 'Mr'
    elif val in ['Lady', 'Dona']:
        return 'Mrs'
    else:
        return val

df['name_title'] = df['name_title'].apply(clean_name_title)
df['name_title'].value_counts()
Titanic - Machine Learning from Disaster