kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
4,211,056
def calculate_direction(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
    """Return the initial great-circle bearing (radians, in (-pi, pi]) from
    pickup to dropoff.

    Standard bearing formula:
        theta = atan2(sin(dlon) * cos(lat2),
                      cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dlon))

    Fixes two defects in the original:
      * dlon was computed as pickup_lon - dropoff_lon (sign inverted), which
        mirrors east/west bearings;
      * the first arctan2 argument was np.sin(dlon * np.cos(dropoff_lat)) —
        the cos factor belongs OUTSIDE the sin.
    Also drops the unused R_earth and dlat locals.

    Accepts scalars or numpy arrays (fully vectorized).
    """
    pickup_lat, pickup_lon, dropoff_lat, dropoff_lon = map(
        np.radians, [pickup_lat, pickup_lon, dropoff_lat, dropoff_lon])
    dlon = dropoff_lon - pickup_lon  # east-positive longitude delta
    y = np.sin(dlon) * np.cos(dropoff_lat)
    x = (np.cos(pickup_lat) * np.sin(dropoff_lat)
         - np.sin(pickup_lat) * np.cos(dropoff_lat) * np.cos(dlon))
    return np.arctan2(y, x)
trainData.Age=trainData.Age.fillna(trainData.Age.mean()) testData.Age=testData.Age.fillna(trainData.Age.mean()) trainData.Fare=trainData.Fare.fillna(trainData.Fare.mean()) testData.Fare=testData.Fare.fillna(trainData.Fare.mean()) trainData.Embarked=trainData.Embarked.fillna(trainData.Embarked.mode() [0]) testData.Embarked=testData.Embarked.fillna(trainData.Embarked.mode() [0] )
Titanic - Machine Learning from Disaster
4,211,056
train_df['direction'] = calculate_direction(train_df['pickup_latitude'].values, train_df['pickup_longitude'].values, train_df['dropoff_latitude'].values , train_df['dropoff_longitude'].values) df_test['direction'] = calculate_direction(df_test['pickup_latitude'].values, df_test['pickup_longitude'].values, df_test['dropoff_latitude'].values , df_test['dropoff_longitude'].values )<feature_engineering>
trainData.drop(['PassengerId','Name','Cabin','Ticket'],axis=1,inplace=True) testData.drop(['PassengerId','Name','Cabin','Ticket'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
4,211,056
train_df['pickup_latitude'].apply(lambda x: np.radians(x)) train_df['pickup_longitude'].apply(lambda x: np.radians(x)) train_df['dropoff_latitude'].apply(lambda x: np.radians(x)) train_df['dropoff_longitude'].apply(lambda x: np.radians(x)) df_test['pickup_latitude'].apply(lambda x: np.radians(x)) df_test['pickup_longitude'].apply(lambda x: np.radians(x)) df_test['dropoff_latitude'].apply(lambda x: np.radians(x)) df_test['dropoff_longitude'].apply(lambda x: np.radians(x))<prepare_x_and_y>
combined=pd.concat([trainData, testData], sort=False) print(combined.dtypes.sort_values() )
Titanic - Machine Learning from Disaster
4,211,056
df_train.drop(columns=['pickup_datetime'], inplace=True) y = df_train['fare_amount'] df_train = df_train.drop(columns=['fare_amount'] )<split>
length = trainData.shape[0] combined=pd.concat([trainData, testData], sort=False) combined=pd.get_dummies(combined) trainData=combined[:length] testData=combined[length:] trainData.Survived=trainData.Survived.astype('int' )
Titanic - Machine Learning from Disaster
4,211,056
x_train,x_test,y_train,y_test = train_test_split(df_train,y,random_state=123,test_size=0.1 )<init_hyperparams>
x=trainData.drop("Survived",axis=1) y=trainData['Survived'] xtest=testData.drop("Survived",axis=1 )
Titanic - Machine Learning from Disaster
4,211,056
params = { 'boosting_type':'gbdt', 'objective': 'regression', 'nthread': 4, 'num_leaves': 31, 'learning_rate': 0.1, 'max_depth': -1, 'subsample': 0.8, 'bagging_fraction' : 1, 'max_bin' : 10000 , 'bagging_freq': 10, 'metric': 'rmse', 'zero_as_missing': True, 'num_rounds':50000 }<train_model>
from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import cross_val_score from sklearn.metrics import mean_absolute_error
Titanic - Machine Learning from Disaster
4,211,056
train_set = lgbm.Dataset(x_train, y_train, silent=False,categorical_feature=['year','month','day','day_of_week']) valid_set = lgbm.Dataset(x_test, y_test, silent=False,categorical_feature=['year','month','day','day_of_week']) model = lgbm.train(params, train_set = train_set, num_boost_round=10000,early_stopping_rounds=1000,verbose_eval=500, valid_sets=valid_set )<drop_column>
RF = RandomForestClassifier(random_state=1) results = cross_val_score(RF,x,y,scoring='accuracy',cv=5) print(results) np.mean(results)
Titanic - Machine Learning from Disaster
4,211,056
test_key = df_test['key'] df_test.drop(columns=["pickup_datetime",'key'], axis=1, inplace=True )<predict_on_test>
RF.fit(x, y)
Titanic - Machine Learning from Disaster
4,211,056
<save_to_csv><EOS>
predictions=RF.predict(xtest) column_name = pd.read_csv('.. /input/test.csv') output=pd.DataFrame({'PassengerId':column_name['PassengerId'],'Survived':predictions}) output.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
2,534,485
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<set_options>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns
Titanic - Machine Learning from Disaster
2,534,485
%env JOBLIB_TEMP_FOLDER=/tmp %matplotlib inline <load_from_csv>
%matplotlib inline
Titanic - Machine Learning from Disaster
2,534,485
train = pd.read_csv(".. /input/train.csv", parse_dates=['pickup_datetime'], usecols=range(1,8), nrows=700_000) test = pd.read_csv(".. /input/test.csv", parse_dates=['pickup_datetime'] )<drop_column>
train=pd.read_csv('.. /input/train.csv' )
Titanic - Machine Learning from Disaster
2,534,485
train = train.dropna(how = 'any', axis = 'rows') train = train[train.eval('(fare_amount > 0)&(passenger_count <= 6)')]<filter>
family=[1 if i[6]>0 or i[7]>0 else 0 for i in train.values] train['Family']=family
Titanic - Machine Learning from Disaster
2,534,485
train = train[(train.pickup_longitude >= -77)& (train.pickup_longitude <= -70)& (train.dropoff_longitude >= -77)& (train.dropoff_longitude <= 70)& (train.pickup_latitude >= 35)& (train.pickup_latitude <= 45)& (train.dropoff_latitude >= 35)& (train.dropoff_latitude <= 45) ]<feature_engineering>
category=['child' if i[5]<=16 else i[4] for i in train.values] train['Category']=category
Titanic - Machine Learning from Disaster
2,534,485
train.pickup_datetime = train.pickup_datetime.dt.tz_localize('UTC') train.pickup_datetime = train.pickup_datetime.dt.tz_convert(tz.gettz('America/New_York')) train['year'] = train.pickup_datetime.dt.year train['dayofweek'] = train.pickup_datetime.dt.dayofweek train['dayofyear'] = train.pickup_datetime.dt.dayofyear train['hourofday'] = train.pickup_datetime.dt.hour train = train.drop('pickup_datetime', axis=1 )<feature_engineering>
age_Pclass1=np.around(train[train['Pclass']==1]['Age'].mean()) age_Pclass2=np.around(train[train['Pclass']==2]['Age'].mean()) age_Pclass3=np.around(train[train['Pclass']==3]['Age'].mean()) def impute_age(x): if str(x[5] ).lower() =='nan': if x[2]==1: return age_Pclass1 elif x[2]==2: return age_Pclass2 else: return age_Pclass3 else: return x[5] train['Age']=train.apply(impute_age,axis=1 )
Titanic - Machine Learning from Disaster
2,534,485
airports = {'jfk': [40.6441666, -73.7822222], 'laguardia': [40.7747222, -73.8719444], 'newark': [40.6897222, -74.175]} pickup = train.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(airports.get('jfk')) ).miles, axis=1) dropoff = train.apply(lambda x: distance.distance(( x.dropoff_latitude, x.dropoff_longitude),(airports.get('jfk')) ).miles, axis=1) train['to_jfk'] = pd.concat(( pickup, dropoff), axis=1 ).min(axis=1) pickup = train.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(airports.get('laguardia')) ).miles, axis=1) dropoff = train.apply(lambda x: distance.distance(( x.dropoff_latitude, x.dropoff_longitude),(airports.get('laguardia')) ).miles, axis=1) train['to_laguardia'] = pd.concat(( pickup, dropoff), axis=1 ).min(axis=1) pickup = train.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(airports.get('newark')) ).miles, axis=1) dropoff = train.apply(lambda x: distance.distance(( x.dropoff_latitude, x.dropoff_longitude),(airports.get('newark')) ).miles, axis=1) train['to_newark'] = pd.concat(( pickup, dropoff), axis=1 ).min(axis=1) del pickup, dropoff<prepare_x_and_y>
embarked_mode=train['Embarked'].mode() def impute_embarked(x): if str(x ).lower() =='nan': return embarked_mode[0] else: return x train['Embarked']=train['Embarked'].apply(impute_embarked )
Titanic - Machine Learning from Disaster
2,534,485
y = train.fare_amount train = train.drop('fare_amount', axis=1) <choose_model_class>
embarked_dummy=pd.get_dummies(train['Embarked']) embarked_dummy.head()
Titanic - Machine Learning from Disaster
2,534,485
xgb_param = {'eval_metric': 'rmse', 'n_estimators': 3000, 'max_depth': 9, 'learning_rate': 0.1, 'subsample': 0.9, 'colsample_bytree': 0.8, 'gamma': 1e-4, 'reg_alpha': 1e-4, 'verbose': 0, 'n_jobs': -1 } xgb_model = xgb.XGBRegressor(**xgb_param) xgb_model.fit(train, y) xgb.plot_importance(xgb_model )<drop_column>
category_dummy=pd.get_dummies(train['Category']) category_dummy.head()
Titanic - Machine Learning from Disaster
2,534,485
test_key = test['key'] test = test.drop('key', axis=1) <feature_engineering>
train=pd.concat([train,embarked_dummy],axis=1) train=pd.concat([train,category_dummy],axis=1) train.head()
Titanic - Machine Learning from Disaster
2,534,485
test.pickup_datetime = test.pickup_datetime.dt.tz_localize('UTC') test.pickup_datetime = test.pickup_datetime.dt.tz_convert(tz.gettz('America/New_York')) test['year'] = test.pickup_datetime.dt.year test['dayofweek'] = test.pickup_datetime.dt.dayofweek test['dayofyear'] = test.pickup_datetime.dt.dayofyear test['hourofday'] = test.pickup_datetime.dt.hour test = test.drop('pickup_datetime', axis=1) test['distance'] = test.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(x.dropoff_latitude, x.dropoff_longitude)).miles, axis = 1) pickup = test.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(airports.get('jfk')) ).miles, axis=1) dropoff = test.apply(lambda x: distance.distance(( x.dropoff_latitude, x.dropoff_longitude),(airports.get('jfk')) ).miles, axis=1) test['to_jfk'] = pd.concat(( pickup, dropoff), axis = 1 ).min(axis=1) pickup = test.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(airports.get('laguardia')) ).miles, axis=1) dropoff = test.apply(lambda x: distance.distance(( x.dropoff_latitude, x.dropoff_longitude),(airports.get('laguardia')) ).miles, axis=1) test['to_laguardia'] = pd.concat(( pickup, dropoff), axis = 1 ).min(axis=1) pickup = test.apply(lambda x: distance.distance(( x.pickup_latitude, x.pickup_longitude),(airports.get('newark')) ).miles, axis=1) dropoff = test.apply(lambda x: distance.distance(( x.dropoff_latitude, x.dropoff_longitude),(airports.get('newark')) ).miles, axis=1) test['to_newark'] = pd.concat(( pickup, dropoff), axis = 1 ).min(axis=1) del pickup, dropoff<predict_on_test>
X=train.iloc[:,[2,5,9,12,14,15,17,18]].values y=train.iloc[:,1].values
Titanic - Machine Learning from Disaster
2,534,485
xgb_predict = xgb_model.predict(test )<save_to_csv>
scale=StandardScaler()
Titanic - Machine Learning from Disaster
2,534,485
xgb_submission = pd.DataFrame({ 'key': test_key, 'fare_amount': xgb_predict }) xgb_submission.to_csv("xgb_submission.csv", index=False )<import_modules>
X=scale.fit_transform(X )
Titanic - Machine Learning from Disaster
2,534,485
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import geopy.distance as geo import datetime import time import calendar from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,confusion_matrix from sklearn import preprocessing from sklearn import metrics<set_options>
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0 )
Titanic - Machine Learning from Disaster
2,534,485
%matplotlib inline<load_from_csv>
from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import AdaBoostClassifier
Titanic - Machine Learning from Disaster
2,534,485
df = pd.read_csv('.. /input/train.csv', nrows = 2000000 )<drop_column>
dtree=DecisionTreeClassifier() ml_algo=AdaBoostClassifier(base_estimator=dtree,n_estimators=2500,learning_rate=0.01 )
Titanic - Machine Learning from Disaster
2,534,485
unwanted_indices = df[(abs(df['pickup_latitude'])> 90)| (abs(df['dropoff_latitude'])> 90) ].index df.drop(list(unwanted_indices), inplace=True) del unwanted_indices unwanted_indices = df[(abs(df['pickup_longitude'])> 180)| (abs(df['dropoff_longitude'])> 180) ].index df.drop(list(unwanted_indices), inplace=True )<filter>
ml_algo.fit(X_train,y_train )
Titanic - Machine Learning from Disaster
2,534,485
df[(abs(df['pickup_longitude'])> 180)| (abs(df['dropoff_longitude'])> 180) ].index<filter>
confusion_matrix(y_test,ml_algo.predict(X_test))
Titanic - Machine Learning from Disaster
2,534,485
df[(abs(df['pickup_latitude'])> 90)| (abs(df['dropoff_latitude'])> 90) ].index<drop_column>
ml_algo.score(X_test,y_test )
Titanic - Machine Learning from Disaster
2,534,485
unwanted_indices = df[df['dropoff_latitude'].isnull() ].index df.drop(list(unwanted_indices), inplace=True) unwanted_indices = df[df['dropoff_longitude'].isnull() ].index df.drop(list(unwanted_indices), inplace=True )<drop_column>
ml_algo.score(X_train,y_train )
Titanic - Machine Learning from Disaster
2,534,485
if 'unwanted_indices' in globals() : del unwanted_indices unwanted_indices = df[(df['passenger_count'] > 6)|(df['passenger_count'] == 0)].index df.drop(list(unwanted_indices), inplace=True )<drop_column>
ml_algo.score(X,y )
Titanic - Machine Learning from Disaster
2,534,485
unwanted_indices = df[df['fare_amount']<=0].index df.drop(list(unwanted_indices), inplace=True )<drop_column>
ml_algo.fit(X,y )
Titanic - Machine Learning from Disaster
2,534,485
<normalization>
test=pd.read_csv('.. /input/test.csv' )
Titanic - Machine Learning from Disaster
2,534,485
df['Travel distance'] = list(map(lambda x1,x2,x3,x4: geo.distance(( x3,x1),(x4,x2)).miles, df['pickup_longitude'], df['dropoff_longitude'], df['pickup_latitude'], df['dropoff_latitude']))<data_type_conversions>
test['Fare'].fillna(test['Fare'].mean() ,inplace=True )
Titanic - Machine Learning from Disaster
2,534,485
newtimeStamp = pd.to_datetime(df['pickup_datetime'].apply(lambda x: x.split(' UTC')[0])) <feature_engineering>
age_Pclass1=np.around(test[test['Pclass']==1]['Age'].mean()) age_Pclass2=np.around(test[test['Pclass']==2]['Age'].mean()) age_Pclass3=np.around(test[test['Pclass']==3]['Age'].mean()) def impute_age(x): if str(x[4] ).lower() =='nan': if x[1]==1: return age_Pclass1 elif x[1]==2: return age_Pclass2 else: return age_Pclass3 else: return x[4] test['Age']=test.apply(impute_age,axis=1 )
Titanic - Machine Learning from Disaster
2,534,485
df['Hour'] = newtimeStamp.apply(lambda x : x.hour) df['Month'] = newtimeStamp.apply(lambda x : x.month) df['Date'] = newtimeStamp.apply(lambda x : x.date() )<define_variables>
family=[1 if i[5]>0 or i[6]>0 else 0 for i in test.values] test['Family']=family
Titanic - Machine Learning from Disaster
2,534,485
for x in df['pickup_datetime'] : y = x.split(' ')[2] if(y!='UTC'): print(y )<feature_engineering>
category=['child' if i[4]<=16 else i[3] for i in test.values] test['Category']=category
Titanic - Machine Learning from Disaster
2,534,485
Day_of_Week=newtimeStamp.apply( lambda x : calendar.day_name[datetime.date(x.year,x.month,x.day) .weekday() ] ) dmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thu',4:'Fri',5:'Sat',6:'Sun'} Day_of_Week = newtimeStamp.apply( lambda x : dmap[datetime.date(x.year,x.month,x.day) .weekday() ] ) df['Day of Week'] = Day_of_Week<feature_engineering>
embarked_dummy=pd.get_dummies(test['Embarked']) embarked_dummy.head()
Titanic - Machine Learning from Disaster
2,534,485
df['qty'] = df['pickup_datetime'].apply(lambda x: 1 )<feature_engineering>
category_dummy=pd.get_dummies(test['Category']) category_dummy.head()
Titanic - Machine Learning from Disaster
2,534,485
df['timestamp'] = newtimeStamp.apply(lambda x : time.mktime( (x.year,x.month,x.day, x.hour,x.minute,x.second, 1,x.day,-1)) )<groupby>
test=pd.concat([test,embarked_dummy],axis=1) test=pd.concat([test,category_dummy],axis=1) test.head()
Titanic - Machine Learning from Disaster
2,534,485
dfm=df.groupby(['Day of Week','Hour'] ).count() fp=dfm['qty'].unstack()<drop_column>
X_TEST=test.iloc[:,[1,4,8,11,13,14,16,17]].values
Titanic - Machine Learning from Disaster
2,534,485
dftrain = df.drop([ 'key', 'pickup_datetime', 'Hour', 'Month', 'Date', 'Day of Week', 'qty' ],axis=1 ).dropna() dftrain.head(2 )<prepare_x_and_y>
scale=StandardScaler()
Titanic - Machine Learning from Disaster
2,534,485
X_train = dftrain.drop('fare_amount',axis=1) y_train = dftrain['fare_amount']<split>
X_TEST=scale.fit_transform(X_TEST )
Titanic - Machine Learning from Disaster
2,534,485
XX_train, XX_test, yy_train, yy_test = train_test_split(X_train, y_train, test_size=0.30 )<compute_test_metric>
pred=ml_algo.predict(X_TEST ).reshape(-1,1 )
Titanic - Machine Learning from Disaster
2,534,485
<compute_test_metric>
id=test.iloc[:,0].values.reshape(-1,1) output=np.concatenate(( id,pred),axis=1 )
Titanic - Machine Learning from Disaster
2,534,485
<compute_test_metric>
submission=pd.DataFrame(output,columns=['PassengerId','Survived'] )
Titanic - Machine Learning from Disaster
2,534,485
<choose_model_class><EOS>
submission.to_csv('Submission.csv',index=False )
Titanic - Machine Learning from Disaster
2,508,663
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
warnings.filterwarnings(action='ignore', category=DataConversionWarning) warnings.filterwarnings(action='ignore', category=DeprecationWarning) warnings.filterwarnings(action='ignore', category=FutureWarning )
Titanic - Machine Learning from Disaster
2,508,663
dftest = pd.read_csv('.. /input/test.csv' )<feature_engineering>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") data = train.append(test, sort=False )
Titanic - Machine Learning from Disaster
2,508,663
newtimeStamp=pd.to_datetime(dftest['pickup_datetime']) dftest['Hour'] = newtimeStamp.apply(lambda x : x.hour) dftest['Month'] = newtimeStamp.apply(lambda x : x.month) dftest['Date'] = newtimeStamp.apply(lambda x : x.date() )<feature_engineering>
types = pd.DataFrame(data.dtypes ).rename(columns={0: 'type'} ).sort_values(by=['type'],ascending=False) types
Titanic - Machine Learning from Disaster
2,508,663
Day_of_Week=newtimeStamp.apply( lambda x : calendar.day_name[datetime.date(x.year,x.month,x.day) .weekday() ] ) dmap = {0:'Mon',1:'Tue',2:'Wed',3:'Thu',4:'Fri',5:'Sat',6:'Sun'} Day_of_Week = newtimeStamp.apply( lambda x : dmap[datetime.date(x.year,x.month,x.day) .weekday() ] ) dftest['Day of Week'] = Day_of_Week<feature_engineering>
def check_missing(df):
    """Return a per-column missing-value summary of *df*.

    Columns: 'Num' (count of nulls) and 'Rate' (percentage of nulls).
    """
    null_val = df.isnull().sum()
    percent = 100 * df.isnull().sum() / len(df)
    missing_table = pd.concat([null_val, percent], axis=1)
    col = missing_table.rename(columns={0: 'Num', 1: 'Rate'})
    return col

# Fixed: the original trailing statement was a garbled print with an
# unterminated string literal ('print("Data cols = ...'), a syntax error.
cols = check_missing(data)
types.join(cols).sort_values(by="Rate", ascending=False)
Titanic - Machine Learning from Disaster
2,508,663
dftest['qty'] = dftest['pickup_datetime'].apply(lambda x: 1 )<feature_engineering>
data.drop(['Cabin'], axis=1, inplace = True) data["Embarked"] = data["Embarked"].fillna(data["Embarked"].mode() [0]) data["Embarked"] = data["Embarked"].map({"S": 0, "C" : 1, "Q" : 2}) title_mapping = { ' (.+)Mr\.(.+)': 1, ' (.+)Master\.(.+)': 1, ' (.+)Dr\.(.+)': 2, ' (.+)Don\.(.+)': 2, ' (.+)Major\.(.+)': 2, ' (.+)Sir\.(.+)':2, ' (.+)Col\.(.+)': 2, ' (.+)Jonkheer\.(.+)': 2, ' (.+)Capt\.(.+)': 2,' (.+)Countess\.(.+)': 2, ' (.+)Dona\.(.+)': 2, ' (.+)Rev\.(.+)': 3, ' (.+)Ms\.(.+)': 4, ' (.*)Miss\.(.+)': 4, ' (.+)Mrs\.(.+)': 4, ' (.+)Mme\.(.+)': 4,' (.+)Lady\.(.+)': 4, ' (.+)Mlle\.(.+)': 4 } data["Title"] = data["Name"].replace(title_mapping, regex=True ).astype('int') data["Sex"] = data["Sex"].map({"male": 0, "female": 1} )
Titanic - Machine Learning from Disaster
2,508,663
dftest['timestamp'] = newtimeStamp.apply(lambda x : time.mktime( (x.year,x.month,x.day, x.hour,x.minute,x.second, 1,x.day,-1)) )<feature_engineering>
data['FamilySize'] = data["Parch"] + data["SibSp"] data['IsFamily'] = data["Parch"] + data["SibSp"] data.loc[data['IsFamily'] > 1, 'IsFamily'] = 2 data.loc[data['IsFamily'] == 1, 'IsFamily'] = 1 data.loc[data['IsFamily'] == 0, 'IsFamily'] = 0 DEFAULT_SURVIVAL_VALUE = 0.5 data['FamilySurvival'] = DEFAULT_SURVIVAL_VALUE data['LastName'] = data['Name'].apply(lambda x: str.split(x, ",")[0]) for grp, grp_df in data.groupby(['LastName', 'Fare']): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): data.loc[data['PassengerId'] == passID, 'FamilySurvival'] = 1 elif(smin==0.0): data.loc[data['PassengerId'] == passID, 'FamilySurvival'] = 0 for _, grp_df in data.groupby('Ticket'): if(len(grp_df)!= 1): for ind, row in grp_df.iterrows() : if(row['FamilySurvival'] == 0)|(row['FamilySurvival']== 0.5): smax = grp_df.drop(ind)['Survived'].max() smin = grp_df.drop(ind)['Survived'].min() passID = row['PassengerId'] if(smax == 1.0): data.loc[data['PassengerId'] == passID, 'FamilySurvival'] = 1 elif(smin==0.0): data.loc[data['PassengerId'] == passID, 'FamilySurvival'] = 0
Titanic - Machine Learning from Disaster
2,508,663
dftest['Travel distance'] = list(map(lambda x1,x2,x3,x4: geo.distance(( x3,x1),(x4,x2)).miles, dftest['pickup_longitude'], dftest['dropoff_longitude'], dftest['pickup_latitude'], dftest['dropoff_latitude']))<groupby>
train_target = data[:891]["Survived"].values data.drop(['Name', 'PassengerId', 'Age', 'Fare', 'Ticket', 'LastName'], axis = 1, inplace = True) data = pd.get_dummies(data, columns=["Embarked", "Title", "Sex", "IsFamily", "FamilySurvival"], drop_first=True) train = data[:891] test = data[891:] data.dtypes
Titanic - Machine Learning from Disaster
2,508,663
dfm=dftest.groupby(['Day of Week','Hour'] ).count() fp=dfm['qty'].unstack() fp<import_modules>
possible_features = train.columns.copy().drop('Survived') selector = SelectKBest(f_classif, len(possible_features)) selector.fit(train[possible_features], train_target) scores = -np.log10(selector.pvalues_) indices = np.argsort(scores)[::-1] print('Feature importances:') for i in range(len(scores)) : print('%.2f %s' %(scores[indices[i]], possible_features[indices[i]]))
Titanic - Machine Learning from Disaster
2,508,663
from sklearn import preprocessing<categorify>
fparams = \ ['Sex_1', 'Title_4', 'FamilySurvival_1.0', 'Pclass', 'FareBin', 'FamilySurvival_0.5', 'Embarked_1', 'IsFamily_1', 'IsFamily_2', 'Parch', 'Title_3', 'AgeBin', 'SibSp'] train_features = train[fparams].values test_features = test[fparams].values CV_SPLIT_NUM = 6 N_ESTIMATORS = 300 DEBUG_MODE = False
Titanic - Machine Learning from Disaster
2,508,663
coltest=list(dftest.columns) idy=coltest[2:7] idy.append(coltest[12]) idy.append(coltest[13]) idy<prepare_x_and_y>
rfgs_parameters = { 'n_estimators': [N_ESTIMATORS], 'max_depth' : [2,3,4], 'max_features': [2,3,4], "min_samples_split": [2,3,4], "min_samples_leaf": [2,3,4] } if not DEBUG_MODE: rfc_cv = GridSearchCV(RandomForestClassifier() , rfgs_parameters, cv=CV_SPLIT_NUM) rfc_cv.fit(train_features, train_target) print("RFC GridSearch score: "+str(rfc_cv.best_score_)) print("RFC GridSearch params: ") print(rfc_cv.best_params_ )
Titanic - Machine Learning from Disaster
2,508,663
X_train = dftrain[idy] y_train = dftrain['fare_amount'] X_test = dftest[idy]<import_modules>
gbcgs_parameters = { 'loss' : ["deviance","exponential"], 'n_estimators' : [N_ESTIMATORS], 'learning_rate': [0.02,0.03,0.04,0.05,0.06], 'max_depth': [2,3,4], 'max_features': [2,3,4], "min_samples_split": [2,3,4], 'min_samples_leaf': [2,3,4] } if not DEBUG_MODE: gbc_cv = GridSearchCV(GradientBoostingClassifier() , gbcgs_parameters, cv=CV_SPLIT_NUM) gbc_cv.fit(train_features, train_target) print("GBC GridSearch score: "+str(gbc_cv.best_score_)) print("GBC GridSearch params: ") print(gbc_cv.best_params_ )
Titanic - Machine Learning from Disaster
2,508,663
<choose_model_class>
svcgs_parameters = { 'kernel': ['rbf'], 'C': [10,20,30,40,50,60,70], 'gamma': [0.005,0.006,0.007,0.008,0.009,0.01,0.011], 'probability': [True] } if not DEBUG_MODE: svc_cv = GridSearchCV(svm.SVC() , svcgs_parameters, cv=CV_SPLIT_NUM) svc_cv.fit(train_features, train_target) print("SVC GridSearch score: "+str(svc_cv.best_score_)) print("SVC GridSearch params: ") print(svc_cv.best_params_ )
Titanic - Machine Learning from Disaster
2,508,663
<train_model>
if not DEBUG_MODE: vc = VotingClassifier(estimators=[('rfc', rfc_cv.best_estimator_),('gbc', gbc_cv.best_estimator_),('svm', svc_cv.best_estimator_)], voting='soft', n_jobs=4, weights=[3,1,2]) vc = vc.fit(train_features, train_target )
Titanic - Machine Learning from Disaster
2,508,663
<prepare_output><EOS>
arr = [ {'model': vc, 'filename': 'vc_submission.csv'}, {'model': rfc_cv.best_estimator_, 'filename': 'rfc_submission.csv'}, {'model': gbc_cv.best_estimator_, 'filename': 'gbc_submission.csv'}, {'model': svc_cv.best_estimator_, 'filename': 'svc_submission.csv'} ] if not DEBUG_MODE: for v in arr: survived = v['model'].predict(test_features) pred = pd.DataFrame(pd.read_csv(".. /input/test.csv")['PassengerId']) pred['Survived'] = survived.astype(int) pred.to_csv(".. /working/" + v['filename'], index = False )
Titanic - Machine Learning from Disaster
1,995,435
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from collections import Counter
Titanic - Machine Learning from Disaster
1,995,435
pd.DataFrame({ 'key':list(dftest['key']), 'fare_amount':fare } ).set_index('key' ).to_csv('sample_submission.csv', sep=',' )<define_variables>
df_train = pd.read_csv('.. /input/train.csv') df_test = pd.read_csv('.. /input/test.csv') df_sample= pd.read_csv('.. /input/gender_submission.csv' )
Titanic - Machine Learning from Disaster
1,995,435
traintypes = {'fare_amount': 'float32', 'pickup_datetime': 'str', 'pickup_longitude': 'float32', 'pickup_latitude': 'float32', 'dropoff_longitude': 'float32', 'dropoff_latitude': 'float32', 'passenger_count': 'uint8'} cols = list(traintypes.keys() )<load_from_csv>
def detect_outliers(df, n, features):
    """Tukey IQR outlier detection.

    Returns the index labels of rows that fall outside
    [Q1 - 1.5*IQR, Q3 + 1.5*IQR] in MORE than *n* of the given columns.
    """
    flagged = []
    for feature in features:
        q1, q3 = np.percentile(df[feature], [25, 75])
        step = 1.5 * (q3 - q1)
        outside = (df[feature] < q1 - step) | (df[feature] > q3 + step)
        flagged.extend(df[outside].index)
    hits = Counter(flagged)
    return [idx for idx, count in hits.items() if count > n]
Titanic - Machine Learning from Disaster
1,995,435
%%time train = pd.read_csv(TRAIN_PATH, usecols=cols, dtype=traintypes, nrows = 5000000 )<train_model>
Outliers_to_drop = detect_outliers(df_train,2,["Age","SibSp","Parch","Fare"]) df_train = df_train.drop(Outliers_to_drop, axis = 0 ).reset_index(drop=True )
Titanic - Machine Learning from Disaster
1,995,435
print("old size: %d" % len(train)) train = train[train.fare_amount >=0] print("New size: %d" % len(train))<count_missing_values>
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in df_train["Name"]] df_train["Title"] = pd.Series(dataset_title) df_train["Title"] = df_train["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') df_train["Title"] = df_train["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) df_train["Title"] = df_train["Title"].astype(int) df_train.drop(labels = ["Name"], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster
1,995,435
train.isnull().sum()<train_model>
def impute_age(cols):
    """Fill a missing Age with a per-Pclass constant (37 / 29 / 24).

    *cols* is an (Age, Pclass) pair, as produced by
    df[['Age', 'Pclass']].apply(impute_age, axis=1).
    """
    age, pclass = cols[0], cols[1]
    if not pd.isnull(age):
        return age
    # Hard-coded class medians used by this notebook — TODO confirm source.
    return {1: 37, 2: 29}.get(pclass, 24)
Titanic - Machine Learning from Disaster
1,995,435
print("old size: %d" % len(train)) train = train.dropna(how='any', axis=0) print("New size after dropping missing value: %d" % len(train))<set_options>
def impute_fare(cols):
    """Fill a missing Fare with a per-Pclass constant (84 / 20 / 13).

    *cols* is a (Fare, Pclass) pair, as produced by
    df[['Fare', 'Pclass']].apply(impute_fare, axis=1).
    """
    fare, pclass = cols[0], cols[1]
    if not pd.isnull(fare):
        return fare
    # Hard-coded class fare medians used by this notebook — TODO confirm source.
    return {1: 84, 2: 20}.get(pclass, 13)
Titanic - Machine Learning from Disaster
1,995,435
% matplotlib inline plt.style.use('seaborn-whitegrid' )<load_from_csv>
df_train['Age'] = df_train[['Age','Pclass']].apply(impute_age,axis=1 )
Titanic - Machine Learning from Disaster
1,995,435
test = pd.read_csv(".. /input/test.csv") print("shape of test data", test.shape) test.head()<filter>
sex = pd.get_dummies(df_train['Sex'],drop_first=True) embark = pd.get_dummies(df_train['Embarked'],drop_first=True) df_train = pd.concat([df_train,sex,embark],axis=1 )
Titanic - Machine Learning from Disaster
1,995,435
def select_within_test_boundary(df, BB):
    """Boolean mask of rows whose pickup AND dropoff lie inside bounding box BB.

    BB = (lon_min, lon_max, lat_min, lat_max); bounds are inclusive.
    """
    lon_min, lon_max, lat_min, lat_max = BB
    mask = df.pickup_longitude.between(lon_min, lon_max)
    mask &= df.pickup_latitude.between(lat_min, lat_max)
    mask &= df.dropoff_longitude.between(lon_min, lon_max)
    mask &= df.dropoff_latitude.between(lat_min, lat_max)
    return mask
df_train["Family"] = df_train["SibSp"] + df_train["Parch"] + 1 df_train['Single'] = df_train['Family'].map(lambda s: 1 if s == 1 else 0) df_train['SmallF'] = df_train['Family'].map(lambda s: 1 if s == 2 else 0) df_train['MedF'] = df_train['Family'].map(lambda s: 1 if 3 <= s <= 4 else 0) df_train['LargeF'] = df_train['Family'].map(lambda s: 1 if s >= 5 else 0) df_train['Senior'] = df_train['Age'].map(lambda s:1 if s>60 else 0 )
Titanic - Machine Learning from Disaster
1,995,435
def prepare_time_features(df):
    """Parse pickup_datetime (truncated to minute precision) and add
    hour_of_day / month / year / weekday calendar columns. Mutates and
    returns *df*."""
    stamp = pd.to_datetime(df['pickup_datetime'].str.slice(0, 16),
                           utc=True, format='%Y-%m-%d %H:%M')
    df['pickup_datetime'] = stamp
    df['hour_of_day'] = stamp.dt.hour
    df['month'] = stamp.dt.month
    df['year'] = stamp.dt.year
    df['weekday'] = stamp.dt.weekday
    return df
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in df_test["Name"]] df_test["Title"] = pd.Series(dataset_title) df_test["Title"] = df_test["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') df_test["Title"] = df_test["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) df_test["Title"] = df_test["Title"].astype(int) df_test.drop(labels = ["Name"], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster
1,995,435
train = prepare_time_features(train) test = prepare_time_features(test )<compute_test_metric>
df_test['Age'] = df_test[['Age','Pclass']].apply(impute_age,axis=1) sex = pd.get_dummies(df_test['Sex'],drop_first=True) embark = pd.get_dummies(df_test['Embarked'],drop_first=True) df_test = pd.concat([df_test,sex,embark],axis=1) df_test['Fare'].fillna(value=df_test['Fare'].median() ,inplace=True )
Titanic - Machine Learning from Disaster
1,995,435
def distance(lat1, lon1, lat2, lon2):
    """Haversine great-circle distance in statute miles.

    Inputs are in degrees; accepts scalars or numpy arrays.
    """
    deg2rad = 0.017453292519943295  # pi / 180
    # Haversine "half chord": sin^2(dlat/2) + cos(lat1)cos(lat2)sin^2(dlon/2)
    half_chord = (0.5 - np.cos((lat2 - lat1) * deg2rad) / 2
                  + np.cos(lat1 * deg2rad) * np.cos(lat2 * deg2rad)
                  * (1 - np.cos((lon2 - lon1) * deg2rad)) / 2)
    # 12742 km = Earth's diameter; 0.6213712 converts km to miles.
    return 0.6213712 * 12742 * np.arcsin(np.sqrt(half_chord))
df_test['Fare'] = df_test[['Fare','Pclass']].apply(impute_fare,axis=1 )
Titanic - Machine Learning from Disaster
1,995,435
train['distance_miles'] = distance(train.pickup_latitude, train.pickup_longitude, \ train.dropoff_latitude, train.dropoff_longitude )<compute_test_metric>
df_test["Fare"] = df_test["Fare"].map(lambda i: np.log(i)if i > 0 else 0 )
Titanic - Machine Learning from Disaster
1,995,435
test['distance_miles'] = distance(test.pickup_latitude, test.pickup_longitude, \ test.dropoff_latitude, test.dropoff_longitude )<categorify>
df_test["Family"] = df_test["SibSp"] + df_test["Parch"] + 1
Titanic - Machine Learning from Disaster
1,995,435
def transform(data): jfk =(-73.7781, 40.6413) ewr =(-74.1745, 40.6895) lgr =(-73.8740, 40.7769) data['pickup_distance_to_jfk'] = distance(jfk[1], jfk[0], data['pickup_latitude'], data['pickup_longitude']) data['dropoff_distance_to_jfk'] = distance(jfk[1], jfk[0], data['dropoff_latitude'], data['dropoff_longitude']) data['pickup_distance_to_ewr'] = distance(ewr[1], ewr[0], data['pickup_latitude'], data['pickup_longitude']) data['dropoff_distance_to_ewr'] = distance(ewr[1], ewr[0], data['dropoff_latitude'], data['dropoff_longitude']) data['pickup_distance_to_lgr'] = distance(lgr[1], lgr[0], data['pickup_latitude'], data['pickup_longitude']) data['dropoff_distance_to_lgr'] = distance(lgr[1], lgr[0], data['dropoff_latitude'], data['dropoff_longitude']) return data train = transform(train) test = transform(test )<drop_column>
df_test['Single'] = df_test['Family'].map(lambda s: 1 if s == 1 else 0) df_test['SmallF'] = df_test['Family'].map(lambda s: 1 if s == 2 else 0) df_test['MedF'] = df_test['Family'].map(lambda s: 1 if 3 <= s <= 4 else 0) df_test['LargeF'] = df_test['Family'].map(lambda s: 1 if s >= 5 else 0) df_test['Senior'] = df_test['Age'].map(lambda s:1 if s>60 else 0 )
Titanic - Machine Learning from Disaster
1,995,435
print("old size: %d" % len(train)) train = train.drop(index= train[train['fare_amount']==0].index, axis=0) print("New size: %d" % len(train))<drop_column>
df_train['Person'] = df_train[['Age','Sex']].apply(get_person,axis=1) df_test['Person'] = df_test[['Age','Sex']].apply(get_person,axis=1) person_dummies_train = pd.get_dummies(df_train['Person']) person_dummies_train.columns = ['Child','Female','Male'] person_dummies_train.drop(['Male'], axis=1, inplace=True) person_dummies_test = pd.get_dummies(df_test['Person']) person_dummies_test.columns = ['Child','Female','Male'] person_dummies_test.drop(['Male'], axis=1, inplace=True) df_train = df_train.join(person_dummies_train) df_test = df_test.join(person_dummies_test) df_train.drop(['Person'],axis=1,inplace=True) df_test.drop(['Person'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
1,995,435
print("old size: %d" % len(train)) train = train.drop(index= train[train['fare_amount'] < 2.5].index, axis=0) print("New size: %d" % len(train))<train_model>
df_train.drop('male',axis=1,inplace=True) df_test.drop('male',axis=1,inplace=True )
Titanic - Machine Learning from Disaster
1,995,435
print("old size: %d" % len(train)) train = train.drop(index= train[train.passenger_count >= 7].index, axis=0) print("New size: %d" % len(train))<count_values>
df_train.drop(['Cabin','Ticket'],axis = 1, inplace= True) df_test.drop(['Ticket','Cabin'],axis = 1, inplace= True )
Titanic - Machine Learning from Disaster
1,995,435
pd.cut(test['distance_miles'],np.linspace(0, 70, num = 8)).value_counts()<split>
df_train.drop(['Sex','Embarked'],axis=1,inplace=True) df_test.drop(['Sex','Embarked'],axis=1,inplace=True )
Titanic - Machine Learning from Disaster
1,995,435
X_train, X_test, y_train, y_test = train_test_split(df_train.drop('fare_amount', axis=1), df_train['fare_amount'], test_size=0.2, random_state = 42) print(X_train.shape) print(X_test.shape) print(y_train.shape) print(y_test.shape )<init_hyperparams>
X_train, X_test, y_train, y_test = train_test_split(df_train.drop('Survived',axis=1), df_train['Survived'], test_size=0.15, random_state=101 )
Titanic - Machine Learning from Disaster
1,995,435
params = { 'max_depth': 7, 'gamma' :0, 'eta':.03, 'subsample': 1, 'colsample_bytree': 0.9, 'objective':'reg:linear', 'eval_metric':'rmse', 'silent': 0 }<train_model>
dt = DecisionTreeClassifier() dt.fit(X_train,y_train); plt.figure(figsize=(18,18)) plot_tree(dt,filled=True);
Titanic - Machine Learning from Disaster
1,995,435
def XGBmodel(X_train,X_test,y_train,y_test,params): matrix_train = xgb.DMatrix(X_train,label=y_train) matrix_test = xgb.DMatrix(X_test,label=y_test) model=xgb.train(params=params, dtrain=matrix_train,num_boost_round=5000, early_stopping_rounds=10,evals=[(matrix_test,'test')]) return model model = XGBmodel(X_train,X_test,y_train,y_test,params )<predict_on_test>
XGB = XGBClassifier(max_depth=4,learning_rate=0.005,n_estimators=500,n_jobs=-1,min_child_weight=2) XGB.fit(X_train,y_train )
Titanic - Machine Learning from Disaster
1,995,435
prediction = model.predict(xgb.DMatrix(df_test), ntree_limit = model.best_ntree_limit ).tolist()<save_to_csv>
y_pred = pd.DataFrame(XGB.predict(df_test)) y_pred['Survived'] = y_pred[0] y_pred.drop(0,axis=1,inplace=True) y_pred['PassengerId'] = df_test['PassengerId'] y_pred_xgb = y_pred y_pred.to_csv('titanic_pred_xgb.csv',index=False )
Titanic - Machine Learning from Disaster
1,995,435
test = pd.read_csv(".. /input/test.csv") holdout = pd.DataFrame({'key': test['key'], 'fare_amount': prediction}) holdout.to_csv('xgb_with4mtrainedrows.csv', index=False )<load_from_csv>
Scaler1 = StandardScaler() Scaler2 = StandardScaler() X_train_scaled = Scaler1.fit_transform(X_train) df_test_scaled = Scaler2.fit_transform(df_test )
Titanic - Machine Learning from Disaster
1,995,435
<import_modules>
logmodel = LogisticRegression(C=10 ).fit(X_train,y_train) y_pred = pd.DataFrame(logmodel.predict(df_test)) y_pred['Survived'] = y_pred[0] y_pred.drop(0,axis=1,inplace=True) y_pred['PassengerId'] = df_test['PassengerId'] y_pred_lr = y_pred y_pred.to_csv('titanic_pred_logistic.csv',index=False )
Titanic - Machine Learning from Disaster
1,995,435
<load_from_csv>
RFC = RandomForestClassifier(n_estimators=500,max_depth=9,min_samples_split=3) RFC.fit(X_train,y_train )
Titanic - Machine Learning from Disaster
1,995,435
dtypes = {'fare_amount': 'float32', 'pickup_datetime': 'str', 'pickup_longitude': 'float32', 'pickup_latitude': 'float32', 'dropoff_longitude': 'float32', 'dropoff_latitude': 'float32', 'passenger_count': 'uint8'} val_size = 10_000 input_path = '.. /input/train.csv' val_df = pd.read_csv(input_path, usecols=dtypes.keys() , dtype=dtypes, nrows=val_size) val_df = remove_outliers(val_df) val_df = extract_features(val_df) X_val = val_df.drop(columns='fare_amount') y_val = val_df[['fare_amount']] dval = xgb.DMatrix(X_val, y_val, feature_names=X_val.columns) batch_size = 10_000_000 columns = pd.read_csv(input_path, nrows=0 ).columns train_df = pd.read_csv(input_path, usecols=dtypes.keys() , dtype=dtypes, names=columns, skiprows=val_size + 1, chunksize=batch_size) val_df.dtypes<prepare_x_and_y>
y_pred = pd.DataFrame(RFC.predict(df_test)) y_pred['Survived'] = y_pred[0] y_pred.drop(0,axis=1,inplace=True) y_pred['PassengerId'] = df_test['PassengerId'] y_pred_rf = y_pred y_pred.to_csv('titanic_pred_rfc.csv',index=False )
Titanic - Machine Learning from Disaster
1,995,435
params = {'learning_rate': 0.05, 'max_depth': 7, 'objective': 'reg:linear', 'eval_metric': 'rmse', 'subsample': 0.8, 'gamma': 1, 'silent': True, 'verbose_eval': True} num_rounds = 100 model = None for batch_df in tqdm(train_df): batch_df = remove_outliers(batch_df) batch_df = extract_features(batch_df) X_train = batch_df.drop(columns='fare_amount') y_train = batch_df[['fare_amount']] dtrain = xgb.DMatrix(X_train, y_train, feature_names=X_train.columns) model = xgb.train(params, dtrain, num_rounds, early_stopping_rounds=5, evals=[(dtrain, 'train'),(dval, 'eval')], xgb_model=model )<load_from_csv>
lgb = LGBMClassifier(learning_rate=0.01,max_depth=5,n_estimators=500,num_leaves=3 ).fit(X_train,y_train )
Titanic - Machine Learning from Disaster
1,995,435
test_df = pd.read_csv('.. /input/test.csv') test_df = extract_features(test_df) test_df.dtypes<save_to_csv>
y_pred = pd.DataFrame(lgb.predict(df_test)) y_pred['Survived'] = y_pred[0] y_pred.drop(0,axis=1,inplace=True) y_pred['PassengerId'] = df_test['PassengerId'] y_pred_lgb = y_pred y_pred.to_csv('titanic_pred_lgb.csv',index=False )
Titanic - Machine Learning from Disaster
1,995,435
X_test = test_df.drop(columns='key') dtest = xgb.DMatrix(X_test, feature_names=X_test.columns) y_pred = model.predict(dtest) submission = pd.DataFrame({'key': test_df['key'], 'fare_amount': y_pred}) submission.to_csv('submission.csv', index = False) !head submission.csv<import_modules>
print("XGB train score: ",round(XGB.score(X_train,y_train),2), " XGB test score: ",round(XGB.score(X_test,y_test),2)) print("Log-Reg.train score: ",round(logmodel.score(X_train,y_train),2)," Log-Reg.test score: ",round(logmodel.score(X_test,y_test),2)) print("Random Forest's train score: ",round(RFC.score(X_train,y_train),2), " Random Forest test score: ",round(RFC.score(X_test,y_test),2)) print("LGBM train score: ",round(lgb.score(X_train,y_train),2), " LGB Model test score: ",round(lgb.score(X_test,y_test),2))
Titanic - Machine Learning from Disaster
1,995,435
tqdm.pandas() <set_options>
y_valid_xgb = XGB.predict(X_test) y_valid_log = logmodel.predict(X_test) y_valid_rfc = RFC.predict(X_test) y_valid_lgb = lgb.predict(X_test )
Titanic - Machine Learning from Disaster
1,995,435
%matplotlib inline %precision 4 warnings.filterwarnings('ignore') plt.style.use('ggplot') np.set_printoptions(suppress=True) pd.set_option("display.precision", 15 )<load_from_csv>
fpr_xgb, tpr_xgb, thresholds_xgb = roc_curve(y_test, y_valid_xgb) roc_auc_xgb = auc(fpr_xgb, tpr_xgb) fpr_log, tpr_log, thresholds_log = roc_curve(y_test, y_valid_log) roc_auc_log = auc(fpr_log, tpr_log) fpr_rfc, tpr_rfc, thresholds_rfc = roc_curve(y_test, y_valid_rfc) roc_auc_rfc = auc(fpr_rfc, tpr_rfc) fpr_lgb, tpr_lgb, thresholds_lgb = roc_curve(y_test, y_valid_lgb) roc_auc_lgb = auc(fpr_lgb, tpr_lgb )
Titanic - Machine Learning from Disaster
1,995,435
df_train = pd.read_csv('.. /input/santander-customer-transaction-prediction/train.csv', index_col=0) y_train = df_train.pop('target') len_train = len(df_train) df_test = pd.read_csv('./.. /input/santander-customer-transaction-prediction/test.csv', index_col=0) df_all = pd.concat(( df_train, df_test), sort=False) prev_cols = df_all.columns scaler = StandardScaler() df_all[prev_cols] = scaler.fit_transform(df_all[prev_cols]) df_train = df_all[0:len_train] df_test = df_all[len_train:] <randomize_order>
y_pred_final = y_pred y_pred_final['Survived'] = round(0.25 * y_pred_lgb['Survived'] + 0.25 * y_pred_rf['Survived'] + 0.25 * y_pred_xgb['Survived'] + 0.25 * y_pred_lr['Survived']) y_pred_final['PassengerId'] = df_test['PassengerId'] y_pred_final['Survived'] = y_pred_final['Survived'].astype(int) y_pred_final.to_csv('titanic_pred_final.csv',index=False )
Titanic - Machine Learning from Disaster
1,995,435
def augment_train(df_train, y_train): t0 = df_train[y_train == 0].copy() t1 = df_train[y_train == 1].copy() i = 0 N = 3 for I in range(0): for col in df_train.columns: i = i + 1000 np.random.seed(i) np.random.shuffle(t0[col].values) np.random.shuffle(t1[col].values) df_train = pd.concat([df_train, t0.copy() ]) df_train = pd.concat([df_train, t1.copy() ]) y_train = pd.concat([y_train, pd.Series([0] * t0.shape[0]), pd.Series([1] * t1.shape[0])]) return df_train, y_train<count_unique_values>
y_pred_final['Survived'].value_counts()
Titanic - Machine Learning from Disaster
1,995,435
features = [c for c in df_train.columns if c not in ["ID_code","target"]] def detect_test(test_df): df_test=test_df.values unique_count = np.zeros_like(df_test) for feature in tqdm(range(df_test.shape[1])) : _, index_, count_ = np.unique(df_test[:, feature], return_counts=True, return_index=True) unique_count[index_[count_ == 1], feature] += 1 real_samples_indexes = np.argwhere(np.sum(unique_count, axis=1)> 0)[:, 0] synthetic_samples_indexes = np.argwhere(np.sum(unique_count, axis=1)== 0)[:, 0] return real_samples_indexes,synthetic_samples_indexes def generate_fe(trn, tst): real,syn = detect_test(df_test[features]) al = pd.concat([trn,tst,df_test.iloc[real]],axis=0) trn_fe = pd.DataFrame() tst_fe = pd.DataFrame() for c in features: trn[c+"_test"]=trn[c].map(al[c].value_counts()) trn[c+"_multi"] = trn[c+"_test"]*trn[c] trn_fe[c] = trn[c] trn_fe[c+"_test"] = trn[c+"_test"] trn_fe[c+"_muti"] = trn[c+"_multi"] tst[c+"_test"]=tst[c].map(al[c].value_counts()) tst_fe[c] = tst[c] tst[c+"_multi"] = tst[c+"_test"]*tst[c] tst_fe[c+"_test"] = tst[c+"_test"] tst_fe[c+"_muti"] = tst[c+"_multi"] return trn_fe, tst_fe <feature_engineering>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from collections import Counter
Titanic - Machine Learning from Disaster
1,995,435
def generate_fe_test(tst): re,sy = detect_test(tst[features]) al = pd.concat([df_train,df_test.iloc[re]],axis=0) tst_fe = pd.DataFrame() for c in features: tst[c+"_test"]=tst[c].map(al[c].value_counts()) tst_fe[c] = tst[c] tst[c+"_multi"] = tst[c+"_test"]*tst[c] tst_fe[c+"_test"] = tst[c+"_test"] tst_fe[c+"_muti"] = tst[c+"_multi"] return tst_fe test_fe = generate_fe_test(df_test[features] )<train_model>
df_train = pd.read_csv('.. /input/train.csv') df_test = pd.read_csv('.. /input/test.csv') df_sample= pd.read_csv('.. /input/gender_submission.csv' )
Titanic - Machine Learning from Disaster
1,995,435
class Logger(callbacks.Callback): def __init__(self, out_path='./', patience=30, lr_patience=3, out_fn='', log_fn=''): self.auc = 0 self.path = out_path self.fn = out_fn self.patience = patience self.lr_patience = lr_patience self.no_improve = 0 self.no_improve_lr = 0 def on_train_begin(self, logs={}): return def on_train_end(self, logs={}): return def on_epoch_begin(self, epoch, logs={}): return def on_batch_begin(self, batch, logs={}): return def on_batch_end(self, batch, logs={}): return def on_epoch_end(self, epoch, logs={}): cv_pred = self.model.predict(self.validation_data[0], batch_size=1024) cv_true = self.validation_data[1] auc_val = roc_auc_score(cv_true, cv_pred) if self.auc < auc_val: self.no_improve = 0 self.no_improve_lr = 0 print("Epoch %s - best AUC: %s" %(epoch, round(auc_val, 4))) self.auc = auc_val self.model.save(self.path + self.fn, overwrite=True) else: self.no_improve += 1 self.no_improve_lr += 1 print("Epoch %s - current AUC: %s" %(epoch, round(auc_val, 4))) if self.no_improve >= self.patience: self.model.stop_training = True if self.no_improve_lr >= self.lr_patience: lr = float(K.get_value(self.model.optimizer.lr)) K.set_value(self.model.optimizer.lr, 0.75*lr) print("Setting lr to {}".format(0.75*lr)) self.no_improve_lr = 0 return<prepare_x_and_y>
def detect_outliers(df,n,features): outlier_indices = [] for col in features: Q1 = np.percentile(df[col], 25) Q3 = np.percentile(df[col],75) IQR = Q3 - Q1 outlier_step = 1.5 * IQR outlier_list_col = df[(df[col] < Q1 - outlier_step)|(df[col] > Q3 + outlier_step)].index outlier_indices.extend(outlier_list_col) outlier_indices = Counter(outlier_indices) multiple_outliers = list(k for k, v in outlier_indices.items() if v > n) return multiple_outliers
Titanic - Machine Learning from Disaster
1,995,435
preds = [] c = 0 oof_preds = np.zeros(( len(df_train), 1)) cv = StratifiedKFold(n_splits=5,shuffle=True, random_state=3263) for train, valid in cv.split(df_train, y_train): print("VAL %s" % c) trn = df_train.iloc[train] tst = df_train.iloc[valid] trn, tst = generate_fe(trn, tst) X_train = np.reshape(trn.values,(-1,200,3)) y_train_ = y_train.iloc[train].values X_valid = np.reshape(tst.values,(-1,200,3)) y_valid = y_train.iloc[valid].values model = _Model() logger = Logger(patience=30, out_path='./', out_fn='cv_{}.h5'.format(c)) model.fit(X_train, y_train_, validation_data=(X_valid, y_valid), epochs=150, verbose=2, batch_size=1024, callbacks=[logger]) model.load_weights('cv_{}.h5'.format(c)) fe = [c for c in test_fe.columns if c not in ["ID_code","target"]] X_test = np.reshape(test_fe[fe].values,(200000, 200, 3)) curr_preds = model.predict(X_test, batch_size=2048) oof_preds[valid] = model.predict(X_valid) preds.append(curr_preds) c += 1 pd.DataFrame(oof_preds ).to_csv("NN_oof_preds.csv", index = False) auc = roc_auc_score(y_train, oof_preds) print("CV_AUC: {}".format(auc)) preds = np.asarray(preds) preds = preds.reshape(( 5, 200000)) preds_final = np.mean(preds.T, axis=1) submission = pd.read_csv('./.. /input/santander-customer-transaction-prediction/sample_submission.csv') submission['target'] = preds_final submission.to_csv('submission.csv', index=False) <set_options>
Outliers_to_drop = detect_outliers(df_train,2,["Age","SibSp","Parch","Fare"]) df_train = df_train.drop(Outliers_to_drop, axis = 0 ).reset_index(drop=True )
Titanic - Machine Learning from Disaster
1,995,435
pd.options.display.max_rows = 200 pd.options.display.max_columns = 200 sns.set(style="darkgrid") warnings.filterwarnings('ignore') SEED = 42<load_from_csv>
dataset_title = [i.split(",")[1].split(".")[0].strip() for i in df_train["Name"]] df_train["Title"] = pd.Series(dataset_title) df_train["Title"] = df_train["Title"].replace(['Lady', 'the Countess','Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') df_train["Title"] = df_train["Title"].map({"Master":0, "Miss":1, "Ms" : 1 , "Mme":1, "Mlle":1, "Mrs":1, "Mr":2, "Rare":3}) df_train["Title"] = df_train["Title"].astype(int) df_train.drop(labels = ["Name"], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster