Dataset columns (one record per code cell):
  kernel_id    int64    values range from 24.2k to 23.3M
  prompt       string   lengths range from 8 to 1.85M characters
  completion   string   lengths range from 1 to 182k characters
  comp_name    string   lengths range from 5 to 57 characters
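The rows below appear to cycle through kernel_id, prompt (a code cell followed by a next-operation tag such as <count_values>), completion, and comp_name. As a minimal sketch of working with such a dump, assuming the records are available as a parquet file (the file name here is hypothetical):

import pandas as pd

# Hypothetical file; the dump does not say where the records are stored.
df = pd.read_parquet('kaggle_code_cells.parquet')
print(df.dtypes)                              # kernel_id: int64; the rest: object
print(df['comp_name'].value_counts().head())  # e.g. 'Titanic - Machine Learning from Disaster'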
9,401,023
train_df= train_df.drop(train_df[train_df['fare_amount']<0].index, axis = 0) train_df.shape<count_values>
all_data['Age'].fillna(all_data.groupby(['title'])['Age'].transform('median'), inplace = True )
Titanic - Machine Learning from Disaster
9,401,023
Counter(train_df['passenger_count']>6 )<drop_column>
def splitAgeColumns(x):
    if x <= 16.0:
        return 0
    elif x > 16.0 and x <= 26.0:
        return 1
    elif x > 26.0 and x <= 36.0:
        return 2
    elif x > 36.0 and x <= 46.0:
        return 3
    else:
        return 4
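The threshold chain above can be expressed equivalently with pd.cut; a sketch with the same right-closed bin edges, assuming np is numpy and no missing ages remain after the fillna above:

all_data['Age'] = pd.cut(all_data['Age'], bins=[-np.inf, 16, 26, 36, 46, np.inf],
                         labels=[0, 1, 2, 3, 4]).astype(int)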
Titanic - Machine Learning from Disaster
9,401,023
train_df= train_df.drop(train_df[train_df['passenger_count']>6].index, axis = 0) train_df.shape<count_values>
all_data['Age'] = all_data['Age'].apply(lambda x : splitAgeColumns(x))
Titanic - Machine Learning from Disaster
9,401,023
Counter(train_df['pickup_latitude']<-90 )<count_values>
all_data['calculate_fare'] = all_data['Fare'] /(all_data['Parch'] + all_data['SibSp'] + 1 )
Titanic - Machine Learning from Disaster
9,401,023
Counter(train_df['pickup_latitude']>90 )<drop_column>
def splitFareColumns(x):
    if x <= 5.0:
        return 0
    elif x > 5.0 and x <= 10.0:
        return 1
    elif x > 10.0 and x <= 15.0:
        return 2
    elif x > 15.0 and x <= 20.0:
        return 3
    elif x > 20.0 and x <= 25.0:
        return 4
    else:
        return 5
Titanic - Machine Learning from Disaster
9,401,023
train_df = train_df.drop(train_df[(train_df['pickup_latitude'] < -90) | (train_df['pickup_latitude'] > 90)].index, axis=0)<count_values>
all_data['calculate_fare'] = all_data['calculate_fare'].apply(lambda x : splitFareColumns(x))
Titanic - Machine Learning from Disaster
9,401,023
Counter(train_df['pickup_longitude']<-180 )<count_values>
def splitSibSpColumns(x):
    if x <= 0.0:
        return 0
    elif x > 0.0 and x <= 1.0:
        return 1
    else:
        return 2

def splitParchColumns(x):
    if x <= 0.0:
        return 0
    elif x > 0.0 and x <= 2.0:
        return 1
    else:
        return 2
Titanic - Machine Learning from Disaster
9,401,023
Counter(train_df['pickup_longitude']>180 )<drop_column>
all_data['SibSp'] = all_data['SibSp'].apply(lambda x : splitSibSpColumns(x)) all_data['Parch'] = all_data['Parch'].apply(lambda x : splitParchColumns(x))
Titanic - Machine Learning from Disaster
9,401,023
train_df = train_df.drop(( train_df[train_df['pickup_longitude']<-180] ).index, axis=0 )<data_type_conversions>
data = [train, test]
for dataset in data:
    dataset['pclass_fare'] = dataset['Fare'] * dataset['Pclass']
Titanic - Machine Learning from Disaster
9,401,023
train_df['key']=pd.to_datetime(train_df['key']) train_df['pickup_datetime']=pd.to_datetime(train_df['pickup_datetime'] )<data_type_conversions>
percent_null_value(train )
Titanic - Machine Learning from Disaster
9,401,023
test_df['key']=pd.to_datetime(test_df['key']) test_df['pickup_datetime']=pd.to_datetime(test_df['pickup_datetime'] )<feature_engineering>
df = all_data.drop(['Name', 'Ticket', 'Survived', 'Fare'], axis = 1 )
Titanic - Machine Learning from Disaster
9,401,023
data = [train_df, test_df]
for i in data:
    i['date'] = i['pickup_datetime'].dt.day
    i['month'] = i['pickup_datetime'].dt.month
    i['day_of_week'] = i['pickup_datetime'].dt.dayofweek
    i['hour'] = i['pickup_datetime'].dt.hour
    i['year'] = i['pickup_datetime'].dt.year<compute_test_metric>
mapping = {'male' : 0, 'female' : 1} df['Sex'] = df['Sex'].map(mapping )
Titanic - Machine Learning from Disaster
9,401,023
def sphere_distance(lat1, long1, lat2, long2):
    data = [train_df, test_df]
    for i in data:
        R = 6367
        phi1 = np.radians(i[lat1])
        phi2 = np.radians(i[lat2])
        delta_phi = np.radians(i[lat2] - i[lat1])
        delta_lambda = np.radians(i[long2] - i[long1])
        a = np.sin(delta_phi / 2.0)**2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lambda / 2.0)**2
        c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
        d = R * c
        i['S_Distance'] = d
    return d<sort_values>
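A quick sanity check of the haversine math on known coordinates (a sketch, not part of the notebook; JFK to Times Square is roughly 21 km):

import numpy as np
phi1, phi2 = np.radians(40.6413), np.radians(40.7580)
delta_phi = np.radians(40.7580 - 40.6413)
delta_lambda = np.radians(-73.9855 - (-73.7781))
a = np.sin(delta_phi / 2)**2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lambda / 2)**2
print(6367 * 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)))  # ~21.8 km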
def getOneHotEncode(df, col): cat = pd.get_dummies(df[col], prefix = col) df = pd.concat([df, cat], axis = 1) df.drop([col], axis = 1, inplace = True) return df
Titanic - Machine Learning from Disaster
9,401,023
train_df.sort_values(['S_Distance','fare_amount'], ascending=False )<filter>
one_hot_columns = ['Pclass', 'Cabin', 'Embarked', 'title']
Titanic - Machine Learning from Disaster
9,401,023
dis_0 = train_df.loc[(train_df['S_Distance'] == 0), ['S_Distance']]
dis_1 = train_df.loc[(train_df['S_Distance'] > 0) & (train_df['S_Distance'] <= 10), ['S_Distance']]
dis_2 = train_df.loc[(train_df['S_Distance'] > 10) & (train_df['S_Distance'] <= 50), ['S_Distance']]
dis_3 = train_df.loc[(train_df['S_Distance'] > 50) & (train_df['S_Distance'] <= 100), ['S_Distance']]
dis_4 = train_df.loc[(train_df['S_Distance'] > 100) & (train_df['S_Distance'] <= 200), ['S_Distance']]
dis_5 = train_df.loc[(train_df['S_Distance'] > 200) & (train_df['S_Distance'] <= 300), ['S_Distance']]
dis_6 = train_df.loc[(train_df['S_Distance'] > 300) & (train_df['S_Distance'] <= 500), ['S_Distance']]
dis_7 = train_df.loc[(train_df['S_Distance'] > 500), ['S_Distance']]
dis_0['bins'] = '0'
dis_1['bins'] = '0-10'
dis_2['bins'] = '11-50'
dis_3['bins'] = '51-100'
dis_4['bins'] = '101-200'
dis_5['bins'] = '201-300'
dis_6['bins'] = '301-500'
dis_7['bins'] = '>500'
dis_bin = pd.concat([dis_0, dis_1, dis_2, dis_3, dis_4, dis_5, dis_6, dis_7])
dis_bin<count_values>
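The eight slice-and-label steps above collapse to a single pd.cut call; a sketch with the same edges and labels:

bins = [-np.inf, 0, 10, 50, 100, 200, 300, 500, np.inf]
labels = ['0', '0-10', '11-50', '51-100', '101-200', '201-300', '301-500', '>500']
dis_bin = pd.DataFrame({'S_Distance': train_df['S_Distance'],
                        'bins': pd.cut(train_df['S_Distance'], bins=bins, labels=labels)})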
df = df.sort_values('PassengerId' )
Titanic - Machine Learning from Disaster
9,401,023
x=Counter(dis_bin['bins']) x<filter>
mapping = {'S' : 0, 'C' : 1, 'Q' : 2} df['Embarked'] = df['Embarked'].map(mapping )
Titanic - Machine Learning from Disaster
9,401,023
train_df.loc[(( train_df['pickup_latitude']==0)&(train_df['pickup_longitude']==0)) &(( train_df['dropoff_latitude']!=0)&(train_df['dropoff_longitude']!=0)) &(train_df['fare_amount']==0)]<filter>
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
Titanic - Machine Learning from Disaster
9,401,023
train_df.loc[(( train_df['pickup_latitude']==0)&(train_df['pickup_longitude']==0)) &(( train_df['dropoff_latitude']!=0)&(train_df['dropoff_longitude']!=0)) &(train_df['fare_amount']==0)]<drop_column>
train, test = df[df.PassengerId <= 891], df[df.PassengerId > 891] train, test = train.drop(['PassengerId'], axis = 1), test.drop(['PassengerId'], axis = 1) X_train, X_val, Y_train, Y_val = train_test_split(train, survived, test_size = 0.2, shuffle = True, random_state = 1 )
Titanic - Machine Learning from Disaster
9,401,023
train_df = train_df.drop(train_df.loc[(( train_df['pickup_latitude']==0)&(train_df['pickup_longitude']==0)) &(( train_df['dropoff_latitude']!=0)&(train_df['dropoff_longitude']!=0)) &(train_df['fare_amount']==0)].index, axis=0 )<drop_column>
clf = SVC(kernel = 'linear') cross_val_score(clf, train, survived, cv = 5 )
Titanic - Machine Learning from Disaster
9,401,023
train_df = train_df.drop(train_df.loc[(( train_df['pickup_latitude']==0)&(train_df['pickup_longitude']==0)) &(( train_df['dropoff_latitude']!=0)&(train_df['dropoff_longitude']!=0)) &(train_df['fare_amount']==0)].index, axis=0) <filter>
clf = XGBClassifier() cross_val_score(clf, train, survived, cv = 5 )
Titanic - Machine Learning from Disaster
9,401,023
high_distance = train_df.loc[(train_df['S_Distance']>200)&(train_df['fare_amount']!=0)]<feature_engineering>
clf = DecisionTreeClassifier() cross_val_score(clf, train, survived, cv = 5 )
Titanic - Machine Learning from Disaster
9,401,023
high_distance['S_Distance'] = high_distance.apply( lambda row:(row['fare_amount'] - 2.50)/1.56, axis=1 )<filter>
clf = RandomForestClassifier(n_estimators=400) cross_val_score(clf, train, survived, cv = 5 )
Titanic - Machine Learning from Disaster
9,401,023
train_df[train_df['S_Distance']==0]<filter>
clf = CatBoostClassifier(verbose = False) cross_val_score(clf, train, survived, cv = 5 )
Titanic - Machine Learning from Disaster
9,401,023
train_df[(train_df['S_Distance']==0)&(train_df['fare_amount']==0)]<drop_column>
clf = SVC() clf.fit(X_train, Y_train )
Titanic - Machine Learning from Disaster
9,401,023
train_df = train_df.drop(train_df[(train_df['S_Distance']==0)&(train_df['fare_amount']==0)].index, axis = 0 )<define_variables>
y_train_pred = clf.predict(X_train) y_val_pred = clf.predict(X_val) print('train accuracy: {}%'.format(accuracy_score(y_train_pred, Y_train))) print('validation accuracy: {}%'.format(accuracy_score(y_val_pred, Y_val)) )
Titanic - Machine Learning from Disaster
9,401,023
rush_hour = train_df.loc[(((train_df['hour']>=6)&(train_df['hour']<=20)) &(( train_df['day_of_week']>=1)&(train_df['day_of_week']<=5)) &(train_df['S_Distance']==0)&(train_df['fare_amount'] < 2.5)) ] rush_hour<drop_column>
y_test = clf.predict(test) result = pd.read_csv('../input/titanic/gender_submission.csv') result['Survived'] = y_test result.to_csv('submission.csv', index=False )
Titanic - Machine Learning from Disaster
9,408,731
train_df=train_df.drop(rush_hour.index,axis=0 )<filter>
train_X = pd.read_csv("../input/titanic/train.csv") test_y = pd.read_csv("../input/titanic/test.csv") print("Test and Train FILES are loaded") train_X.columns
Titanic - Machine Learning from Disaster
9,408,731
non_rush_hour = train_df.loc[(((train_df['hour']<6)|(train_df['hour']>20)) &(( train_df['day_of_week']>=1)&(train_df['day_of_week']<=5)) &(train_df['S_Distance']==0)&(train_df['fare_amount'] < 3.0)) ]<filter>
features = ["Pclass", "Sex", "SibSp", "Parch"] train_y = train_X['Survived'] train_f= train_X[features] le = LabelEncoder() train_f['Sex']=le.fit_transform(train_f['Sex']) test_y['Sex'] = le.fit_transform(test_y['Sex'])
Titanic - Machine Learning from Disaster
9,408,731
non_rush_hour = train_df.loc[(((train_df['hour']<6)|(train_df['hour']>20)) &(( train_df['day_of_week']>=1)&(train_df['day_of_week']<=5)) &(train_df['S_Distance']==0)&(train_df['fare_amount'] < 3.0)) ] non_rush_hour<feature_engineering>
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(train_f,train_y) predictions = model.predict(test_y[features]) predictions
Titanic - Machine Learning from Disaster
9,408,731
train_df.loc[(train_df['S_Distance']!=0)&(train_df['fare_amount']==0)]<filter>
output = pd.DataFrame({"PassengerId" :test_y['PassengerId'] , 'Survived':predictions}) output.to_csv('submission1.csv',index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
9,101,854
scenario_3 = train_df.loc[(train_df['S_Distance']!=0)&(train_df['fare_amount']==0)] scenario_3<filter>
titanic=pd.read_csv('/kaggle/input/titanic/train.csv') titanic.head()
Titanic - Machine Learning from Disaster
9,101,854
scenario_3 = train_df.loc[(train_df['S_Distance']!=0)&(train_df['fare_amount']==0)]<feature_engineering>
titanic_dummies=pd.get_dummies(data=titanic,columns=['Sex','Embarked'],drop_first=True) titanic_dummies.info()
Titanic - Machine Learning from Disaster
9,101,854
scenario_3['fare_amount'] = scenario_3.apply( lambda row:(( row['S_Distance'] * 1.56)+ 2.50), axis=1 )<filter>
X=titanic_dummies.loc[:,['Sex_male','SibSp','Parch','Pclass','Fare','Embarked_S','Embarked_Q','Age','Fare']] y=titanic_dummies['Survived'] X
Titanic - Machine Learning from Disaster
9,101,854
train_df.loc[(train_df['S_Distance']==0)&(train_df['fare_amount']!=0)]<filter>
dt=RandomForestClassifier() X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=42) dt.fit(X_train,y_train )
Titanic - Machine Learning from Disaster
9,101,854
scenario_4 = train_df.loc[(train_df['S_Distance']==0)&(train_df['fare_amount']!=0)]<filter>
dt.score(X_test,y_test) y_pred=dt.predict(X_test )
Titanic - Machine Learning from Disaster
9,101,854
scenario_4.loc[(scenario_4['fare_amount']<=3.0)&(scenario_4['S_Distance']==0)]<filter>
confusion_matrix(y_test,y_pred )
Titanic - Machine Learning from Disaster
9,101,854
scenario_4.loc[(scenario_4['fare_amount']>3.0)&(scenario_4['S_Distance']==0)]<filter>
test=pd.read_csv('/kaggle/input/titanic/test.csv') test_dummies=pd.get_dummies(data=test,columns=['Sex','Embarked'],drop_first=True) test_dummies['Fare'].fillna(test_dummies['Fare'].dropna().median() ,inplace=True) test_dummies['Age'].fillna(test_dummies['Age'].dropna().median() ,inplace=True) test_dummies.info()
Titanic - Machine Learning from Disaster
9,101,854
scenario_4_sub = scenario_4.loc[(scenario_4['fare_amount']>3.0)&(scenario_4['S_Distance']==0)]<feature_engineering>
X_testf=test_dummies.loc[:,['Sex_male','SibSp','Parch','Pclass','Fare','Embarked_S','Embarked_Q','Age','Fare']] predictions=dt.predict(X_testf )
Titanic - Machine Learning from Disaster
9,101,854
scenario_4_sub['S_Distance'] = scenario_4_sub.apply( lambda row:(( row['fare_amount']-2.50)/1.56), axis=1 )<drop_column>
Id=test_dummies['PassengerId'] sub_df=pd.DataFrame({'PassengerId':Id,'Survived':predictions}) sub_df.head()
Titanic - Machine Learning from Disaster
9,101,854
train_df = train_df.drop(['key','pickup_datetime'], axis = 1) test_df = test_df.drop(['key','pickup_datetime'], axis = 1 )<prepare_x_and_y>
sub_df.to_csv('submission.csv',index=False )
Titanic - Machine Learning from Disaster
9,121,810
x_train = train_df.iloc[:,train_df.columns!='fare_amount'] y_train = train_df['fare_amount'].values x_test = test_df<train_model>
!pip install pycaret==1.0
Titanic - Machine Learning from Disaster
9,121,810
rg=RandomForestRegressor() rg.fit(x_train,y_train) y_predict=rg.predict(x_test) y_predict<save_to_csv>
import os
import random
import numpy as np
import pandas as pd
Titanic - Machine Learning from Disaster
9,121,810
submission = pd.read_csv('../input/sample_submission.csv') submission['fare_amount'] = y_predict submission.to_csv('submission_1.csv', index=False) submission.head(10 )<import_modules>
def random_seed_initialize(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
Titanic - Machine Learning from Disaster
9,121,810
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns<load_from_csv>
random_seed_initialize(777 )
Titanic - Machine Learning from Disaster
9,121,810
train_df = pd.read_csv('../input/train.csv', nrows=5_000_000 )<compute_test_metric>
train_data = pd.read_csv('../input/titanic/train.csv') test_data = pd.read_csv('../input/titanic/test.csv') submission_data = pd.read_csv('../input/titanic/gender_submission.csv' )
Titanic - Machine Learning from Disaster
9,121,810
def haversine_np(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat / 2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0)**2
    c = 2 * np.arcsin(np.sqrt(a))
    km = 6367 * c
    return km<feature_engineering>
from pycaret.classification import *
Titanic - Machine Learning from Disaster
9,121,810
train_df['distance'] = haversine_np(train_df['pickup_longitude'], train_df['pickup_latitude'], train_df['dropoff_longitude'], train_df['dropoff_latitude'] )<data_type_conversions>
exp = setup(data=train_data, target='Survived', ignore_features = ['PassengerId', 'Name'], silent=True, session_id=42 )
Titanic - Machine Learning from Disaster
9,121,810
train_df['pickup_datetime'] = pd.to_datetime(train_df['pickup_datetime'] )<feature_engineering>
fold_start_number = 2
fold_end_number = 30 + 1
except_count = 0
for i in range(fold_start_number, fold_end_number, 1):
    try:
        blend_models(fold=i, verbose=False)
    except:
        except_count += 1
save_experiment('TitanicBlendModelsExperiment')
experiment = load_experiment('TitanicBlendModelsExperiment')
Titanic - Machine Learning from Disaster
9,121,810
train_df['year'] = train_df['pickup_datetime'].dt.year train_df['month'] = train_df['pickup_datetime'].dt.month train_df['day'] = train_df['pickup_datetime'].dt.day train_df['hour'] = train_df['pickup_datetime'].dt.hour train_df['minute'] = train_df['pickup_datetime'].dt.minute<train_model>
accuracy = []
best_fold_number = 0
best_accuracy = 0
best_model = None
for i in range(fold_end_number - fold_start_number - except_count, 0, -1):
    fold_num = len(experiment[-i*2+1]['Accuracy']) - 2
    accuracy_mean = experiment[-i*2+1]['Accuracy']['Mean']
    model = experiment[-i*2]
    if best_accuracy < accuracy_mean:
        best_fold_number = fold_num
        best_accuracy = accuracy_mean
        best_model = model
    accuracy.append([fold_num, accuracy_mean])
print('best fold number:' + str(best_fold_number))
print('best accuracy:' + str(best_accuracy))
Titanic - Machine Learning from Disaster
9,121,810
print('Old size: %d' % len(train_df)) train_df = train_df.dropna(how = 'any', axis = 'rows') print('New size: %d' % len(train_df))<feature_engineering>
predictions = predict_model(best_model, data=test_data) predictions.head()
Titanic - Machine Learning from Disaster
9,121,810
cond = True
for col in {'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'}:
    cond &= abs(train_df[col] - train_df[col].mean()) < 5<feature_engineering>
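The loop only accumulates a boolean mask; assuming the intent is to keep rows within 5 degrees of each coordinate's mean, applying it would look like:

train_df = train_df[cond]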
submission_data['Survived'] = round(predictions['Label'] ).astype(int) submission_data.to_csv('submission.csv',index=False) submission_data.head()
Titanic - Machine Learning from Disaster
14,245,867
for col in {'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'}: train_df['rough' + col] = train_df[col].round(2 )<groupby>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
14,245,867
a=train_df.groupby(['roughpickup_latitude','roughpickup_longitude'])[['pickup_latitude', 'pickup_longitude']].agg(['mean','count'] )<sort_values>
seed = 42
Titanic - Machine Learning from Disaster
14,245,867
a.columns = ['mean_pickup_latitude','c1','mean_pickup_longitude','c2'] a.head() a.sort_values(['c1','c2'],ascending=False ).head() <groupby>
train = train.drop('Cabin',axis = 1) train = train.drop('Name',axis = 1) train = train.drop('Ticket',axis = 1) train = train.drop('PassengerId',axis = 1) train = train.reset_index(drop=True )
Titanic - Machine Learning from Disaster
14,245,867
b=train_df.groupby(['roughdropoff_latitude','roughdropoff_longitude'])[['dropoff_latitude', 'dropoff_longitude']].agg(['mean','count'] )<sort_values>
for col in train.columns:
    if train[col].dtype == 'O':
        train[col] = pd.get_dummies(train[col])
train.head()
Titanic - Machine Learning from Disaster
14,245,867
b.columns = ['mean_dropoff_latitude','c1','mean_dropoff_longitude','c2'] b.head() b.sort_values(['c1','c2'],ascending=False ).head(n=20) <sort_values>
train = train.fillna(-9999 )
Titanic - Machine Learning from Disaster
14,245,867
b[ b['mean_dropoff_latitude']<40.7].sort_values(['c1','c2'],ascending=False ).head() <rename_columns>
X = train.iloc[:,1:] y = train.iloc[:,0] x_train, x_test,y_train, y_test = train_test_split(X,y,test_size = 0.2,random_state = seed )
Titanic - Machine Learning from Disaster
14,245,867
a = a[['c1']].reset_index().rename(columns={'c1':'pickup_busyness'}) b = b[['c1']].reset_index().rename(columns={'c1':'dropoff_busyness'} )<merge>
import xgboost as xgb
import optuna
Titanic - Machine Learning from Disaster
14,245,867
train_df = pd.merge(train_df,a, how='left') train_df = pd.merge(train_df,b, how='left' )<prepare_x_and_y>
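For the train frame alone, the same busyness counts can be attached without building and merging the intermediate a/b frames; a sketch using groupby().transform (the merge route is still needed to map counts onto test_df):

train_df['pickup_busyness'] = train_df.groupby(
    ['roughpickup_latitude', 'roughpickup_longitude'])['pickup_latitude'].transform('count')
train_df['dropoff_busyness'] = train_df.groupby(
    ['roughdropoff_latitude', 'roughdropoff_longitude'])['dropoff_latitude'].transform('count')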
import sklearn.metrics  # needed for accuracy_score below

def objective(trial):
    dtrain = xgb.DMatrix(x_train, label=y_train)
    dtest = xgb.DMatrix(x_test, label=y_test)
    num_round = 1000
    param = {
        "eta": trial.suggest_float('eta', 1e-3, 0.3),
        "objective": "binary:logistic",
        "eval_metric": 'auc',
        "max_depth": trial.suggest_int('max_depth', 4, 16),
        "subsample": trial.suggest_float('subsample', 0.5, 1),
        "lambda": trial.suggest_float('lambda', 1, 3),
        'feature_fraction': trial.suggest_float('feature_fraction', 0.5, 1),
        "missing": -9999
    }
    # Pass num_round through (the original defined it but never used it).
    bst = xgb.train(param, dtrain, num_boost_round=num_round, evals=[(dtest, "validation")])
    preds = bst.predict(dtest)
    pred_labels = np.rint(preds)
    accuracy = sklearn.metrics.accuracy_score(y_test, pred_labels)
    return accuracy
Titanic - Machine Learning from Disaster
14,245,867
X = train_df[['distance','year','month','day','hour','pickup_busyness','dropoff_busyness']].values Y = train_df['fare_amount'].values<import_modules>
if __name__ == "__main__":
    study = optuna.create_study(
        pruner=optuna.pruners.MedianPruner(n_warmup_steps=5),
        direction="maximize"
    )
    study.optimize(objective, n_trials=100)
    print(study.best_trial)
Titanic - Machine Learning from Disaster
14,245,867
from sklearn.ensemble import RandomForestRegressor<import_modules>
params={'eta': 0.001832623843952462, 'max_depth': 4, 'subsample': 0.8827284715195801, 'lambda': 7.80011028366904}
Titanic - Machine Learning from Disaster
14,245,867
from sklearn.ensemble import RandomForestRegressor<choose_model_class>
num_round = 100 dtrain = xgb.DMatrix(x_train,label = y_train) dtest = xgb.DMatrix(x_test,label = y_test) clf = xgb.XGBClassifier(**params) kfold = StratifiedKFold(n_splits = 5) results = cross_val_score(clf,x_train,y_train,cv = kfold) avg_results = results.sum() /5
Titanic - Machine Learning from Disaster
14,245,867
kwargs = {'bootstrap': True, 'max_depth': None, 'max_features': 3, 'min_samples_leaf': 9, 'min_samples_split': 2} rand_regr = RandomForestRegressor(n_estimators=20, **kwargs )<train_model>
num_rounds = 10 bst = xgb.train(params,dtrain,num_rounds )
Titanic - Machine Learning from Disaster
14,245,867
rand_regr.fit(X, Y )<predict_on_test>
lgb_train = lgb.Dataset(x_train,y_train) lgb_test = lgb.Dataset(x_test,y_test )
Titanic - Machine Learning from Disaster
14,245,867
y_pred = rand_regr.predict(X) print('chi squared rand forest with date %s' %(np.sum(( Y-y_pred)**2.) /len(Y)) **0.5 )<compute_test_metric>
def lightgbm_objective(trial):
    lgb_train = lgb.Dataset(x_train, y_train)
    lgb_test = lgb.Dataset(x_test, y_test)
    param = {
        'boost_type': trial.suggest_categorical('boost_type', ['dart', 'gbdt']),
        "eta": trial.suggest_float('eta', 1e-3, 0.3),
        "objective": "binary",  # LightGBM's binary objective ('binary:logistic' is XGBoost syntax)
        "eval_metric": 'auc',
        "max_depth": trial.suggest_int('max_depth', 4, 16),
        "subsample": trial.suggest_float('subsample', 0.5, 1),
        "lambda": trial.suggest_float('lambda', 1, 20),
        'feature_fraction': trial.suggest_float('feature_fraction', 0.5, 1),
        "num_boost_round": trial.suggest_int('num_boost_round', 20, 100)
    }
    # Bug fix: the original trained on the outer `params` dict instead of the
    # trial's `param`, so Optuna was effectively tuning nothing.
    gbm = lgb.train(param, lgb_train, num_boost_round=100, valid_sets=lgb_test)
    preds = gbm.predict(x_test, num_iteration=gbm.best_iteration)
    accuracy = sklearn.metrics.accuracy_score([int(round(x)) for x in preds], y_test)
    return accuracy
Titanic - Machine Learning from Disaster
14,245,867
rand_regr.score(X,Y )<load_from_csv>
if __name__ == "__main__":
    study = optuna.create_study(
        pruner=optuna.pruners.MedianPruner(n_warmup_steps=5),
        direction="maximize"
    )
    study.optimize(lightgbm_objective, n_trials=80)
    print(study.best_trial)
Titanic - Machine Learning from Disaster
14,245,867
test_df = pd.read_csv('../input/test.csv' )<compute_test_metric>
params={'boost_type': 'dart', 'eta': 0.21195106328946775, 'max_depth': 9, 'subsample': 0.6526587206109331, 'lambda': 11.46556912870986, 'feature_fraction': 0.8104196182900097, 'num_boost_round': 88}
Titanic - Machine Learning from Disaster
14,245,867
test_df['distance'] = haversine_np(test_df['pickup_longitude'], test_df['pickup_latitude'], test_df['dropoff_longitude'], test_df['dropoff_latitude'] )<data_type_conversions>
gbm = lgb.train(params, lgb_train, valid_sets=lgb_test, ) preds = gbm.predict(x_test,num_iteration = gbm.best_iteration) accuracy = sklearn.metrics.accuracy_score([int(round(x)) for x in preds],y_test) accuracy
Titanic - Machine Learning from Disaster
14,245,867
test_df['pickup_datetime'] = pd.to_datetime(test_df['pickup_datetime'] )<feature_engineering>
lgb.cv(params = params,train_set = lgb_train,metrics = 'auc',nfold = 3 )
Titanic - Machine Learning from Disaster
14,245,867
test_df['year'] = test_df['pickup_datetime'].dt.year test_df['month'] = test_df['pickup_datetime'].dt.month test_df['day'] = test_df['pickup_datetime'].dt.day test_df['hour'] = test_df['pickup_datetime'].dt.hour test_df['minute'] = test_df['pickup_datetime'].dt.minute<feature_engineering>
y_train.to_numpy()
Titanic - Machine Learning from Disaster
14,245,867
for col in {'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude'}: test_df['rough' + col] = test_df[col].round(2 )<merge>
nn_model = tf.keras.Sequential() nn_model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu')) nn_model.add(Dense(units = 9, kernel_initializer = 'uniform', activation = 'relu')) nn_model.add(Dense(units = 5, kernel_initializer = 'uniform', activation = 'relu')) nn_model.add(Dense(units = 1, kernel_initializer = 'uniform', activation = 'sigmoid')) nn_model.compile(optimizer = 'adam',loss = 'binary_crossentropy',metrics = ['accuracy']) nn_model.fit(x_train,y_train, epochs=100) test_loss, test_acc = nn_model.evaluate(x_test,y_test, verbose=2 )
Titanic - Machine Learning from Disaster
14,245,867
test_df = pd.merge(test_df,a, how='left') test_df = pd.merge(test_df,b, how='left' )<data_type_conversions>
test = test.drop('Cabin', axis=1)
test = test.drop('Name', axis=1)
test = test.drop('Ticket', axis=1)
test = test.drop('PassengerId', axis=1)
for col in test.columns:
    if test[col].dtype == 'O':
        test[col] = pd.get_dummies(test[col])
test = test.fillna(-9999)
Titanic - Machine Learning from Disaster
14,245,867
test_df['pickup_busyness'] = test_df['pickup_busyness'].fillna(1) test_df['dropoff_busyness'] = test_df['dropoff_busyness'].fillna(1 )<predict_on_test>
test_xgb = xgb.DMatrix(test) pred_xgb = bst.predict(test_xgb) pred_xgb = [round(x)for x in pred_xgb]
Titanic - Machine Learning from Disaster
14,245,867
X_to_pred = test_df[['distance','year','month','day','hour','pickup_busyness','dropoff_busyness']].values y_pred = rand_regr.predict(X_to_pred )<save_to_csv>
test_lgb = lgb.Dataset(test) pred_lgb = gbm.predict(test) pred_lgb = [round(x)for x in pred_lgb]
Titanic - Machine Learning from Disaster
14,245,867
submission = pd.DataFrame( {'key': test_df.key, 'fare_amount': y_pred}, columns = ['key', 'fare_amount']) submission.to_csv('submission.csv', index = False )<prepare_x_and_y>
pred_nn = nn_model.predict(test) pred_nn = [int(round(x)) for x in pred_nn.flat[:]]
Titanic - Machine Learning from Disaster
14,245,867
<set_options>
Results = [round(( a+b+c)/3)for(a,b,c)in zip(pred_nn,pred_lgb,pred_xgb)]
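Rounding the mean of three {0,1} predictions is a majority vote; an equivalent sketch with numpy:

import numpy as np
votes = np.array([pred_nn, pred_lgb, pred_xgb])
Results = (votes.sum(axis=0) >= 2).astype(int).tolist()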
Titanic - Machine Learning from Disaster
14,245,867
%matplotlib inline color = sns.color_palette() sns.set_style('darkgrid') def ignore_warn(*args, **kwargs): pass warnings.warn = ignore_warn pd.set_option('display.float_format', lambda x: '{:.3f}'.format(x)) train = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv') pd.set_option('display.max_columns', None) train.head(5) test.head(5) print("The train data size before dropping Id feature is : {} ".format(train.shape)) print("The test data size before dropping Id feature is : {} ".format(test.shape)) print("*" * 70) train_ID = train['Id'] test_ID = test['Id'] train.drop("Id", axis = 1, inplace = True) test.drop("Id", axis = 1, inplace = True) print("The train data size after dropping Id feature is : {} ".format(train.shape)) print("The test data size after dropping Id feature is : {} ".format(test.shape)) ax = sns.scatterplot(x = train['GrLivArea'], y = train['SalePrice'], hue = train['OverallCond'],) sns.jointplot(x='GrLivArea', y='SalePrice', data= train) train = train.drop(train[(train['GrLivArea']>4000)&(train['SalePrice']<300000)].index) ax = sns.scatterplot(x = train['GrLivArea'], y = train['SalePrice'], hue = train['OverallCond'],) sns.jointplot(x='GrLivArea', y='SalePrice', data= train) sns.distplot(train['SalePrice'] , fit=norm, color = 'b'); (mu, sigma)= norm.fit(train['SalePrice']) print(' mu = {:.2f} and sigma = {:.2f} '.format(mu, sigma)) plt.legend(['Normal dist.( $\mu=$ {:.2f} and $\sigma=$ {:.2f})'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) plt.show() train["SalePrice"] = np.log1p(train["SalePrice"]) sns.distplot(train['SalePrice'] , fit=norm, color= 'b'); (mu, sigma)= norm.fit(train['SalePrice']) print(' mu = {:.2f} and sigma = {:.2f} '.format(mu, sigma)) plt.legend(['Normal dist.( $\mu=$ {:.2f} and $\sigma=$ {:.2f})'.format(mu, sigma)], loc='best') plt.ylabel('Frequency') plt.title('SalePrice distribution') fig = plt.figure() res = stats.probplot(train['SalePrice'], plot=plt) plt.show() ntrain = train.shape[0] ntest = test.shape[0] y_train = train.SalePrice.values house_data = pd.concat(( train, test)).reset_index(drop=True) house_data.drop(['SalePrice'], axis=1, inplace=True) print("house_data size is : {}".format(house_data.shape)) data_null =(house_data.isnull().sum() / len(house_data)) * 100 data_null = data_null.drop(data_null[data_null == 0].index ).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({'Missing Ratio' :data_null}) missing_data.head(20) f, ax = plt.subplots(figsize=(20, 12)) plt.xticks(rotation='60') sns.barplot(x=data_null.index, y=data_null, color = 'b') plt.xlabel('Features', fontsize=15) plt.ylabel('Percent of missing values', fontsize=15) plt.title('Percent missing data by feature', fontsize=15) corrmat = train.corr() plt.subplots(figsize=(35,35)) sns.heatmap(corrmat, vmax=0.9, square=True, annot=True) house_data["PoolQC"] = house_data["PoolQC"].fillna("None") house_data["MiscFeature"] = house_data["MiscFeature"].fillna("None") house_data["Alley"] = house_data["Alley"].fillna("None") house_data["Fence"] = house_data["Fence"].fillna("None") house_data["FireplaceQu"] = house_data["FireplaceQu"].fillna("None") house_data["LotFrontage"] = house_data.groupby("Neighborhood")["LotFrontage"].transform( lambda x: x.fillna(x.median())) for col in('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'): house_data[col] 
= house_data[col].fillna('None') for col in('GarageYrBlt', 'GarageArea', 'GarageCars'): house_data[col] = house_data[col].fillna(0) for col in('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'): house_data[col] = house_data[col].fillna(0) for col in('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'): house_data[col] = house_data[col].fillna('None') house_data["MasVnrType"] = house_data["MasVnrType"].fillna("None") house_data["MasVnrArea"] = house_data["MasVnrArea"].fillna(0) house_data['MSZoning'] = house_data['MSZoning'].fillna(house_data['MSZoning'].mode() [0]) house_data = house_data.drop(['Utilities'], axis=1) house_data["Functional"] = house_data["Functional"].fillna("Typ") house_data['Electrical'] = house_data['Electrical'].fillna(house_data['Electrical'].mode() [0]) house_data['KitchenQual'] = house_data['KitchenQual'].fillna(house_data['KitchenQual'].mode() [0]) house_data['Exterior1st'] = house_data['Exterior1st'].fillna(house_data['Exterior1st'].mode() [0]) house_data['Exterior2nd'] = house_data['Exterior2nd'].fillna(house_data['Exterior2nd'].mode() [0]) house_data['SaleType'] = house_data['SaleType'].fillna(house_data['SaleType'].mode() [0]) house_data['MSSubClass'] = house_data['MSSubClass'].fillna("None") data_null =(house_data.isnull().sum() / len(house_data)) * 100 data_null = data_null.drop(data_null[data_null == 0].index ).sort_values(ascending=False) missing_data = pd.DataFrame({'Missing Ratio' :data_null}) missing_data.head() house_data['MSSubClass'] = house_data['MSSubClass'].apply(str) house_data['OverallCond'] = house_data['OverallCond'].astype(str) house_data['YrSold'] = house_data['YrSold'].astype(str) house_data['MoSold'] = house_data['MoSold'].astype(str) cols =('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', 'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1', 'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope', 'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond', 'YrSold', 'MoSold') for c in cols: lbl = LabelEncoder() lbl.fit(list(house_data[c].values)) house_data[c] = lbl.transform(list(house_data[c].values)) print('Shape all_data: {}'.format(house_data.shape)) house_data['TotalSF'] = house_data['TotalBsmtSF'] + house_data['1stFlrSF'] + house_data['2ndFlrSF'] numeric_feats = house_data.dtypes[house_data.dtypes != "object"].index skewed_feats = house_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False) print(" Skew in numerical features: ") skewness = pd.DataFrame({'Skew' :skewed_feats}) skewness.head(10) skewness = skewness[abs(skewness)> 0.75] print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0])) skewed_features = skewness.index lam = 0.15 for feat in skewed_features: house_data[feat] = boxcox1p(house_data[feat], lam) house_data = pd.get_dummies(house_data) print(house_data.shape) train = house_data[:ntrain] test = house_data[ntrain:] n_folds = 8 def rmsle_cv(model): kf = KFold(n_folds, shuffle=True, random_state=42 ).get_n_splits(train.values) rmse= np.sqrt(-cross_val_score(model, train.values, y_train, scoring="neg_mean_squared_error", cv = kf)) return(rmse) lasso = make_pipeline(RobustScaler() , Lasso(alpha =0.0005, random_state=1)) ENet = make_pipeline(RobustScaler() , ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3)) KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5) GBoost = 
GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss='huber', random_state =5) model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, learning_rate=0.05, max_depth=3, min_child_weight=1.7817, n_estimators=2200, reg_alpha=0.4640, reg_lambda=0.8571, subsample=0.5213, silent=1, random_state =7, nthread = -1) model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5, learning_rate=0.05, n_estimators=720, max_bin = 55, bagging_fraction = 0.8, bagging_freq = 5, feature_fraction = 0.2319, feature_fraction_seed=9, bagging_seed=9, min_data_in_leaf =6, min_sum_hessian_in_leaf = 11) score = rmsle_cv(lasso) print(" Lasso score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) score = rmsle_cv(ENet) print("ElasticNet score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) score = rmsle_cv(KRR) print("Kernel Ridge score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) score = rmsle_cv(GBoost) print("Gradient Boosting score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) score = rmsle_cv(model_xgb) print("Xgboost score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) score = rmsle_cv(model_lgb) print("LGBM score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, models): self.models = models def fit(self, X, y): self.models_ = [clone(x)for x in self.models] for model in self.models_: model.fit(X, y) return self def predict(self, X): predictions = np.column_stack([ model.predict(X)for model in self.models_ ]) return np.mean(predictions, axis=1) averaged_models = AveragingModels(models =(ENet, GBoost, KRR, lasso)) score = rmsle_cv(averaged_models) print(" Averaged base models score: {:.4f}({:.4f}) ".format(score.mean() , score.std())) class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin): def __init__(self, base_models, meta_model, n_folds=5): self.base_models = base_models self.meta_model = meta_model self.n_folds = n_folds def fit(self, X, y): self.base_models_ = [list() for x in self.base_models] self.meta_model_ = clone(self.meta_model) kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156) out_of_fold_predictions = np.zeros(( X.shape[0], len(self.base_models))) for i, model in enumerate(self.base_models): for train_index, holdout_index in kfold.split(X, y): instance = clone(model) self.base_models_[i].append(instance) instance.fit(X[train_index], y[train_index]) y_pred = instance.predict(X[holdout_index]) out_of_fold_predictions[holdout_index, i] = y_pred self.meta_model_.fit(out_of_fold_predictions, y) return self def predict(self, X): meta_features = np.column_stack([ np.column_stack([model.predict(X)for model in base_models] ).mean(axis=1) for base_models in self.base_models_ ]) return self.meta_model_.predict(meta_features) stacked_averaged_models = StackingAveragedModels(base_models =(ENet, GBoost, KRR), meta_model = lasso) score = rmsle_cv(stacked_averaged_models) print("Stacking Averaged models score: {:.4f}({:.4f})".format(score.mean() , score.std())) def rmsle(y, y_pred): return np.sqrt(mean_squared_error(y, y_pred)) stacked_averaged_models.fit(train.values, y_train) stacked_train_pred = stacked_averaged_models.predict(train.values) stacked_pred = np.expm1(stacked_averaged_models.predict(test.values)) print(rmsle(y_train, stacked_train_pred)) model_xgb.fit(train, y_train) xgb_train_pred = model_xgb.predict(train) xgb_pred = 
np.expm1(model_xgb.predict(test)) print(rmsle(y_train, xgb_train_pred)) model_lgb.fit(train, y_train) lgb_train_pred = model_lgb.predict(train) lgb_pred = np.expm1(model_lgb.predict(test.values)) print(rmsle(y_train, lgb_train_pred)) print('RMSLE score on train data:') print(rmsle(y_train,stacked_train_pred*0.70 + xgb_train_pred*0.15 + lgb_train_pred*0.15)) ensemble = stacked_pred*0.70 + xgb_pred*0.15 + lgb_pred*0.15 sub = pd.DataFrame() sub['Id'] = test_ID sub['SalePrice'] = ensemble sub.to_csv('submission.csv',index=False) <install_modules>
sub = pd.read_csv('/kaggle/input/titanic/test.csv') sub['Survived'] = [int(x)for x in Results] sub = sub[['PassengerId','Survived']]
Titanic - Machine Learning from Disaster
14,245,867
<set_options><EOS>
my_sub = sub.to_csv('my_sub.csv',index = False )
Titanic - Machine Learning from Disaster
14,271,675
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables>
np.set_printoptions(precision=4)
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
init_notebook_mode(connected=True)
warnings.filterwarnings("ignore")
plt.style.use('fivethirtyeight')
sns.set(font_scale=1.5)
Titanic - Machine Learning from Disaster
14,271,675
DATA_PATH = '../input/melanoma-merged-external-data-512x512-jpeg'<normalization>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv' )
Titanic - Machine Learning from Disaster
14,271,675
TEST_ROOT_PATH = f'{DATA_PATH}/512x512-test/512x512-test'

def get_valid_transforms():
    return A.Compose([
        A.Resize(height=512, width=512, p=1.0),
        ToTensorV2(p=1.0),
    ], p=1.0)

class DatasetRetriever(Dataset):
    def __init__(self, image_ids, transforms=None):
        super().__init__()
        self.image_ids = image_ids
        self.transforms = transforms

    def __getitem__(self, idx: int):
        image_id = self.image_ids[idx]
        image = cv2.imread(f'{TEST_ROOT_PATH}/{image_id}.jpg', cv2.IMREAD_COLOR)
        image = image.astype(np.float32) / 255.0
        if self.transforms:
            sample = {'image': image}
            sample = self.transforms(**sample)
            image = sample['image']
        return image, image_id

    def __len__(self) -> int:
        return self.image_ids.shape[0]<find_best_params>
def var_standardized(v):
    # z-score standardization; the original divided by train.std() (a
    # frame-wide Series), which broadcasts incorrectly for a single column.
    stand = (v - v.mean()) / v.std()
    return stand
Titanic - Machine Learning from Disaster
14,271,675
def get_net():
    net = EfficientNet.from_name('efficientnet-b5')
    net._fc = nn.Linear(in_features=2048, out_features=2, bias=True)
    return net

net = get_net().cuda()<load_from_csv>
num_var = [f for f in train.columns if train.dtypes[f] != 'object'] num_var.remove('Survived') num_var.remove('PassengerId')
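The notebook defines var_standardized but does not show the call site; a hypothetical usage over the numeric columns collected above:

train[num_var] = train[num_var].apply(var_standardized)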
Titanic - Machine Learning from Disaster
14,271,675
df_test = pd.read_csv('../input/siim-isic-melanoma-classification/test.csv', index_col='image_name')
test_dataset = DatasetRetriever(
    image_ids=df_test.index.values,
    transforms=get_valid_transforms(),
)
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=8,
    num_workers=2,
    shuffle=False,
    sampler=SequentialSampler(test_dataset),
    pin_memory=False,
    drop_last=False,
)<load_pretrained>
TRM = train.isna().sum().sum()
TSM = test.isna().sum().sum()
print(f'Missing Value percentage for train: {(TRM / len(train)) * 100}%')
print(f'Missing Value percentage for test: {(TSM / len(test)) * 100}%')
Titanic - Machine Learning from Disaster
14,271,675
checkpoint_path = '../input/melanoma-public-checkpoints/effnet5-best-score-checkpoint-015epoch-version2.bin'
checkpoint = torch.load(checkpoint_path)
net.load_state_dict(checkpoint)
net.eval();<concatenate>
print("- Mising Value: ", train.isna().sum() ," - Total of Mising Value : ", train.isna().sum().sum() )
Titanic - Machine Learning from Disaster
14,271,675
result = {'image_name': [], 'target': []}
for images, image_names in tqdm(test_loader, total=len(test_loader)):
    with torch.no_grad():
        images = images.cuda().float()
        outputs = net(images)
        y_pred = nn.functional.softmax(outputs, dim=1).data.cpu().numpy()[:, 1]
    result['image_name'].extend(image_names)
    result['target'].extend(y_pred)
submission = pd.DataFrame(result)<save_to_csv>
print("- Mising Value: ", test.isna().sum() ," - Total of Mising Value : ", test.isna().sum().sum() )
Titanic - Machine Learning from Disaster
14,271,675
submission.to_csv('submission.csv', index=False) submission['target'].hist(bins=100);<set_options>
train.Embarked.replace(np.nan, 'S', inplace=True)
test.Embarked.replace(np.nan, 'S', inplace=True)
train['Cabin'] = train['Cabin'].map(lambda x: 0 if pd.notnull(x) == False else 1)
test['Cabin'] = test['Cabin'].map(lambda x: 0 if pd.notnull(x) == False else 1)
train['Name'] = train['Name'].str.replace('(', '')
train['Name'] = train['Name'].str.replace(')', '')
test['Name'] = test['Name'].str.replace('(', '')
test['Name'] = test['Name'].str.replace(')', '')
mean_fare = pd.DataFrame(test.groupby('Pclass')[['Fare']].mean())
for item, i in test['Fare'].iteritems():
    if pd.notnull(i) == False:
        pclass_fare = test.Pclass.iloc[item]
        test['Fare'].iloc[item] = mean_fare.iloc[pclass_fare - 1][0]
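The iteritems loop above fills missing fares with the Pclass mean one row at a time; a vectorized sketch of the same imputation:

test['Fare'] = test['Fare'].fillna(test.groupby('Pclass')['Fare'].transform('mean'))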
Titanic - Machine Learning from Disaster
14,271,675
%reload_ext autoreload
%autoreload 2
%matplotlib inline<categorify>
mean_age= pd.DataFrame(test.groupby('Pclass')[['Age']].mean()) mean_age
Titanic - Machine Learning from Disaster
14,271,675
GlobalParams = collections.namedtuple('GlobalParams', [ 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size']) BlockArgs = collections.namedtuple('BlockArgs', [ 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', 'expand_ratio', 'id_skip', 'stride', 'se_ratio']) GlobalParams.__new__.__defaults__ =(None,)* len(GlobalParams._fields) BlockArgs.__new__.__defaults__ =(None,)* len(BlockArgs._fields) def relu_fn(x): return x * torch.sigmoid(x) def round_filters(filters, global_params): multiplier = global_params.width_coefficient if not multiplier: return filters divisor = global_params.depth_divisor min_depth = global_params.min_depth filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2)// divisor * divisor) if new_filters < 0.9 * filters: new_filters += divisor return int(new_filters) def round_repeats(repeats, global_params): multiplier = global_params.depth_coefficient if not multiplier: return repeats return int(math.ceil(multiplier * repeats)) def drop_connect(inputs, p, training): if not training: return inputs batch_size = inputs.shape[0] keep_prob = 1 - p random_tensor = keep_prob random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device) binary_tensor = torch.floor(random_tensor) output = inputs / keep_prob * binary_tensor return output def get_same_padding_conv2d(image_size=None): if image_size is None: return Conv2dDynamicSamePadding else: return partial(Conv2dStaticSamePadding, image_size=image_size) class Conv2dDynamicSamePadding(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) self.stride = self.stride if len(self.stride)== 2 else [self.stride[0]]*2 def forward(self, x): ih, iw = x.size() [-2:] kh, kw = self.weight.size() [-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max(( oh - 1)* self.stride[0] +(kh - 1)* self.dilation[0] + 1 - ih, 0) pad_w = max(( ow - 1)* self.stride[1] +(kw - 1)* self.dilation[1] + 1 - iw, 0) if pad_h > 0 or pad_w > 0: x = F.pad(x, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2]) return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class Conv2dStaticSamePadding(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self.stride = self.stride if len(self.stride)== 2 else [self.stride[0]] * 2 assert image_size is not None ih, iw = image_size if type(image_size)== list else [image_size, image_size] kh, kw = self.weight.size() [-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max(( oh - 1)* self.stride[0] +(kh - 1)* self.dilation[0] + 1 - ih, 0) pad_w = max(( ow - 1)* self.stride[1] +(kw - 1)* self.dilation[1] + 1 - iw, 0) if pad_h > 0 or pad_w > 0: self.static_padding = nn.ZeroPad2d(( pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)) else: self.static_padding = Identity() def forward(self, x): x = self.static_padding(x) x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x class Identity(nn.Module): def __init__(self,): super(Identity, self ).__init__() def forward(self, input): return 
input<define_search_space>
def impute_age(age_pclass):
    Age = age_pclass[0]
    Pclass = age_pclass[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 38
        elif Pclass == 2:
            return 30
        else:
            return 25
    else:
        return Age
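The 38/30/25 constants are hard-coded per-class age medians; a sketch that derives them from the data instead (the values may differ slightly from the constants above):

pclass_medians = train.groupby('Pclass')['Age'].median()
train['Age'] = train['Age'].fillna(train['Pclass'].map(pclass_medians))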
Titanic - Machine Learning from Disaster
14,271,675
def efficientnet_params(model_name): params_dict = { 'efficientnet-b0':(1.0, 1.0, 224, 0.2), 'efficientnet-b1':(1.0, 1.1, 240, 0.2), 'efficientnet-b2':(1.1, 1.2, 260, 0.3), 'efficientnet-b3':(1.2, 1.4, 300, 0.3), 'efficientnet-b4':(1.4, 1.8, 380, 0.4), 'efficientnet-b5':(1.6, 2.2, 456, 0.4), 'efficientnet-b6':(1.8, 2.6, 528, 0.5), 'efficientnet-b7':(2.0, 3.1, 600, 0.5), } return params_dict[model_name] class BlockDecoder(object): @staticmethod def _decode_block_string(block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits)>= 2: key, value = splits[:2] options[key] = value assert(( 's' in options and len(options['s'])== 1)or (len(options['s'])== 2 and options['s'][0] == options['s'][1])) return BlockArgs( kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se'])if 'se' in options else None, stride=[int(options['s'][0])]) @staticmethod def _encode_block_string(block): args = [ 'r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' %(block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters ] if 0 < block.se_ratio <= 1: args.append('se%s' % block.se_ratio) if block.id_skip is False: args.append('noskip') return '_'.join(args) @staticmethod def decode(string_list): assert isinstance(string_list, list) blocks_args = [] for block_string in string_list: blocks_args.append(BlockDecoder._decode_block_string(block_string)) return blocks_args @staticmethod def encode(blocks_args): block_strings = [] for block in blocks_args: block_strings.append(BlockDecoder._encode_block_string(block)) return block_strings def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2, drop_connect_rate=0.2, image_size=None, num_classes=1000): blocks_args = [ 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 'r1_k3_s11_e6_i192_o320_se0.25', ] blocks_args = BlockDecoder.decode(blocks_args) global_params = GlobalParams( batch_norm_momentum=0.99, batch_norm_epsilon=1e-3, dropout_rate=dropout_rate, drop_connect_rate=drop_connect_rate, num_classes=num_classes, width_coefficient=width_coefficient, depth_coefficient=depth_coefficient, depth_divisor=8, min_depth=None, image_size=image_size, ) return blocks_args, global_params def get_model_params(model_name, override_params): if model_name.startswith('efficientnet'): w, d, s, p = efficientnet_params(model_name) blocks_args, global_params = efficientnet( width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s) else: raise NotImplementedError('model name is not pre-defined: %s' % model_name) if override_params: global_params = global_params._replace(**override_params) return blocks_args, global_params url_map = { 'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet-b0-08094119.pth', 'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet-b1-dbc7070a.pth', 'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet-b2-27687264.pth', 'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet-b3-c8376fa2.pth', 'efficientnet-b4': 
'http://storage.googleapis.com/public-models/efficientnet-b4-e116e8b3.pth', 'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet-b5-586e6cc6.pth', } def load_pretrained_weights(model, model_name, load_fc=True): state_dict = model_zoo.load_url(url_map[model_name]) if load_fc: model.load_state_dict(state_dict) else: state_dict.pop('_fc.weight') state_dict.pop('_fc.bias') res = model.load_state_dict(state_dict, strict=False) assert str(res.missing_keys)== str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights' print('Loaded pretrained weights for {}'.format(model_name)) class MBConvBlock(nn.Module): def __init__(self, block_args, global_params): super().__init__() self._block_args = block_args self._bn_mom = 1 - global_params.batch_norm_momentum self._bn_eps = global_params.batch_norm_epsilon self.has_se =(self._block_args.se_ratio is not None)and(0 < self._block_args.se_ratio <= 1) self.id_skip = block_args.id_skip Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) inp = self._block_args.input_filters oup = self._block_args.input_filters * self._block_args.expand_ratio if self._block_args.expand_ratio != 1: self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) k = self._block_args.kernel_size s = self._block_args.stride self._depthwise_conv = Conv2d( in_channels=oup, out_channels=oup, groups=oup, kernel_size=k, stride=s, bias=False) self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) if self.has_se: num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio)) self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1) self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1) final_oup = self._block_args.output_filters self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False) self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) def forward(self, inputs, drop_connect_rate=None): x = inputs if self._block_args.expand_ratio != 1: x = relu_fn(self._bn0(self._expand_conv(inputs))) x = relu_fn(self._bn1(self._depthwise_conv(x))) if self.has_se: x_squeezed = F.adaptive_avg_pool2d(x, 1) x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed))) x = torch.sigmoid(x_squeezed)* x x = self._bn2(self._project_conv(x)) input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters: if drop_connect_rate: x = drop_connect(x, p=drop_connect_rate, training=self.training) x = x + inputs return x class EfficientNet(nn.Module): def __init__(self, blocks_args=None, global_params=None): super().__init__() assert isinstance(blocks_args, list), 'blocks_args should be a list' assert len(blocks_args)> 0, 'block args must be greater than 0' self._global_params = global_params self._blocks_args = blocks_args Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) bn_mom = 1 - self._global_params.batch_norm_momentum bn_eps = self._global_params.batch_norm_epsilon in_channels = 3 out_channels = round_filters(32, self._global_params) self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False) self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) 
self._blocks = nn.ModuleList([]) for block_args in self._blocks_args: block_args = block_args._replace( input_filters=round_filters(block_args.input_filters, self._global_params), output_filters=round_filters(block_args.output_filters, self._global_params), num_repeat=round_repeats(block_args.num_repeat, self._global_params) ) self._blocks.append(MBConvBlock(block_args, self._global_params)) if block_args.num_repeat > 1: block_args = block_args._replace(input_filters=block_args.output_filters, stride=1) for _ in range(block_args.num_repeat - 1): self._blocks.append(MBConvBlock(block_args, self._global_params)) in_channels = block_args.output_filters out_channels = round_filters(1280, self._global_params) self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False) self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) self._dropout = self._global_params.dropout_rate self._fc = nn.Linear(out_channels, self._global_params.num_classes) def extract_features(self, inputs): x = relu_fn(self._bn0(self._conv_stem(inputs))) for idx, block in enumerate(self._blocks): drop_connect_rate = self._global_params.drop_connect_rate if drop_connect_rate: drop_connect_rate *= float(idx)/ len(self._blocks) x = block(x, drop_connect_rate=drop_connect_rate) x = relu_fn(self._bn1(self._conv_head(x))) return x def forward(self, inputs): x = self.extract_features(inputs) x = F.adaptive_avg_pool2d(x, 1 ).squeeze(-1 ).squeeze(-1) if self._dropout: x = F.dropout(x, p=self._dropout, training=self.training) x = self._fc(x) return x @classmethod def from_name(cls, model_name, override_params=None): cls._check_model_name_is_valid(model_name) blocks_args, global_params = get_model_params(model_name, override_params) return EfficientNet(blocks_args, global_params) @classmethod def from_pretrained(cls, model_name, num_classes=1000): model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes}) return model @classmethod def get_image_size(cls, model_name): cls._check_model_name_is_valid(model_name) _, _, res, _ = efficientnet_params(model_name) return res @classmethod def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False): num_models = 4 if also_need_pretrained_weights else 8 valid_models = ['efficientnet_b'+str(i)for i in range(num_models)] if model_name.replace('-','_')not in valid_models: raise ValueError('model_name should be one of: ' + ', '.join(valid_models))<load_pretrained>
for item, i in train['Age'].iteritems():
    if pd.notnull(i) == False:
        Age_ver2 = impute_age([i, train.Pclass.iloc[item]])
        train['Age'].iloc[item] = Age_ver2
Titanic - Machine Learning from Disaster
14,271,675
md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1 )<load_from_csv>
for item, i in test['Age'].iteritems():
    if pd.notnull(i) == False:
        Age_ver2 = impute_age([i, test.Pclass.iloc[item]])
        test['Age'].iloc[item] = Age_ver2
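Both iteritems loops can be replaced by a single vectorized apply; a sketch for the test frame (the train frame is analogous):

test['Age'] = test.apply(lambda r: impute_age([r['Age'], r['Pclass']]), axis=1)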
Titanic - Machine Learning from Disaster
14,271,675
def get_df():
    base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
    train_dir = os.path.join(base_image_dir, 'train_images/')
    df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
    df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
    df = df.drop(columns=['id_code'])
    df = df.sample(frac=1).reset_index(drop=True)
    test_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
    return df, test_df

df, test_df = get_df()<feature_engineering>
train.isna().sum().sum()
Titanic - Machine Learning from Disaster
14,271,675
bs = 32 sz = 224 tfms = get_transforms(do_flip=True,flip_vert=True )<compute_test_metric>
test.isna().sum().sum()
Titanic - Machine Learning from Disaster
14,271,675
def qk(y_pred, y):
    # Quadratic weighted kappa on rounded predictions, wrapped as a CUDA
    # tensor so fastai can track it as a metric
    return torch.tensor(cohen_kappa_score(torch.round(y_pred), y, weights='quadratic'),
                        device='cuda:0')<load_pretrained>
train = pd.get_dummies(train, columns=['Sex', 'Embarked'], drop_first=True)
train = pd.get_dummies(train, columns=['Pclass'], drop_first=True)
Titanic - Machine Learning from Disaster
14,271,675
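A quick CPU sanity check of the qk metric above (the device argument is dropped here; the kernel pins the result to cuda:0 for fastai):

import torch
from sklearn.metrics import cohen_kappa_score

y_pred = torch.tensor([0.2, 1.6, 2.4, 3.9])  # raw regression outputs
y_true = torch.tensor([0.0, 2.0, 2.0, 4.0])
# torch.round(y_pred) -> [0., 2., 2., 4.], so the score is a perfect 1.0
print(cohen_kappa_score(torch.round(y_pred), y_true, weights='quadratic'))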
learn = Learner(data, md_ef, metrics=[qk], model_dir="models").to_fp16()
learn.data.add_test(ImageList.from_df(
    test_df, '../input/aptos2019-blindness-detection',
    folder='test_images', suffix='.png'))<train_model>
test = pd.get_dummies(test, columns=['Sex', 'Embarked'], drop_first=True)
test = pd.get_dummies(test, columns=['Pclass'], drop_first=True)
Titanic - Machine Learning from Disaster
14,271,675
learn.fit_one_cycle(10, 1e-3)<load_pretrained>
train['Name'] = train.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
train['Name'] = train['Name'].replace(['Lady', 'Countess', 'Capt', 'Col',
                                       'Don', 'Dr', 'Major', 'Rev', 'Sir',
                                       'Jonkheer', 'Dona'], 'Rare')
train['Name'] = train['Name'].replace('Mlle', 'Miss')
train['Name'] = train['Name'].replace('Ms', 'Miss')
train['Name'] = train['Name'].replace('Mme', 'Mrs')
train = pd.get_dummies(train, columns=['Name'], drop_first=True)
Titanic - Machine Learning from Disaster
14,271,675
learn.load('abcdef');<compute_test_metric>
test['Name'] = test.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
test['Name'] = test['Name'].replace(['Lady', 'Countess', 'Capt', 'Col',
                                     'Don', 'Dr', 'Major', 'Rev', 'Sir',
                                     'Jonkheer', 'Dona'], 'Rare')
test['Name'] = test['Name'].replace('Mlle', 'Miss')
test['Name'] = test['Name'].replace('Ms', 'Miss')
test['Name'] = test['Name'].replace('Mme', 'Mrs')
test = pd.get_dummies(test, columns=['Name'], drop_first=True)
Titanic - Machine Learning from Disaster
14,271,675
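The regex in the two cells above keeps only the honorific between the comma and the period. A tiny illustration on two real Titanic names:

import pandas as pd

s = pd.Series(['Braund, Mr. Owen Harris', 'Heikkinen, Miss. Laina'])
print(s.str.extract(r'([A-Za-z]+)\.', expand=False))
# 0      Mr
# 1    Miss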
# Imports assumed by this cell: scipy's optimizer, functools.partial and sklearn metrics
from functools import partial
import numpy as np
import scipy as sp
import scipy.optimize  # makes sp.optimize available
from sklearn import metrics

class OptimizedRounder(object):
    """Searches for the four thresholds that bucket continuous predictions into
    the five ordinal grades by maximising quadratic weighted kappa."""

    def __init__(self):
        self.coef_ = 0

    def _kappa_loss(self, coef, X, y):
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred < coef[1]:
                X_p[i] = 1
            elif pred < coef[2]:
                X_p[i] = 2
            elif pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic')
        return -ll  # negated because scipy minimises

    def fit(self, X, y):
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
        print(-loss_partial(self.coef_['x']))

    def predict(self, X, coef):
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred < coef[1]:
                X_p[i] = 1
            elif pred < coef[2]:
                X_p[i] = 2
            elif pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        return X_p

    def coefficients(self):
        return self.coef_['x']<save_to_csv>
train["Fam_size"] = train['SibSp'] + train["Parch"] + 1 test["Fam_size"] = test['SibSp'] + test["Parch"] + 1
Titanic - Machine Learning from Disaster
14,271,675
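How OptimizedRounder is usually wired up: fit the thresholds on validation predictions, then reuse them for the test set. A hedged sketch, assuming learn exposes a labelled validation set:

opt = OptimizedRounder()
valid_preds, valid_y = learn.get_preds(DatasetType.Valid)  # assumed validation outputs
opt.fit(valid_preds, valid_y)        # prints the kappa reached by the search
coef = opt.coefficients()            # tuned thresholds, near [0.5, 1.5, 2.5, 3.5]
valid_labels = opt.predict(valid_preds, coef)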
def run_subm(learn=learn, coefficients=[0.5, 1.5, 2.5, 3.5]):
    opt = OptimizedRounder()
    preds, y = learn.get_preds(DatasetType.Test)
    tst_pred = opt.predict(preds, coefficients)
    test_df.diagnosis = tst_pred.astype(int)
    test_df.to_csv('submission.csv', index=False)
    print('done')<import_modules>
test.duplicated().sum()
Titanic - Machine Learning from Disaster
14,271,675
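Calling the helper above with its default thresholds, or with thresholds tuned by the OptimizedRounder sketch shown earlier:

run_subm(learn)
# run_subm(learn, coefficients=opt.coefficients())  # if opt was fitted on validation preds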
import numpy as np
import pandas as pd<import_modules>
train.duplicated().sum()
Titanic - Machine Learning from Disaster
14,271,675
# Imports assumed by this cell (they are not in the numpy/pandas cell above)
import re
import types
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import torchvision.models as models

__all__ = [
    'alexnet',
    'densenet121', 'densenet169', 'densenet201', 'densenet161',
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'inceptionv3',
    'squeezenet1_0', 'squeezenet1_1',
    'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn',
    'vgg16', 'vgg16_bn', 'vgg19_bn', 'vgg19'
]

model_urls = {
    'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
    'densenet121': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet121-fbdb23505.pth',
    'densenet169': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet169-f470b90a4.pth',
    'densenet201': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet201-5750cbb1e.pth',
    'densenet161': 'http://data.lip6.fr/cadene/pretrainedmodels/densenet161-347e6b360.pth',
    'inceptionv3': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
    'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}

# Standard ImageNet preprocessing for everything except Inception v3
input_sizes = {}
means = {}
stds = {}
for model_name in __all__:
    input_sizes[model_name] = [3, 224, 224]
    means[model_name] = [0.485, 0.456, 0.406]
    stds[model_name] = [0.229, 0.224, 0.225]
for model_name in ['inceptionv3']:
    input_sizes[model_name] = [3, 299, 299]
    means[model_name] = [0.5, 0.5, 0.5]
    stds[model_name] = [0.5, 0.5, 0.5]

pretrained_settings = {}
for model_name in __all__:
    pretrained_settings[model_name] = {
        'imagenet': {
            'url': model_urls[model_name],
            'input_space': 'RGB',
            'input_size': input_sizes[model_name],
            'input_range': [0, 1],
            'mean': means[model_name],
            'std': stds[model_name],
            'num_classes': 1000
        }
    }


def update_state_dict(state_dict):
    # Remap DenseNet checkpoint keys saved with the old 'norm.1'-style names
    pattern = re.compile(
        r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
    for key in list(state_dict.keys()):
        res = pattern.match(key)
        if res:
            new_key = res.group(1) + res.group(2)
            state_dict[new_key] = state_dict[key]
            del state_dict[key]
    return state_dict


def load_pretrained(model, num_classes, settings):
    assert num_classes == settings['num_classes'], \
        "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
    state_dict = model_zoo.load_url(settings['url'])
    state_dict = update_state_dict(state_dict)
    model.load_state_dict(state_dict)
    # Record the preprocessing metadata on the model itself
    model.input_space = settings['input_space']
    model.input_size = settings['input_size']
    model.input_range = settings['input_range']
    model.mean = settings['mean']
    model.std = settings['std']
    return model


def modify_alexnet(model):
    # Split the sequential classifier into named layers and expose
    # features/logits/forward in the pretrainedmodels style
    model._features = model.features
    del model.features
    model.dropout0 = model.classifier[0]
    model.linear0 = model.classifier[1]
    model.relu0 = model.classifier[2]
    model.dropout1 = model.classifier[3]
    model.linear1 = model.classifier[4]
    model.relu1 = model.classifier[5]
    model.last_linear = model.classifier[6]
    del model.classifier

    def features(self, input):
        x = self._features(input)
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.dropout0(x)
        x = self.linear0(x)
        x = self.relu0(x)
        x = self.dropout1(x)
        x = self.linear1(x)
        return x

    def logits(self, features):
        x = self.relu1(features)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model


def alexnet(num_classes=1000, pretrained='imagenet'):
    model = models.alexnet(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['alexnet'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_alexnet(model)
    return model


def modify_densenets(model):
    model.last_linear = model.classifier
    del model.classifier

    def logits(self, features):
        x = F.relu(features, inplace=True)
        x = F.avg_pool2d(x, kernel_size=7, stride=1)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model


def densenet121(num_classes=1000, pretrained='imagenet'):
    model = models.densenet121(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['densenet121'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_densenets(model)
    return model


def densenet169(num_classes=1000, pretrained='imagenet'):
    model = models.densenet169(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['densenet169'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_densenets(model)
    return model


def densenet201(num_classes=1000, pretrained='imagenet'):
    model = models.densenet201(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['densenet201'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_densenets(model)
    return model


def densenet161(num_classes=1000, pretrained='imagenet'):
    model = models.densenet161(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['densenet161'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_densenets(model)
    return model


def inceptionv3(num_classes=1000, pretrained='imagenet'):
    model = models.inception_v3(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['inceptionv3'][pretrained]
        model = load_pretrained(model, num_classes, settings)

    model.last_linear = model.fc
    del model.fc

    def features(self, input):
        x = self.Conv2d_1a_3x3(input)
        x = self.Conv2d_2a_3x3(x)
        x = self.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Conv2d_3b_1x1(x)
        x = self.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = self.Mixed_5b(x)
        x = self.Mixed_5c(x)
        x = self.Mixed_5d(x)
        x = self.Mixed_6a(x)
        x = self.Mixed_6b(x)
        x = self.Mixed_6c(x)
        x = self.Mixed_6d(x)
        x = self.Mixed_6e(x)
        # Stash the auxiliary logits during training so logits() can return them
        if self.training and self.aux_logits:
            self._out_aux = self.AuxLogits(x)
        x = self.Mixed_7a(x)
        x = self.Mixed_7b(x)
        x = self.Mixed_7c(x)
        return x

    def logits(self, features):
        x = F.avg_pool2d(features, kernel_size=8)
        x = F.dropout(x, training=self.training)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        if self.training and self.aux_logits:
            aux = self._out_aux
            self._out_aux = None
            return x, aux
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model


def modify_resnets(model):
    model.last_linear = model.fc
    model.fc = None

    def features(self, input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def logits(self, features):
        x = self.avgpool(features)
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model


def resnet18(num_classes=1000, pretrained='imagenet'):
    model = models.resnet18(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet18'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_resnets(model)
    return model


def resnet34(num_classes=1000, pretrained='imagenet'):
    model = models.resnet34(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet34'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_resnets(model)
    return model


def resnet50(num_classes=1000, pretrained='imagenet'):
    model = models.resnet50(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet50'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_resnets(model)
    return model


def resnet101(num_classes=1000, pretrained='imagenet'):
    model = models.resnet101(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet101'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_resnets(model)
    return model


def resnet152(num_classes=1000, pretrained='imagenet'):
    model = models.resnet152(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['resnet152'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_resnets(model)
    return model


def modify_squeezenets(model):
    model.dropout = model.classifier[0]
    model.last_conv = model.classifier[1]
    model.relu = model.classifier[2]
    model.avgpool = model.classifier[3]
    del model.classifier

    def logits(self, features):
        x = self.dropout(features)
        x = self.last_conv(x)
        x = self.relu(x)
        x = self.avgpool(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model


def squeezenet1_0(num_classes=1000, pretrained='imagenet'):
    model = models.squeezenet1_0(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['squeezenet1_0'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_squeezenets(model)
    return model


def squeezenet1_1(num_classes=1000, pretrained='imagenet'):
    model = models.squeezenet1_1(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['squeezenet1_1'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_squeezenets(model)
    return model


def modify_vggs(model):
    model._features = model.features
    del model.features
    model.linear0 = model.classifier[0]
    model.relu0 = model.classifier[1]
    model.dropout0 = model.classifier[2]
    model.linear1 = model.classifier[3]
    model.relu1 = model.classifier[4]
    model.dropout1 = model.classifier[5]
    model.last_linear = model.classifier[6]
    del model.classifier

    def features(self, input):
        x = self._features(input)
        x = x.view(x.size(0), -1)
        x = self.linear0(x)
        x = self.relu0(x)
        x = self.dropout0(x)
        x = self.linear1(x)
        return x

    def logits(self, features):
        x = self.relu1(features)
        x = self.dropout1(x)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x

    model.features = types.MethodType(features, model)
    model.logits = types.MethodType(logits, model)
    model.forward = types.MethodType(forward, model)
    return model


def vgg11(num_classes=1000, pretrained='imagenet'):
    model = models.vgg11(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg11'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg11_bn(num_classes=1000, pretrained='imagenet'):
    model = models.vgg11_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg11_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg13(num_classes=1000, pretrained='imagenet'):
    model = models.vgg13(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg13'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg13_bn(num_classes=1000, pretrained='imagenet'):
    model = models.vgg13_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg13_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg16(num_classes=1000, pretrained='imagenet'):
    model = models.vgg16(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg16_bn(num_classes=1000, pretrained='imagenet'):
    model = models.vgg16_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg16_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg19(num_classes=1000, pretrained='imagenet'):
    model = models.vgg19(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg19'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model


def vgg19_bn(num_classes=1000, pretrained='imagenet'):
    model = models.vgg19_bn(pretrained=False)
    if pretrained is not None:
        settings = pretrained_settings['vgg19_bn'][pretrained]
        model = load_pretrained(model, num_classes, settings)
    model = modify_vggs(model)
    return model<define_variables>
y_train = train.Survived
X_train = train.drop(['Survived', 'Ticket', 'Fare_Range'], axis=1)
X_test = test.drop(['Ticket'], axis=1)
Titanic - Machine Learning from Disaster
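A minimal usage sketch for the wrapper factories defined in the large cell above (assumes internet access for the one-off weight download):

import torch

model = resnet50(num_classes=1000, pretrained='imagenet')
model.eval()

x = torch.randn(1, 3, 224, 224)      # matches model.input_size
with torch.no_grad():
    feats = model.features(x)        # conv feature map, before pooling
    out = model(x)                   # logits over the 1000 ImageNet classes
print(model.input_space, model.mean, model.std)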