kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completetion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
20,728,635
train['isChurned'].value_counts()<prepare_x_and_y>
X_test = pd.get_dummies(X_test, columns=['Sex', 'Embarked'], drop_first=True) X_test
Titanic - Machine Learning from Disaster
20,728,635
df_x = train.drop(['isChurned'], axis=1) df_y = train['isChurned'] x = df_x y = df_y<normalization>
from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier
Titanic - Machine Learning from Disaster
20,728,635
scaler = StandardScaler() x = scaler.fit_transform(x) new_test = scaler.transform(test )<split>
Rf = RandomForestClassifier()
Titanic - Machine Learning from Disaster
20,728,635
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 42 )<import_modules>
param_grid = {"n_estimators":[20, 40, 100, 120, 150],'max_depth': np.arange(1, 20), 'max_features': ['auto', 'sqrt'], 'criterion' :['gini', 'entropy']}
Titanic - Machine Learning from Disaster
20,728,635
from sklearn.metrics import confusion_matrix, f1_score<train_model>
RF_grid = GridSearchCV(Rf, param_grid=param_grid, cv=10, scoring='accuracy' )
Titanic - Machine Learning from Disaster
20,728,635
def train_model(models, x=x_train, y=y_train): for key, model in models.items() : print(f'Currently Training {key}') model.fit(x, y )<predict_on_test>
RF_grid.fit(X_train, Y_train )
Titanic - Machine Learning from Disaster
20,728,635
def preds(models, x_test=x_test, y_test=y_test, new_test=new_test): preds = { 'models': [], 'val_f1_score': [], 'train_f1_score': [], 'confusion_matrix': [], 'prediction': [] } for key, model in models.items() : preds['models'].append(key) test_pred = model.predict(x_test) train_pred = model.predict(x_train) preds['val_f1_score'].append(f1_score(test_pred, y_test)) preds['train_f1_score'].append(f1_score(train_pred, y_train)) preds['confusion_matrix'].append(confusion_matrix(test_pred, y_test)) preds['prediction'].append(model.predict(new_test)) return pd.DataFrame(preds )<define_variables>
RF_grid.best_params_
Titanic - Machine Learning from Disaster
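For context, a minimal self-contained sketch of the grid-search pattern in the cells above (synthetic data stands in for the Titanic features, so shapes, parameter ranges and scores are illustrative only):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=300, n_features=8, random_state=42)  # toy stand-in for the Titanic features
param_grid = {
    "n_estimators": [50, 100],          # number of trees
    "max_depth": [3, 5, None],          # tree depth limit
    "criterion": ["gini", "entropy"],   # split quality measure
}
search = GridSearchCV(RandomForestClassifier(random_state=42),
                      param_grid=param_grid, cv=5, scoring="accuracy")
search.fit(X, y)
print(search.best_params_, round(search.best_score_, 3))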
20,728,635
evaluation = []<import_modules>
RF_cv_result = RF_grid.cv_results_['mean_test_score']
Titanic - Machine Learning from Disaster
20,728,635
from sklearn.linear_model import LogisticRegression, Perceptron, RidgeClassifier, SGDClassifier<choose_model_class>
from xgboost import XGBClassifier from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import RandomizedSearchCV
Titanic - Machine Learning from Disaster
20,728,635
linear_models = { 'LogisticRegression': LogisticRegression() , 'Perceptron': Perceptron() , 'RidgeClassifier': RidgeClassifier() , 'SGDClassifier': SGDClassifier() }<train_model>
xgb_model = XGBClassifier()
Titanic - Machine Learning from Disaster
20,728,635
train_model(linear_models )<predict_on_test>
distributions = {"n_estimators":[20, 40, 100, 120, 150],'max_depth': np.arange(1, 20), 'criterion' :['gini', 'entropy'], 'learning_rate' : [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3]}
Titanic - Machine Learning from Disaster
20,728,635
evaluation.append(preds(linear_models))<import_modules>
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7) XG_grid = RandomizedSearchCV(xgb_model, distributions, scoring="neg_log_loss", n_jobs=-1, cv=kfold )
Titanic - Machine Learning from Disaster
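A compact illustration of the randomized-search-with-stratified-folds pattern used above; sklearn's GradientBoostingClassifier stands in for XGBClassifier so the sketch needs no extra dependency, and the parameter ranges are made up:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold

X, y = make_classification(n_samples=400, n_features=10, random_state=7)  # toy data
distributions = {
    "n_estimators": [50, 100, 150],
    "max_depth": np.arange(1, 6),
    "learning_rate": [0.01, 0.1, 0.2],
}
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)  # keeps class balance per fold
search = RandomizedSearchCV(GradientBoostingClassifier(random_state=7),
                            distributions, n_iter=10, scoring="neg_log_loss",
                            cv=kfold, random_state=7, n_jobs=-1)
search.fit(X, y)
print(search.best_params_)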
20,728,635
from sklearn.naive_bayes import GaussianNB<define_variables>
XG_grid.fit(X_train, Y_train )
Titanic - Machine Learning from Disaster
20,728,635
naive_bayes = { 'GaussianNB': GaussianNB() }<train_model>
XG_grid.best_params_
Titanic - Machine Learning from Disaster
20,728,635
train_model(naive_bayes )<predict_on_test>
XG_grid_result = XG_grid.cv_results_['mean_test_score'] XG_grid_result
Titanic - Machine Learning from Disaster
20,728,635
evaluation.append(preds(naive_bayes))<import_modules>
predictions = RF_grid.predict(X_test) predictions
Titanic - Machine Learning from Disaster
20,728,635
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier<choose_model_class>
output = pd.DataFrame({'PassengerId': df_test.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
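The submission step above follows the standard Kaggle pattern; a minimal sketch with placeholder IDs and predictions (in the kernel these come from test.csv and the fitted grid search):

import pandas as pd

passenger_ids = [892, 893, 894]   # placeholder IDs; the kernel uses df_test.PassengerId
predictions = [0, 1, 0]           # placeholder model output
output = pd.DataFrame({"PassengerId": passenger_ids, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)   # Kaggle expects exactly these two columns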
23,020,427
tree_model = { 'DecisionTree': DecisionTreeClassifier() , 'ExtraTree': ExtraTreeClassifier() }<train_model>
df_train = pd.read_csv('../input/titanic/train.csv') df_test = pd.read_csv('../input/titanic/test.csv') data = df_train.append(df_test)
Titanic - Machine Learning from Disaster
23,020,427
train_model(tree_model )<predict_on_test>
df_train.isnull().sum()
Titanic - Machine Learning from Disaster
23,020,427
evaluation.append(preds(tree_model))<install_modules>
df_test.isnull().sum()
Titanic - Machine Learning from Disaster
23,020,427
!pip install catboost <choose_model_class>
df_train.duplicated().sum()
Titanic - Machine Learning from Disaster
23,020,427
boosting_model = { 'CatBoost': CatBoostClassifier(eval_metric='F1', logging_level='Silent'), 'XGBoost': XGBClassifier() , 'LGBM': LGBMClassifier() , 'AdaBoost': AdaBoostClassifier() }<train_model>
print("Training set size",len(df_train)) print("Test set size",len(df_train))
Titanic - Machine Learning from Disaster
23,020,427
train_model(boosting_model )<predict_on_test>
knn = KNNImputer(n_neighbors=3) data["Age"] = knn.fit_transform(data["Age"].values.reshape(-1,1)).ravel()
Titanic - Machine Learning from Disaster
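A self-contained sketch of the KNNImputer call in the cell above, on toy data. Note that when only a single column is passed, as in the kernel, the imputer has no other features to measure neighbour distance on; passing several numeric columns is the more typical use:

import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer

data = pd.DataFrame({"Age": [22.0, np.nan, 35.0, 58.0],
                     "Fare": [7.25, 71.28, 8.05, 26.55]})
imputer = KNNImputer(n_neighbors=2)
# Impute Age using both columns so neighbour distances are meaningful.
data[["Age", "Fare"]] = imputer.fit_transform(data[["Age", "Fare"]])
print(data)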
23,020,427
evaluation.append(preds(boosting_model))<import_modules>
data["Fare"] = data["Fare"].fillna(data[:891]["Fare"].mode() [0]) data["Embarked"] = data["Embarked"].fillna(data[:891]["Embarked"].mode() [0] )
Titanic - Machine Learning from Disaster
23,020,427
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier<choose_model_class>
data["Ticket_num"] = data["Ticket"].apply(lambda x:"".join(re.findall(r"\d+",x))) data["Name_last"] = data["Name"].apply(lambda x:x.split(",",1)[0] )
Titanic - Machine Learning from Disaster
23,020,427
ensemble_model = { 'RandomForest': RandomForestClassifier() , 'ExtraTrees': ExtraTreesClassifier() }<train_model>
Def = 0.5 data["family_survived"] = Def da_group = data.groupby(["Name_last","Fare"]) for group,all_data in da_group: if(len(all_data)>= 1): for index, row in all_data.iterrows() : if(row['family_survived'] == 0)or(row['family_survived']== 0.5): surviv_max = all_data.drop(index)['Survived'].max() surviv_min = all_data.drop(index)['Survived'].min() ids = row['PassengerId'] if(surviv_max == 1.0): data.loc[data['PassengerId'] == ids, 'family_survived'] = 1 elif(surviv_min==0.0): data.loc[data['PassengerId'] == ids, 'family_survived'] = 0 print("family survival:", data.loc[data['family_survived'] == 1].shape[0])
Titanic - Machine Learning from Disaster
23,020,427
train_model(ensemble_model )<predict_on_test>
da_group = data[["PassengerId","Name_last","Survived","family","Fare","Ticket_num"]].groupby(["Name_last","Ticket_num"]) for group,all_data in da_group: if(len(all_data)>= 1): for index, row in all_data.iterrows() : surviv_max = all_data.drop(index)['Survived'].max() surviv_min = all_data.drop(index)['Survived'].min() ids = row['PassengerId'] if(surviv_max == 1.0): data.loc[data['PassengerId'] == ids, 'family_survived'] = 1 elif(surviv_min==0.0): data.loc[data['PassengerId'] == ids, 'family_survived'] = 0 print("family survival:", data.loc[data['family_survived'] == 1].shape[0] )
Titanic - Machine Learning from Disaster
23,020,427
evaluation.append(preds(ensemble_model))<sort_values>
da_group = data.groupby(["Ticket_num"]) for group,all_data in da_group: if(len(all_data)>= 1): for index, row in all_data.iterrows() : if(row['family_survived'] == 0)or(row['family_survived']== 0.5): surviv_max = all_data.drop(index)['Survived'].max() surviv_min = all_data.drop(index)['Survived'].min() ids = row['PassengerId'] if(surviv_max == 1.0): data.loc[data['PassengerId'] == ids, 'family_survived'] = 1 elif(surviv_min==0.0): data.loc[data['PassengerId'] == ids, 'family_survived'] = 0 print("family survival:", data.loc[data['family_survived'] == 1].shape[0] )
Titanic - Machine Learning from Disaster
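The three cells above build a "family survival" feature by propagating known outcomes within groups of likely relatives. A condensed sketch of the same idea on made-up data, using the kernel's 0 / 0.5 / 1 encoding (0.5 means no family information):

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "PassengerId": [1, 2, 3, 4],
    "Last_Name": ["Smith", "Smith", "Jones", "Jones"],
    "Fare": [30.0, 30.0, 8.0, 8.0],
    "Survived": [1.0, np.nan, 0.0, np.nan],   # NaN = test-set passenger
})
df["family_survived"] = 0.5                    # default: no information
for _, group in df.groupby(["Last_Name", "Fare"]):
    if len(group) > 1:
        for idx, row in group.iterrows():
            others = group.drop(idx)["Survived"]
            if others.max() == 1.0:            # at least one relative known to survive
                df.loc[df["PassengerId"] == row["PassengerId"], "family_survived"] = 1
            elif others.min() == 0.0:          # all known relatives died
                df.loc[df["PassengerId"] == row["PassengerId"], "family_survived"] = 0
print(df)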
23,020,427
eval = pd.concat(evaluation ).sort_values('val_f1_score',ascending=False ).reset_index(drop=True) eval<import_modules>
data = data.drop(columns=["Name","Ticket","Ticket_num","Name_last"] )
Titanic - Machine Learning from Disaster
23,020,427
from sklearn.model_selection import GridSearchCV<import_modules>
data["Age"] = pd.qcut(data["Age"],7 )
Titanic - Machine Learning from Disaster
23,020,427
from sklearn.model_selection import GridSearchCV<define_search_space>
data["Fare"] = pd.qcut(data["Fare"],7 )
Titanic - Machine Learning from Disaster
23,020,427
params = { 'C': np.logspace(-2,2,20), 'solver': ['liblinear', 'lbfgs'], 'penalty': ['l2'] }<choose_model_class>
data["Cabin"] = data["Cabin"].apply(lambda x:str(x)[0] )
Titanic - Machine Learning from Disaster
23,020,427
tuning_model = GridSearchCV(estimator = linear_models['LogisticRegression'], param_grid = params, verbose=True, scoring = 'f1', n_jobs=5 )<predict_on_test>
la = LabelEncoder() la.fit(data["Sex"]) data["Sex"] = la.transform(data["Sex"]) la.fit(data["Age"]) data["Age"] = la.transform(data["Age"]) la.fit(data["Fare"]) data["Fare"] = la.transform(data["Fare"]) la.fit(data["Embarked"]) data["Embarked"] = la.transform(data["Embarked"]) la.fit(data["Cabin"]) data["Cabin"] = la.transform(data["Cabin"]) all_col = ["Sex","Age","Fare","Embarked","Pclass","family","family_survived","Cabin"] for i in all_col: std = StandardScaler() std.fit(data[:891].loc[:,i].values.reshape(-1, 1)) data[i] = std.transform(data.loc[:,i].values.reshape(-1, 1))
Titanic - Machine Learning from Disaster
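A short sketch of the encode-then-scale pattern used above, where the scaler statistics come from the training rows only and are then applied to the full concatenated frame (891 is the Titanic train-set size in the kernel; the data here is toy):

import pandas as pd
from sklearn.preprocessing import LabelEncoder, StandardScaler

data = pd.DataFrame({"Sex": ["male", "female", "female", "male"],
                     "Fare": [7.25, 71.28, 8.05, 26.55]})
n_train = 2                                                 # plays the role of data[:891]

data["Sex"] = LabelEncoder().fit_transform(data["Sex"])     # strings -> integer codes
scaler = StandardScaler()
scaler.fit(data.iloc[:n_train][["Fare"]])                   # fit on the training slice only
data["Fare"] = scaler.transform(data[["Fare"]]).ravel()     # transform train + test together
print(data)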
23,020,427
tuning_model.fit(x, y) predict = pd.DataFrame(tuning_model.best_estimator_.predict(new_test)) predict[0].value_counts()<find_best_params>
df_train = data[:891] df_test = data[891:]
Titanic - Machine Learning from Disaster
23,020,427
print(tuning_model.best_estimator_) print(tuning_model.best_params_ )<prepare_x_and_y>
df_train["Survived"].value_counts(normalize=True )
Titanic - Machine Learning from Disaster
23,020,427
y_pred = predict[0]<data_type_conversions>
from sklearn.model_selection import StratifiedKFold,cross_val_score,RandomizedSearchCV,GridSearchCV from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier,StackingClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score,recall_score,precision_score,roc_auc_score from xgboost import XGBClassifier
Titanic - Machine Learning from Disaster
23,020,427
<sort_values>
target = df_train.iloc[:,1] feature = df_train.iloc[:,2:] stk = StratifiedKFold(n_splits=5,random_state=123,shuffle=True) for train_index,test_index in stk.split(feature,target): x_train,y_train = feature.iloc[train_index],target.iloc[train_index] x_test,y_test = feature.iloc[test_index],target.iloc[test_index]
Titanic - Machine Learning from Disaster
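The fold loop above overwrites x_train/x_test on every pass, so only the split from the final iteration survives. A minimal sketch of the same StratifiedKFold pattern on toy data, with that behaviour made explicit in a comment:

import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold

feature = pd.DataFrame({"f1": np.arange(10), "f2": np.arange(10) % 3})
target = pd.Series([0, 1] * 5)

stk = StratifiedKFold(n_splits=5, shuffle=True, random_state=123)
for train_index, test_index in stk.split(feature, target):
    # Each pass overwrites these; after the loop only the last fold remains,
    # which is what the kernel above relies on.
    x_train, y_train = feature.iloc[train_index], target.iloc[train_index]
    x_test, y_test = feature.iloc[test_index], target.iloc[test_index]
print(len(x_train), len(x_test))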
23,020,427
def compare(col): count_diff = 0 for i, row in comparison.iterrows() : if(row['prediction'] != row[col]): count_diff += 1 return count_diff<load_from_csv>
gbdt = GradientBoostingClassifier(random_state=123) gbdt.fit(x_train,y_train) gbdt_pre = gbdt.predict(x_test) print("gbdt recall:",round(recall_score(y_test,gbdt_pre),2)) print("gbdt precision:",round(precision_score(y_test,gbdt_pre),2)) print("gbdt f1_score:",round(f1_score(y_test,gbdt_pre),2)) print("gbdt rou_auc_score:",round(roc_auc_score(y_test,gbdt_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
idx = pd.read_csv('../input/seleksidukungaib/test.csv')['idx']<data_type_conversions>
params = { "max_depth":(2,8), "learning_rate":(0.01,0.1), "min_samples_leaf":(1,3), "n_estimators":(100,300) } gbdt_gd = RandomizedSearchCV(gbdt,param_distributions=params,cv=5,scoring="roc_auc") gbdt_gd.fit(x_train,y_train) print(gbdt_gd.best_score_) print(gbdt_gd.best_estimator_) print(gbdt_gd.best_params_ )
Titanic - Machine Learning from Disaster
23,020,427
submission = pd.DataFrame({ 'idx': idx, 'isChurned': y_pred.astype(int) } )<count_values>
gbdt_pre = gbdt_gd.predict(x_test) print("gbdt_gd recall:",round(recall_score(y_test,gbdt_pre),2)) print("gbdt_gd precision:",round(precision_score(y_test,gbdt_pre),2)) print("gbdt_gd f1_score:",round(f1_score(y_test,gbdt_pre),2)) print("gbdt_gd rou_auc_score:",round(roc_auc_score(y_test,gbdt_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
submission['isChurned'].value_counts()<save_to_csv>
adaboost = AdaBoostClassifier(random_state=123) adaboost.fit(x_train,y_train) adaboost_pre = adaboost.predict(x_test) print("adaboost recall:",round(recall_score(y_test,adaboost_pre),2)) print("adaboost precision:",round(precision_score(y_test,adaboost_pre),2)) print("adaboost f1_score:",round(f1_score(y_test,adaboost_pre),2)) print("adaboost rou_auc_score:",round(roc_auc_score(y_test,adaboost_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
submission.to_csv('submission.csv', index=False )<save_to_csv>
params = { "learning_rate":(0.01,0.2), "n_estimators":(50,300) } ada_gd = RandomizedSearchCV(adaboost,param_distributions=params,cv=5,scoring="roc_auc") ada_gd.fit(x_train,y_train) print(ada_gd.best_score_) print(ada_gd.best_estimator_) print(ada_gd.best_params_ )
Titanic - Machine Learning from Disaster
23,020,427
submission.to_csv('submission.csv', index=False )<load_from_csv>
ada_pre = ada_gd.predict(x_test) print("ada_gd recall:",round(recall_score(y_test,ada_pre),2)) print("ada_gd precision:",round(precision_score(y_test,ada_pre),2)) print("ada_gd f1_score:",round(f1_score(y_test,ada_pre),2)) print("ada_gd rou_auc_score:",round(roc_auc_score(y_test,ada_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
!kaggle competitions submit -c seleksidukungaib -f submission.csv -m "No Comment"<set_options>
rf = RandomForestClassifier(random_state=123) rf.fit(x_train,y_train) rf_pre = rf.predict(x_test) print("rf recall:",round(recall_score(y_test,rf_pre),2)) print("rf precision:",round(precision_score(y_test,rf_pre),2)) print("rf f1_score:",round(f1_score(y_test,rf_pre),2)) print("rf rou_auc_score:",round(roc_auc_score(y_test,rf_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
sns.set() %matplotlib inline warnings.filterwarnings('ignore') %load_ext tensorboard.notebook pd.set_option('display.max_columns', None) <load_from_csv>
params = { "max_depth":(3,10), "n_estimators":(50,300), "min_samples_leaf":(1,5), "min_samples_split":(0.1,0.2), "min_impurity_decrease":(0,0.2), } rf_gd = RandomizedSearchCV(rf,param_distributions=params,cv=5,scoring="roc_auc") rf_gd.fit(x_train,y_train) print(rf_gd.best_score_) print(rf_gd.best_estimator_) print(rf_gd.best_params_ )
Titanic - Machine Learning from Disaster
23,020,427
odf=pd.read_csv('/kaggle/input/equipfails/equip_failures_training_set.csv',index_col=0) odft=pd.read_csv('/kaggle/input/equipfails/equip_failures_test_set.csv',index_col=0 )<data_type_conversions>
rfgd_pre = rf_gd.predict(x_test) print("rf_gd recall:",round(recall_score(y_test,rfgd_pre),2)) print("rf_gd precision:",round(precision_score(y_test,rfgd_pre),2)) print("rf_gd f1_score:",round(f1_score(y_test,rfgd_pre),2)) print("rf_gd rou_auc_score:",round(roc_auc_score(y_test,rfgd_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
df=odf.replace({'na':-999999}) Xt=odft.replace({'na':-999999}) Xt=Xt.astype(float) df=df.astype(float) df['target']=df['target'].astype(int )<prepare_x_and_y>
knn = KNeighborsClassifier() knn.fit(x_train,y_train) knn_pre = knn.predict(x_test) print("knn recall:",round(recall_score(y_test,knn_pre),2)) print("knn precision:",round(precision_score(y_test,knn_pre),2)) print("knn f1_score:",round(f1_score(y_test,knn_pre),2)) print("knn rou_auc_score:",round(roc_auc_score(y_test,knn_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
X=df.iloc[:,1:] y=df.iloc[:,0] Xn=normalize(X) Xtn=normalize(Xt )<split>
params = {'algorithm': ['auto'], 'weights': ['uniform', 'distance'], 'leaf_size': list(range(1,50,5)) , 'n_neighbors': [3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,20,22]} knn_gd=GridSearchCV(knn, param_grid = params, verbose=True, cv=10, scoring = "roc_auc") knn_gd.fit(x_train, y_train) print(knn_gd.best_score_) print(knn_gd.best_estimator_) print(knn_gd.best_params_ )
Titanic - Machine Learning from Disaster
23,020,427
X_train, X_test, y_train, y_test = train_test_split(Xn, y, test_size=0.2, random_state=8 )<import_modules>
knngd_pre = knn_gd.predict(x_test) print("knngd_pre recall:",round(recall_score(y_test,knngd_pre),2)) print("knngd_pre precision:",round(precision_score(y_test,knngd_pre),2)) print("knngd_pre f1_score:",round(f1_score(y_test,knngd_pre),2)) print("knngd_pre rou_auc_score:",round(roc_auc_score(y_test,knngd_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
from sklearn.metrics import f1_score from sklearn.ensemble import RandomForestClassifier as RFC from sklearn.model_selection import StratifiedKFold<compute_train_metric>
xgb = XGBClassifier(random_state=123,eval_metric="logloss",use_label_encoder=False) xgb.fit(x_train,y_train) xgb_pre = xgb.predict(x_test) print("xgb recall:",round(recall_score(y_test,xgb_pre),2)) print("xgb precision:",round(precision_score(y_test,xgb_pre),2)) print("xgb f1_score:",round(f1_score(y_test,xgb_pre),2)) print("xgb rou_auc_score:",round(roc_auc_score(y_test,xgb_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
def cross_validate_random_forest(X, y, Xt, yt, criterion, max_height, n_folds): train_acc=[] test_acc=[] general_acc=[] all_feature_importance=[] f1s=[] f1su=0 skf=StratifiedKFold(n_splits=n_folds, shuffle=True) for train_index, test_index in skf.split(X, y): Xtrain, Xtest = X.iloc[train_index], X.iloc[test_index] ytrain, ytest = y.iloc[train_index].values.ravel() , y.iloc[test_index].values.ravel() Xtrain_min, Xtrain_max = Xtrain.min() , Xtrain.max() Xtrain =(Xtrain - Xtrain_min)/(Xtrain_max-Xtrain_min) Xtest =(Xtest - Xtrain_min)/(Xtrain_max-Xtrain_min) Xtrain = Xtrain[Xtrain.columns[~(Xtrain_min==Xtrain_max)]] Xtest = Xtest [Xtest.columns[~(Xtrain_min==Xtrain_max)]] Xt_min, Xt_max = Xt.min() , Xt.max() Xt=(Xt - Xt_min)/(Xt_max-Xt_min) Xt = Xt[Xt.columns[~(Xt_min==Xt_max)]] dt=RFC(n_estimators=100,criterion=criterion,max_depth=max_height) dt.fit(Xtrain, ytrain) pred = dt.predict(Xtest) general_acc.append(accuracy_score(ytest,pred)) f1s.append(f1_score(ytest,pred)) f1su+=f1_score(ytest,pred) pred = dt.predict(Xtrain) train_acc.append(accuracy_score(ytrain,pred)) if Xt is not None and yt is not None: pred = dt.predict(Xt) test_acc.append(accuracy_score(yt,pred)) all_feature_importance.append(dt.feature_importances_) return [[np.mean(general_acc),np.std(general_acc)],[np.mean(train_acc),np.std(train_acc)],[np.mean(test_acc),np.std(test_acc)],all_feature_importance,f1s,f1su]<create_dataframe>
params ={ "max_depth":(2,8), "n_estimators":(100,400), "learning_rate":(0.5,1), "gamma":(0,0.2), "reg_lambda":(0.1,0.3), "reg_alpha":(0.1,0.3), "colsample_bytree":(0.8,1), } xgb_gd = RandomizedSearchCV(xgb,param_distributions=params,cv=5,scoring="roc_auc") xgb_gd.fit(x_train,y_train) print(xgb_gd.best_score_) print(xgb_gd.best_estimator_) print(xgb_gd.best_params_ )
Titanic - Machine Learning from Disaster
23,020,427
n_folds=10 criterion='gini' max_height=None results=cross_validate_random_forest(pd.DataFrame(X_train),pd.DataFrame(y_train),pd.DataFrame(X_test),pd.DataFrame(y_test),criterion, max_height, n_folds )<train_model>
xgbgd_pre = xgb_gd.predict(x_test) print("xgb_gd recall:",round(recall_score(y_test,xgbgd_pre),2)) print("xgb_gd precision:",round(precision_score(y_test,xgbgd_pre),2)) print("xgb_gd f1_score:",round(f1_score(y_test,xgbgd_pre),2)) print("xgb_gd rou_auc_score:",round(roc_auc_score(y_test,xgbgd_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
print('General Accuracy: ',results[0][0], 'STD: ',results[0][1]) print('Train Accuracy: ',results[1][0]) print('Test Accuracy: ',results[2][0]) print('F1 Avg: ',results[-1]/10 )<compute_train_metric>
logstic = LogisticRegression(random_state=123) logstic.fit(x_train,y_train) logstic_pre = logstic.predict(x_test) print("logstic recall:",round(recall_score(y_test,logstic_pre),2)) print("logstic precision:",round(precision_score(y_test,logstic_pre),2)) print("logstic f1_score:",round(f1_score(y_test,logstic_pre),2)) print("logstic rou_auc_score:",round(roc_auc_score(y_test,logstic_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
my_class = ExtraTreesClassifier(random_state=0) my_class.fit(X_train, y_train) y_pred= my_class.predict(X_test) print('accuracy: {}'.format(accuracy_score(y_test, y_pred))) print(f'F1: {f1_score(y_test,y_pred)}') confusion_matrix(y_test,y_pred )<compute_test_metric>
prams = { "C":[0.1,0.2,0.3,0.4,0.5], "solver":["newton-cg","lbfgs","sag","saga"] } logstic_gd=GridSearchCV(logstic, param_grid = prams, verbose=True, cv=10, scoring = "roc_auc") logstic_gd.fit(x_train, y_train) print(logstic_gd.best_score_) print(logstic_gd.best_estimator_) print(logstic_gd.best_params_ )
Titanic - Machine Learning from Disaster
23,020,427
my_class = AdaBoostClassifier(random_state=0) my_class.fit(X_train, y_train) y_pred= my_class.predict(X_test) print('accuracy: {}'.format(accuracy_score(y_test, y_pred))) print(f'F1: {f1_score(y_test,y_pred)}') confusion_matrix(y_test,y_pred )<compute_train_metric>
logsticgd_pre = logstic_gd.predict(x_test) print("logstic recall:",round(recall_score(y_test,logsticgd_pre),2)) print("logstic precision:",round(precision_score(y_test,logsticgd_pre),2)) print("logstic f1_score:",round(f1_score(y_test,logsticgd_pre),2)) print("logstic rou_auc_score:",round(roc_auc_score(y_test,logsticgd_pre),2))
Titanic - Machine Learning from Disaster
23,020,427
lg = LogisticRegression(solver='lbfgs', random_state=18) lg.fit(X_train, y_train) logistic_prediction = lg.predict(X_test) score = metrics.accuracy_score(y_test, logistic_prediction) print(score) confusion_matrix(y_test,logistic_prediction )<train_model>
df_t = df_test.iloc[:,2:] ids = df_test.iloc[:,0] df_t
Titanic - Machine Learning from Disaster
23,020,427
data_dmatrix = xgb.DMatrix(data=Xn,label=y) xgc = xgb.XGBClassifier(objective ='reg:logistic', colsample_bytree = 0.15, learning_rate = 0.1, max_depth = 20, alpha = 12, n_estimators = 700) xgc.fit(X,y )<predict_on_test>
Titanic - Machine Learning from Disaster
23,020,427
pred_train=xgc.predict(X) pred_train.sum()<predict_on_test>
Titanic - Machine Learning from Disaster
23,020,427
pred_test=xgc.predict(Xt) pred_test.sum()<create_dataframe>
sub = pd.DataFrame() sub["PassengerId"] = ids sub["Survived"] = knn_gd.predict(df_t) sub["Survived"] = sub["Survived"].astype(int) sub
Titanic - Machine Learning from Disaster
23,020,427
<save_to_csv><EOS>
sub.to_csv("submission.csv",index=None) print("End!" )
Titanic - Machine Learning from Disaster
23,121,995
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv>
TRAIN_PATH = ".. /input/titanic/train.csv" TEST_PATH = ".. /input/titanic/test.csv" SAMPLE_SUBMISSION_PATH = ".. /input/titanic/gender_submission.csv" SUBMISSION_PATH = "submission.csv" ID = "PassengerId" TARGET = "Survived" SEED = 123 N_SPLITS = 5 GRID_SEARCH_CV_NUM = 5 SCORING = "roc_auc"
Titanic - Machine Learning from Disaster
23,121,995
file_name='submision.csv' yt.to_csv(file_name,index=True) <load_pretrained>
copyfile(src = ".. /input/titanic-preprocess/titanic_preprocess_py.py", dst = ".. /working/titanic_preprocess_py.py") train,test = loadAndPreprocess(TRAIN_PATH,TEST_PATH) train.head()
Titanic - Machine Learning from Disaster
23,121,995
filename = 'Final_Model.mod' pickle.dump(xgc, open(filename, 'wb'))<load_from_csv>
y = train[TARGET] X = train.drop([ID,TARGET],axis=1) X_test = test.drop([ID,TARGET],axis=1) stk = StratifiedKFold(n_splits=N_SPLITS,random_state=SEED,shuffle=True) for train_index,val_index in stk.split(X,y): X_train,y_train = X.iloc[train_index],y.iloc[train_index] X_val,y_val = X.iloc[val_index],y.iloc[val_index] knn = KNeighborsClassifier() knn.fit(X_train,y_train) pred_knn = knn.predict(X_val) print("knn recall:",round(recall_score(y_val,pred_knn),2)) print("knn precision:",round(precision_score(y_val,pred_knn),2)) print("knn f1_score:",round(f1_score(y_val,pred_knn),2)) print("knn rou_auc_score:",round(roc_auc_score(y_val,pred_knn),2)) params = {'algorithm': ['auto'], 'weights': ['uniform', 'distance'], 'leaf_size': range(1,30), 'n_neighbors': range(3,20)} gs =GridSearchCV(knn, param_grid = params, verbose=True, cv=GRID_SEARCH_CV_NUM, scoring = SCORING ) gs.fit(X_train, y_train) print(gs.best_score_) print(gs.best_estimator_) print(gs.best_params_) pred_val = gs.predict(X_val) print("gs recall:",round(recall_score(y_val,pred_val),2)) print("gs precision:",round(precision_score(y_val,pred_val),2)) print("gs f1_score:",round(f1_score(y_val,pred_val),2)) print("gs rou_auc_score:",round(roc_auc_score(y_val,pred_val),2))
Titanic - Machine Learning from Disaster
23,121,995
dftreino = pd.read_csv('../input/train.csv') dfteste = pd.read_csv('../input/test.csv') dftotal = dfteste.append(dftreino)<data_type_conversions>
sub = pd.read_csv(SAMPLE_SUBMISSION_PATH) sub[TARGET] = gs.predict(X_test ).astype(int) sub.to_csv(SUBMISSION_PATH,index=False) sub.head(10 )
Titanic - Machine Learning from Disaster
22,931,851
dftotal['populacao']=dftotal['populacao'].str.replace('(','' ).str.replace(')', '' ).str.replace(',','' ).str.replace('.','' ).astype(dtype=np.int )<data_type_conversions>
import pandas as pd import numpy as np import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
Titanic - Machine Learning from Disaster
22,931,851
dftotal['area']=dftotal['area'].str.replace(',','' ).astype(float) <data_type_conversions>
train = pd.read_csv(".. /input/titanic/train.csv", index_col="PassengerId") X_test = pd.read_csv(".. /input/titanic/test.csv", index_col="PassengerId") print(train.info()) print(X_test.info() )
Titanic - Machine Learning from Disaster
22,931,851
dftotal['densidade_dem']=dftotal['densidade_dem'].str.replace(',','' ).astype(float) dftotal['cat_porte'] = dftotal['porte'].astype('category' ).cat.codes dftotal['cat_regiao'] = dftotal['regiao'].astype('category' ).cat.codes dftotal['cat_estado'] = dftotal['estado'].astype('category' ).cat.codes<data_type_conversions>
print("TrainData") print("---------------") print(train.isna().sum()) print("TestData") print("---------------") print(X_test.isna().sum() ," " )
Titanic - Machine Learning from Disaster
22,931,851
dftotal['gasto_pc_educacao'].fillna(dftotal['gasto_pc_educacao'].mean() , inplace=True) dftotal['perc_pop_econ_ativa'].fillna(dftotal['perc_pop_econ_ativa'].mean() , inplace=True) <define_variables>
train.loc[~train.isna().Cabin, "Cabin"] = 1 train.Cabin.fillna(0, inplace=True) X_test.loc[~X_test.isna().Cabin, "Cabin"] = 1 X_test.Cabin.fillna(0, inplace=True )
Titanic - Machine Learning from Disaster
22,931,851
feats = [c for c in dftotal.columns if c not in ['codigo_mun', 'comissionados_por_servidor','nota_mat', 'densidade_dem', 'participacao_transf_receita', 'servidores', 'gasto_pc_saude', 'hab_p_medico', 'exp_vida', 'exp_anos_estudo', 'regiao', 'estado', 'porte', 'municipio']] <split>
train.Age = train.groupby(["Pclass",pd.cut(train.Fare, np.arange(0, 300, 2)) ],group_keys=False ).Age.apply(lambda g: g.fillna(g.mean())) train.Age.fillna(train.Age.mean() , inplace=True) train = train.astype({"Age":"int64"}) X_test.Age = X_test.groupby(["Pclass",pd.cut(X_test.Fare, np.arange(0, 300, 2)) ],group_keys=False ).Age.apply(lambda g: g.fillna(g.mean())) X_test.Age.fillna(X_test.Age.mean() , inplace=True) X_test = X_test.astype({"Age":"int64"}) print(train.Age.isna().sum() , X_test.Age.isna().sum() )
Titanic - Machine Learning from Disaster
22,931,851
dftreino=dftotal[~dftotal.nota_mat.isnull() ] dfsubmissao=dftotal[dftotal.nota_mat.isnull() ]<split>
X_test.Fare.fillna(X_test[X_test.Pclass==3].Fare.mean() , inplace=True )
Titanic - Machine Learning from Disaster
22,931,851
dftreino2, dfteste = train_test_split(dftreino, test_size=0.20, random_state=42) dftreino2, dfvalida = train_test_split(dftreino2, test_size=0.20, random_state=42) rf = RandomForestClassifier(random_state=42, n_estimators=200, min_samples_split=5, max_depth=4 )<predict_on_test>
train = pd.get_dummies(train, columns=["Sex","Embarked"]) X_test = pd.get_dummies(X_test, columns=["Sex","Embarked"] )
Titanic - Machine Learning from Disaster
22,931,851
rf.fit(dftreino2[feats], dftreino2['nota_mat']) predicao = rf.predict(dfvalida[feats] )<compute_test_metric>
X = train.drop(columns=["Survived","Name","Ticket"]) y = train["Survived"] X_test.drop(columns=["Name","Ticket"], inplace=True) X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.5) print(y_train.value_counts()) print(y_valid.value_counts() )
Titanic - Machine Learning from Disaster
22,931,851
accuracy_score(dfvalida['nota_mat'], predicao )<compute_test_metric>
randomforest = RandomForestClassifier(n_estimators=200, max_depth=5) model1 = randomforest.fit(X_train, y_train) pred_valid = model1.predict(X_valid) pred_valid_prob = model1.predict_proba(X_valid) print(classification_report(y_valid, pred_valid)) print(model1.score(X_valid, y_valid)) print(roc_auc_score(y_valid,pred_valid_prob[:,1]))
Titanic - Machine Learning from Disaster
22,931,851
accuracy_score(dfteste['nota_mat'], rf.predict(dfteste[feats]))<save_to_csv>
model = randomforest.fit(X, y) pred = model.predict(X_test) result = pd.DataFrame(pred, index=X_test.index, columns=["Survived"]) result.to_csv("submission.csv") result.head()
Titanic - Machine Learning from Disaster
14,055,799
dfsubmissao['nota_mat']=rf.predict(dfsubmissao[feats]) dfsubmissao[['codigo_mun','nota_mat']].to_csv('dfresultado.csv', index=False) <train_model>
%matplotlib inline plt.style.use('seaborn-whitegrid') warnings.filterwarnings('ignore' )
Titanic - Machine Learning from Disaster
14,055,799
<import_modules>
test = pd.read_csv('../input/titanic/test.csv') train = pd.read_csv('../input/titanic/train.csv')
Titanic - Machine Learning from Disaster
14,055,799
np.random.seed(10) <drop_column>
ntrain = train.shape[0] ntest = test.shape[0] y_train = train['Survived'].values passId = test['PassengerId'] data = pd.concat(( train, test)) print("data size is: {}".format(data.shape))
Titanic - Machine Learning from Disaster
14,055,799
def trim_all_columns(df): trim_strings = lambda x: x.strip() if isinstance(x, str)else x return df.applymap(trim_strings )<define_variables>
train['Survived'].value_counts()
Titanic - Machine Learning from Disaster
14,055,799
header_list = ["AAGE", "ACLSWKR", "ADTIND", "ADTOCC", "AHGA","AHRSPAY","AHRSCOL", "AMARITL","AMJIND","AMJOCC","ARACE","AREORGN","ASEX","AUNMEM", "AUNTYPE","AWKSTAT","CAPGAIN","CAPLOSS","DIVVAL", "FILESTAT", "GRINREG","GRINST","HHDFMX","HHDREL", "MARSUPWT","MIGMTR1", "MIGMTR3","MIGMTR4","MIGSAME", "MIGSUN","NOEMP","PARENT", "PEFNTVTY","PEMNTVTY","PENATVTY","PRCITSHP","SEOTR", "VETQVA","VETYN","WKSWORK","YEAR","INCCAT"] <load_from_csv>
data.isnull().sum()
Titanic - Machine Learning from Disaster
14,055,799
d_train=pd.read_csv('/kaggle/input/ml-challenge-week6/census-income.data',index_col=False,names=header_list) d_test=pd.read_csv('/kaggle/input/ml-challenge-week6/census-income.test',index_col=False,names=header_list )<drop_column>
data.Name.value_counts()
Titanic - Machine Learning from Disaster
14,055,799
d_test=trim_all_columns(d_test) d_train=trim_all_columns(d_train )<categorify>
temp = data.copy() temp['Initial'] = 0 temp['Initial'] = data.Name.str.extract('([A-Za-z]+)\.')
Titanic - Machine Learning from Disaster
14,055,799
label = {'50000+.':1, '- 50000.':0} d_train['INCCAT']=d_train['INCCAT'].map(label) d_test['INCCAT']=d_test['INCCAT'].map(label) d_test['AREORGN']=d_test['AREORGN'].fillna('NA') d_test=d_test.fillna('?') <create_dataframe>
temp['Initial'].value_counts()
Titanic - Machine Learning from Disaster
14,055,799
<feature_engineering>
def survpct(col): return temp.groupby(col)['Survived'].mean() survpct('Initial' )
Titanic - Machine Learning from Disaster
14,055,799
d_train.loc[(d_train['AWKSTAT']=='Children or Armed Forces')&(d_train['AAGE']<=18),'AWKSTAT']='Children' d_test.loc[(d_test['AWKSTAT']=='Children or Armed Forces')&(d_test['AAGE']<=18),'AWKSTAT']='Children' numb=d_train.groupby(['AWKSTAT','AMJIND'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.reset_index() print("train=", d_train.shape," test=", d_test.shape) <feature_engineering>
temp.loc[temp['Initial'] == 'Dona']
Titanic - Machine Learning from Disaster
14,055,799
pd.set_option('display.max_rows', None) numb=d_train.groupby(['AAGE'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.reset_index() <feature_engineering>
temp.loc[temp['Initial'] == 'Dona', 'Initial'] = 'Mrs'
Titanic - Machine Learning from Disaster
14,055,799
d_train.loc[d_train['ACLSWKR']=="Never worked",['INCCAT']]=0 d_train.loc[d_train['ACLSWKR']=="Without pay",['INCCAT']]=0<categorify>
temp = temp.reset_index(drop=True) temp['Age'] = temp.groupby('Initial')['Age'].apply(lambda x: x.fillna(x.mean())) temp[31:50]
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['ACLSWKR'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] a=numb.copy() dummy_le=LabelEncoder() a=a.reset_index() a.iloc[:, 0]=dummy_le.fit_transform(numb.iloc[:, 0]) numb=numb.sort_values(numb.columns[2] ).reset_index() le_ACLSWKR = LabelEncoder() le_ACLSWKR.classes_=numb.iloc[:, 0] numb.iloc[:, 0] = le_ACLSWKR.transform(numb.iloc[:, 0]) plt.figure(1) plt.subplot(211) plt.plot(a['ACLSWKR'],a['Ratio']) plt.subplot(212) plt.plot(numb['ACLSWKR'],numb['Ratio']) <categorify>
temp['Initial'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dona' , 'Dr', 'Jonkheer', 'Lady', 'Major', 'Master', 'Miss' ,'Mlle', 'Mme', 'Mr', 'Mrs', 'Ms', 'Rev', 'Sir'], ['Sacrificed', 'Respected', 'Nobles', 'Mr', 'Mrs', 'Respected', 'Mr', 'Nobles', 'Respected', 'Kids', 'Miss', 'Nobles', 'Nobles', 'Mr', 'Mrs', 'Nobles', 'Sacrificed', 'Nobles'],inplace=True) temp['Initial'].replace(['Kids', 'Miss', 'Mr', 'Mrs', 'Nobles', 'Respected', 'Sacrificed'], [4, 4, 2, 5, 6, 3, 1], inplace=True )
Titanic - Machine Learning from Disaster
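The census-income cells above repeatedly apply the same idea: compute each category's positive-income ratio, sort the categories by it, and encode them so the integer code is monotone in that ratio (the kernel does this by overwriting LabelEncoder.classes_). A plain-dictionary sketch of the same idea on made-up data:

import pandas as pd

df = pd.DataFrame({"ACLSWKR": ["Private", "Private", "Self-employed", "Never worked", "Private"],
                   "INCCAT": [1, 0, 1, 0, 1]})
# Positive-target ratio per category, then rank categories by that ratio.
ratio = df.groupby("ACLSWKR")["INCCAT"].mean().sort_values()
code_map = {cat: code for code, cat in enumerate(ratio.index)}
df["ACLSWKR"] = df["ACLSWKR"].map(code_map)     # low code = low income ratio
print(code_map)
print(df)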
14,055,799
d_train.loc[:, 'ACLSWKR'] = le_ACLSWKR.transform(d_train.loc[:, 'ACLSWKR']) d_test.loc[:, 'ACLSWKR'] = le_ACLSWKR.transform(d_test.loc[:, 'ACLSWKR'] )<feature_engineering>
temp['Age_Range'] = pd.qcut(temp['Age'], 10) survpct('Age_Range')
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AHGA'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb.sort_values(numb.columns[2] ).reset_index()<categorify>
temp['Agroup'] = 0 temp.loc[temp['Age'] < 1.0, 'Agroup'] = 1 temp.loc[(temp['Age'] >= 1.0)&(temp['Age'] <= 3.0), 'Agroup'] = 2 temp.loc[(temp['Age'] > 3.0)&(temp['Age'] < 11.0), 'Agroup'] = 7 temp.loc[(temp['Age'] >= 11.0)&(temp['Age'] < 15.0), 'Agroup'] = 13 temp.loc[(temp['Age'] >= 15.0)&(temp['Age'] < 18.0), 'Agroup'] = 16 temp.loc[(temp['Age'] >= 18.0)&(temp['Age'] <= 20.0), 'Agroup'] = 18 temp.loc[(temp['Age'] > 20.0)&(temp['Age'] <= 22.0), 'Agroup'] = 21 temp.loc[(temp['Age'] > 22.0)&(temp['Age'] <= 26.0), 'Agroup'] = 24 temp.loc[(temp['Age'] > 26.0)&(temp['Age'] <= 30.0), 'Agroup'] = 28 temp.loc[(temp['Age'] > 30.0)&(temp['Age'] <= 32.0), 'Agroup'] = 31 temp.loc[(temp['Age'] > 32.0)&(temp['Age'] <= 34.0), 'Agroup'] = 33 temp.loc[(temp['Age'] > 34.0)&(temp['Age'] <= 38.0), 'Agroup'] = 36 temp.loc[(temp['Age'] > 38.0)&(temp['Age'] <= 52.0), 'Agroup'] = 45 temp.loc[(temp['Age'] > 52.0)&(temp['Age'] <= 75.0), 'Agroup'] = 60 temp.loc[temp['Age'] > 75.0, 'Agroup'] = 78 temp.head()
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AHGA'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_AHGA = LabelEncoder() le_AHGA.classes_=numb.iloc[:, 0] <categorify>
temp.loc[(temp['Sex'] == 'male'), 'Sex'] = 1 temp.loc[(temp['Sex'] == 'female'), 'Sex'] = 2 temp.loc[(temp['Age'] < 1), 'Sex'] = 3 survpct('Sex' )
Titanic - Machine Learning from Disaster
14,055,799
d_train.loc[:, 'AHGA'] = le_AHGA.transform(d_train.loc[:, 'AHGA']) d_test.loc[:, 'AHGA'] = le_AHGA.transform(d_test.loc[:, 'AHGA'] )<categorify>
temp.loc[(temp['SibSp'] == 0)&(temp['Parch'] == 0), 'Alone'] = 1 temp['Family'] = temp['Parch'] + temp['SibSp'] + 1 temp.head(n=10 )
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AHRSCOL'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_AHRSCOL = LabelEncoder() le_AHRSCOL.classes_=numb.iloc[:, 0]<categorify>
bag('Parch', 'Survived', 'Survived per Parch', 'Parch Survived vs Not Survived' )
Titanic - Machine Learning from Disaster
14,055,799
d_train.loc[:, 'AHRSCOL'] = le_AHRSCOL.transform(d_train.loc[:, 'AHRSCOL']) d_test.loc[:, 'AHRSCOL'] = le_AHRSCOL.transform(d_test.loc[:, 'AHRSCOL'] )<categorify>
temp.loc[(temp.Embarked.isnull())]
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AMARITL'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_AMARITL = LabelEncoder() le_AMARITL.classes_=numb.iloc[:, 0] d_train.loc[:, 'AMARITL'] = le_AMARITL.transform(d_train.loc[:, 'AMARITL']) d_test.loc[:, 'AMARITL'] = le_AMARITL.transform(d_test.loc[:, 'AMARITL'] )<feature_engineering>
temp.loc[(temp.Ticket == '113572')]
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AMJIND','ADTIND'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.reset_index() numb.sort_values(numb.columns[0]) nu=d_train.groupby(['AMJIND'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) nu['Ratio']=nu['Nr']/nu['Tot'] nu=nu.reset_index() for index, row in numb.iterrows() : if numb.loc[numb['ADTIND']==row['ADTIND'],'Tot'].values[0]<300: numb.loc[numb['ADTIND']==row['ADTIND'],'Ratio']=nu.loc[nu['AMJIND']==row['AMJIND']]['Ratio'].values[0] numb.loc[numb['AMJIND']=='Armed Forces','Ratio']=nu.loc[nu['AMJIND']=='Manufacturing-durable goods']['Ratio'].values[0] for index, row in numb.iterrows() : d_train.loc[d_train['ADTIND']==row['ADTIND'],'AMJIND']=row['Ratio'] d_test.loc[d_test['ADTIND']==row['ADTIND'],'AMJIND']=row['Ratio'] <feature_engineering>
temp.sort_values(['Ticket'], ascending=True)[55:70]
Titanic - Machine Learning from Disaster
14,055,799
pd.set_option('display.max_rows', 50) numb=d_train.groupby(['AMJOCC','ADTOCC'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.reset_index() numb.sort_values(numb.columns[0]) nu=d_train.groupby(['AMJOCC'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) nu['Ratio']=nu['Nr']/nu['Tot'] nu=nu.reset_index() for index, row in numb.iterrows() : if numb.loc[numb['ADTOCC']==row['ADTOCC'],'Tot'].values[0]<300: numb.loc[numb['ADTOCC']==row['ADTOCC'],'Ratio']=nu.loc[nu['AMJOCC']==row['AMJOCC']]['Ratio'].values[0] numb.loc[numb['AMJOCC']=='Armed Forces','Ratio']=nu.loc[nu['AMJOCC']=='Technicians and related support']['Ratio'].values[0] numb for index, row in numb.iterrows() : d_train.loc[d_train['ADTOCC']==row['ADTOCC'],'AMJOCC']=row['Ratio'] d_test.loc[d_test['ADTOCC']==row['ADTOCC'],'AMJOCC']=row['Ratio'] <categorify>
temp.loc[(temp.Embarked.isnull()), 'Embarked'] = 'S' temp.loc[(temp.Embarked.isnull())]
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['ARACE'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_ARACE = LabelEncoder() le_ARACE.classes_=numb.iloc[:, 0] d_train.loc[:, 'ARACE'] = le_ARACE.transform(d_train.loc[:, 'ARACE']) d_test.loc[:, 'ARACE'] = le_ARACE.transform(d_test.loc[:, 'ARACE'] )<categorify>
temp['Embarked'] = temp['Embarked'].factorize() [0] temp[11:20]
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AREORGN'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_AREORGN = LabelEncoder() le_AREORGN.classes_=numb.iloc[:, 0] d_train.loc[:, 'AREORGN'] = le_AREORGN.transform(d_train.loc[:, 'AREORGN']) d_test.loc[:, 'AREORGN'] = le_AREORGN.transform(d_test.loc[:, 'AREORGN'] )<categorify>
temp['Priority'] = 0 temp.loc[(temp['Initial'] == 6), 'Priority'] = 1 temp.loc[(temp['Pclass'] == 1)&(temp['Sex'] == 2), 'Priority'] = 2 temp.loc[(temp['Age'] < 1), 'Priority'] = 3 temp.loc[(temp['Pclass'] == 1)&(temp['Age'] <= 17), 'Priority'] = 4 temp.loc[(temp['Pclass'] == 2)&(temp['Age'] <= 17), 'Priority'] = 5
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['ASEX'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_ASEX = LabelEncoder() le_ASEX.classes_=numb.iloc[:, 0] d_train.loc[:, 'ASEX'] = le_ASEX.transform(d_train.loc[:, 'ASEX']) d_test.loc[:, 'ASEX'] = le_ASEX.transform(d_test.loc[:, 'ASEX'] )<categorify>
temp.Priority.value_counts()
Titanic - Machine Learning from Disaster
14,055,799
numb=d_train.groupby(['AUNMEM','AUNTYPE'])['INCCAT'].agg({'Nr':'sum','Tot':'count'}) numb['Ratio']=numb['Nr']/numb['Tot'] numb=numb.sort_values(numb.columns[2]) numb=numb.reset_index() le_AUN = LabelEncoder() le_AUN.classes_=numb.iloc[:, 0] +" "+ numb.iloc[:, 1] d_train.loc[:,'AUNMEM'] = le_AUN.transform(( d_train.loc[:, 'AUNMEM'] + " " + d_train.loc[:,'AUNTYPE'])) d_test.loc[:,'AUNMEM'] = le_AUN.transform(( d_test.loc[:, 'AUNMEM'] + " " + d_test.loc[:,'AUNTYPE'])) d_train=d_train.drop(['AUNTYPE'],axis=1) d_test=d_test.drop(['AUNTYPE'],axis=1) <categorify>
temp['F1'] = temp['Priority'] temp['F2'] = temp['Initial'] temp['F3'] = temp['NumName'] temp['F4'] = temp['Family'] temp['F5'] = temp['Embarked'] temp['F6'] = temp['Sex'] temp['F7'] = temp['Pclass']
Titanic - Machine Learning from Disaster