Columns:
kernel_id: int64 (values 24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
Each row below follows the pattern kernel_id, prompt (preceding code cells, ending in a tag such as <train_on_grid> that names the operation of the completion), completion (the next code cell), comp_name (the competition title).
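A minimal sketch of reading rows with this schema and splitting off the trailing operation tag. It assumes the rows have been exported to a CSV file; the filename code_cells.csv and the split_tag helper are hypothetical, not part of any documented loader.

import pandas as pd

# Hypothetical export of the rows shown below; the real dataset may ship in another format.
df = pd.read_csv("code_cells.csv")  # columns: kernel_id, prompt, completion, comp_name

def split_tag(prompt):
    # Each prompt ends with a tag such as "<train_on_grid>" naming the completion's operation.
    if prompt.endswith(">") and "<" in prompt:
        code, _, tag = prompt.rpartition("<")
        return code, tag.rstrip(">")
    return prompt, None

df[["prompt_code", "tag"]] = df["prompt"].apply(lambda p: pd.Series(split_tag(p)))
print(df["tag"].value_counts().head())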
13,506,177
param_grid = { 'C':[0.01, 0.03, 0.1, 0.3, 1, 3, 10], 'max_iter':[50, 100, 150], } LR_clf = LogisticRegression() grid_search = GridSearchCV(LR_clf, param_grid, cv=kfold, scoring='accuracy', verbose=1) grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) LR_clf = grid_search.best_estimator_ scores = cross_val_score(LR_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) LR_clf.fit(X_prep_train, y_train) y_pred = LR_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) df_comp = pd.DataFrame([['Logistic Regression', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp<train_on_grid>
best_pipeline = automl.best_pipeline
Titanic - Machine Learning from Disaster
13,506,177
param_grid = { 'n_neighbors': range(1, 25, 5), 'weights':['uniform', 'distance'], 'algorithm':['auto', 'ball_tree', 'kd_tree', 'brute'], } KNN_clf = KNeighborsClassifier() grid_search = GridSearchCV(KNN_clf, param_grid, cv=kfold, scoring='accuracy', verbose=1) grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) KNN_clf = grid_search.best_estimator_ scores = cross_val_score(KNN_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) KNN_clf.fit(X_prep_train, y_train) y_pred = KNN_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) temp = pd.DataFrame([['KNN', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp = pd.concat([df_comp, temp], ignore_index=True) df_comp<train_on_grid>
best_pipeline.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
13,506,177
param_grid = { } NB_clf = GaussianNB() grid_search = GridSearchCV(NB_clf, param_grid, cv=kfold, scoring='accuracy') grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) NB_clf = grid_search.best_estimator_ scores = cross_val_score(NB_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) NB_clf.fit(X_prep_train, y_train) y_pred = NB_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) temp = pd.DataFrame([['Naive Bayes', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp = pd.concat([df_comp, temp], ignore_index=True) df_comp<train_on_grid>
best_pipeline.score(X_holdout, y_holdout, objectives=["accuracy binary"])
Titanic - Machine Learning from Disaster
13,506,177
param_grid = { 'criterion':['gini', 'entropy'], 'splitter':['best','random'], 'max_depth':[None] + list(range(1,20, 2)), 'max_features':[None, 'auto', 'sqrt', 'log2'], 'ccp_alpha':[0.0, 0.01, 0.03, 0.1] } DT_clf = DecisionTreeClassifier() grid_search = GridSearchCV(DT_clf, param_grid, cv=kfold, scoring='accuracy', verbose=1) grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) DT_clf = grid_search.best_estimator_ scores = cross_val_score(DT_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) DT_clf.fit(X_prep_train, y_train) y_pred = DT_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) temp = pd.DataFrame([['Decision Tree', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp = pd.concat([df_comp, temp], ignore_index=True) df_comp<train_on_grid>
permutation_importance = evalml.model_understanding.calculate_permutation_importance(best_pipeline, X_holdout, y_holdout, objective="accuracy binary")
Titanic - Machine Learning from Disaster
13,506,177
param_grid = { 'C':[0.01, 0.03, 0.1, 0.3, 1, 3, 10], 'kernel':['linear', 'poly', 'rbf', 'sigmoid'], 'degree':list(range(1,5)), } SVM_clf = SVC() grid_search = GridSearchCV(SVM_clf, param_grid, cv=kfold, scoring='accuracy', verbose=1) grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) SVM_clf = grid_search.best_estimator_ scores = cross_val_score(SVM_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) SVM_clf.fit(X_prep_train, y_train) y_pred = SVM_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) temp = pd.DataFrame([['SVM', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp = pd.concat([df_comp, temp], ignore_index=True) df_comp<train_on_grid>
test = pd.read_csv('../input/titanic/test.csv')
Titanic - Machine Learning from Disaster
13,506,177
param_grid = { 'n_estimators':range(50,200,50), 'criterion':['gini', 'entropy'], 'bootstrap':[True, False], 'max_features':['auto', 'sqrt', 'log2'], 'ccp_alpha':[0.01, 0.03, 0.1] } RF_clf = RandomForestClassifier() grid_search = GridSearchCV(RF_clf, param_grid, cv=kfold, scoring='accuracy', verbose=1, n_jobs=-1) grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) RF_clf = grid_search.best_estimator_ scores = cross_val_score(RF_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) RF_clf.fit(X_prep_train, y_train) y_pred = RF_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) temp = pd.DataFrame([['Random Forest', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp = pd.concat([df_comp, temp], ignore_index=True) df_comp<train_on_grid>
test_es = ft.EntitySet(id='Titanic_test') test_es.entity_from_dataframe(entity_id='data', dataframe=test, make_index=True, index='index')
Titanic - Machine Learning from Disaster
13,506,177
param_grid = { 'loss':['deviance', 'exponential'], 'learning_rate':[0.01, 0.03, 0.1, 0.3, 1], 'n_estimators':list(range(50,200,50)), 'criterion':['friedman_mse', 'mse', 'mae'], 'ccp_alpha':[0.01, 0.03, 0.1] } GB_clf = GradientBoostingClassifier() grid_search = GridSearchCV(GB_clf, param_grid, cv=kfold, scoring='accuracy', verbose=1, n_jobs=-1) grid_search.fit(X_prep_train, y_train) print(grid_search.best_params_) GB_clf = grid_search.best_estimator_ scores = cross_val_score(GB_clf, X_prep_train, y_train, scoring='accuracy', cv=kfold) GB_clf.fit(X_prep_train, y_train) y_pred = GB_clf.predict(X_prep_train) acc = accuracy_score(y_train, y_pred) temp = pd.DataFrame([['Grad.Boost.', np.round(acc,2), np.round(np.mean(scores),2)]], columns=['Algo', 'Train_acc', 'Val_acc']) df_comp = pd.concat([df_comp, temp], ignore_index=True) df_comp<normalization>
test_feature = ft.calculate_feature_matrix(feature_defs, test_es)
Titanic - Machine Learning from Disaster
13,506,177
X_test = df_test[cat_feats + num_feats] X_prep_test = full_pipeline.transform(X_test)<predict_on_test>
ypred = best_pipeline.predict(test).astype(int)
Titanic - Machine Learning from Disaster
13,506,177
y_pred_test = SVM_clf.predict(X_prep_test)<save_to_csv>
predictions = pd.DataFrame(ypred, columns=['Survived']) submission = pd.concat([test.PassengerId, predictions], axis=1) submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
13,478,547
submission = pd.DataFrame({ "id": df_test['id'], "target": y_pred_test }) submission.to_csv('submission.csv', index=False)<load_pretrained>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.pipeline import Pipeline from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.model_selection import cross_val_score from sklearn.preprocessing import LabelEncoder,MinMaxScaler from sklearn.ensemble import GradientBoostingClassifier
Titanic - Machine Learning from Disaster
13,478,547
xtr = np.load('../input/xtr.npy') xte = np.load('../input/xte.npy') ytr = np.load('../input/ytr.npy')<data_type_conversions>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') target = train['Survived'] submit = pd.DataFrame(test['PassengerId'])
Titanic - Machine Learning from Disaster
13,478,547
xtr = np.expand_dims(xtr, -1).astype('float32') / 255 xte = np.expand_dims(xte, -1).astype('float32') / 255 ytr = np.array([[1 if ytr[i] == j else 0 for j in range(10)] for i in range(len(ytr))])<choose_model_class>
print(train.isnull().sum()) train['Age'].fillna(train['Age'].mean(), inplace=True) train['Embarked'].fillna('S', inplace=True) train.drop('Cabin', axis=1, inplace=True) print(train.isnull().sum())
Titanic - Machine Learning from Disaster
13,478,547
model = Sequential() model.add(Conv2D(16, 5, input_shape=(28, 28, 1), padding='same', activation='relu')) model.add(MaxPool2D()) model.add(Conv2D(32, 5, padding='same', activation='relu')) model.add(MaxPool2D()) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])<choose_model_class>
print(test.isnull().sum()) test['Age'].fillna(test['Age'].mean(), inplace=True) test['Fare'].fillna(test['Fare'].median(), inplace=True) test.drop('Cabin', axis=1, inplace=True) print(test.isnull().sum())
Titanic - Machine Learning from Disaster
13,478,547
datagen = ImageDataGenerator(rotation_range=20, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2)<define_variables>
title = [] for i in range(len(train['Name'])): a = train.loc[i, 'Name'].split()[1] title.append(a) title = pd.Series(title) train['Title'] = title title1 = [] for i in range(len(test['Name'])): a = test.loc[i, 'Name'].split()[1] title1.append(a) title1 = pd.Series(title1) test['Title'] = title1 train['Passenger'] = train['SibSp'] + train['Parch'] + 1 test['Passenger'] = test['SibSp'] + test['Parch'] + 1
Titanic - Machine Learning from Disaster
13,478,547
train_idx = np.random.choice(range(len(xtr)), size=int(np.floor(0.8 * len(xtr))), replace=False) val_idx = np.array([i for i in range(len(xtr)) if i not in train_idx])<prepare_x_and_y>
encode = LabelEncoder() train['Sex'] = encode.fit_transform(train['Sex']) test['Sex'] = encode.fit_transform(test['Sex']) train['Title'] = encode.fit_transform(train['Title']) test['Title'] = encode.fit_transform(test['Title']) train['Embarked'] = encode.fit_transform(train['Embarked']) test['Embarked'] = encode.fit_transform(test['Embarked'])
Titanic - Machine Learning from Disaster
13,478,547
xtrain = xtr[train_idx, :, :] ytrain = ytr[train_idx, :] xval = xtr[val_idx, :, :] yval = ytr[val_idx, :]<train_model>
train['New1']=train['Sex']*train['Pclass'] test['New1']=test['Sex']*test['Pclass']
Titanic - Machine Learning from Disaster
13,478,547
early_stop = EarlyStopping(patience=3, monitor='val_loss') batch_size = 128 epochs = 50 model.fit_generator(datagen.flow(xtrain, ytrain, batch_size=batch_size), steps_per_epoch=len(xtr) // batch_size, epochs=epochs, validation_data=(xval, yval), callbacks=[early_stop])<save_to_csv>
train.drop(['Name','Ticket','PassengerId','Parch','SibSp'], axis=1, inplace=True) test.drop(['Name','Ticket','PassengerId','Parch','SibSp'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
13,478,547
predictions = model.predict(xte) preds = predictions.argmax(axis=1) submission = pd.DataFrame({'id': range(len(xte)), 'label': preds}) submission.to_csv('my_submission.csv', index=None)<import_modules>
steps = list() steps.append(('scaler', MinMaxScaler())) steps.append(('model', GradientBoostingClassifier())) pipeline = Pipeline(steps=steps) x = train.drop('Survived', axis=1) y = train['Survived']
Titanic - Machine Learning from Disaster
13,478,547
import pandas as pd from sklearn.metrics import log_loss<load_from_csv>
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=1) score = cross_val_score(pipeline, x, y, scoring='accuracy', cv=cv, n_jobs=-1) print('Accuracy: %.3f (%.3f)' % (np.mean(score)*100, np.std(score)*100))
Titanic - Machine Learning from Disaster
13,478,547
df_train = pd.read_csv('../input/dmia-sport-2019-fall-competition-1/train.csv')<count_unique_values>
model=GradientBoostingClassifier() model.fit(x,y) pred=model.predict(test) submit['Survived']=pred
Titanic - Machine Learning from Disaster
13,478,547
checks_num = df_train.check_id.nunique() checks_num<remove_duplicates>
submit.to_csv('ver1.csv', index=False)
Titanic - Machine Learning from Disaster
13,385,017
df_train = df_train.drop_duplicates(subset=['check_id', 'target'])<count_values>
train_data = pd.read_csv(".. /input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
13,385,017
target_probs = df_train.target.value_counts(normalize=True).sort_index() target_probs<compute_test_metric>
missing_val_count_by_column = train_data.isnull().sum() print(missing_val_count_by_column[missing_val_count_by_column > 0])
Titanic - Machine Learning from Disaster
13,385,017
log_loss(df_train.target, [df_train.target.value_counts(normalize=True)] * df_train.shape[0])<load_from_csv>
features = ['Pclass', 'Sex','SibSp','Age','Parch','Fare','Embarked'] X = pd.get_dummies(train_data[features], drop_first=True) y = train_data["Survived"]
Titanic - Machine Learning from Disaster
13,385,017
df_test = pd.read_csv('../input/dmia-sport-2019-fall-competition-1/test.csv')<define_variables>
my_imputer = SimpleImputer() imputed_X = my_imputer.fit_transform(X)
Titanic - Machine Learning from Disaster
13,385,017
columns_names = ['target_{}'.format(i) for i in range(25)]<prepare_output>
forest = RandomForestClassifier(oob_score=True) forest.fit(imputed_X,y) forest.oob_score_
Titanic - Machine Learning from Disaster
13,385,017
df_result = pd.DataFrame([target_probs.tolist()] * len(ids), columns=columns_names) df_result.insert(0, 'check_id', ids) df_result.head(10)<save_to_csv>
for name, score in zip(X.columns, forest.feature_importances_): print(score, name)
Titanic - Machine Learning from Disaster
13,385,017
df_result.to_csv('const_submission.csv', index=False)<set_options>
smart_forest = RandomForestClassifier(n_estimators = 100, max_depth=5, max_leaf_nodes=12, min_impurity_decrease=.0048,oob_score=True) smart_forest.fit(imputed_X,y) smart_forest.oob_score_
Titanic - Machine Learning from Disaster
13,385,017
%matplotlib inline print(os.listdir("../input"))<load_from_csv>
for name, score in zip(X.columns, smart_forest.feature_importances_): print(score, name)
Titanic - Machine Learning from Disaster
13,385,017
file_train = '../input/train.csv' file_valid = '../input/valid.csv' file_exemplo = '../input/sample_su.csv' data_train = pd.read_csv(file_train) data_valid = pd.read_csv(file_valid) print(len(data_train), len(data_valid))<prepare_x_and_y>
bag = BaggingClassifier(DecisionTreeClassifier(splitter="random"), n_estimators=100, max_samples=1.0, oob_score=True) bag.fit(imputed_X, y) bag.oob_score_
Titanic - Machine Learning from Disaster
13,385,017
Y_dummies = pd.get_dummies(data_train['is_sarcastic']).values Y = data_train['is_sarcastic'] data_train_format = data_train.drop('is_sarcastic', axis=1) data_all = pd.concat([data_train_format, data_valid]) data_all = data_all.drop('article_link', axis=1) data_all = data_all.drop('ID', axis=1) data_all['headline'] = data_all['headline'].apply(lambda x: x.lower()) data_all['headline'] = data_all['headline'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x)) print(len(data_train['headline']), len(data_valid['headline'])) print(len(data_all))<string_transform>
extra_trees=ExtraTreesClassifier(n_estimators = 50, max_depth=5, max_leaf_nodes=24, bootstrap=True, oob_score=True) extra_trees.fit(imputed_X,y) extra_trees.oob_score_
Titanic - Machine Learning from Disaster
13,385,017
max_features = 5000 tokenizer = Tokenizer(num_words=max_features, split=' ') tokenizer.fit_on_texts(data_all['headline'].values) X = tokenizer.texts_to_sequences(data_all['headline'].values) X = pad_sequences(X)<split>
for name, score in zip(X.columns, extra_trees.feature_importances_): print(score, name)
Titanic - Machine Learning from Disaster
13,385,017
data_train_split = X[:len(data_train)] data_valid_split = X[len(data_train):] print(len(data_train_split), len(data_valid_split))<choose_model_class>
test_data = pd.read_csv(".. /input/titanic/test.csv") X_test=pd.get_dummies(test_data[features], drop_first=True) imputed_test=my_imputer.fit_transform(X_test )
Titanic - Machine Learning from Disaster
13,385,017
embed_dim = 64 lstm_out = 196 model = Sequential() model.add(Embedding(max_features, embed_dim, input_length=data_train_split.shape[1])) model.add(LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2)) model.add(Dense(2, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])<train_model>
predictions = smart_forest.predict(imputed_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
2,944,068
batch_size = 32 history = model.fit(data_train_split, Y_dummies, epochs=60, batch_size=batch_size, verbose=2)<predict_on_test>
%reload_ext autoreload %autoreload 2 %matplotlib inline
Titanic - Machine Learning from Disaster
2,944,068
Y_sarcasm = model.predict(data_valid_split, batch_size=1, verbose=2)<save_to_csv>
from fastai import * from fastai.vision import * from fastai.tabular import *
Titanic - Machine Learning from Disaster
2,944,068
data_to_submit = pd.DataFrame({ 'ID': data_valid['ID'], 'is_sarcastic': list_sarcasm }) data_to_submit.to_csv('csv_to_submit.csv', index=False)<set_options>
train = pd.read_csv('../input/train.csv') train.head()
Titanic - Machine Learning from Disaster
2,944,068
sns.set() %matplotlib inline<load_from_csv>
test = pd.read_csv(".. /input/test.csv") test.isnull().sum()
Titanic - Machine Learning from Disaster
2,944,068
df_train = pd.read_csv(os.path.join(_input_path, "train.csv"), index_col=0) df_test = pd.read_csv(os.path.join(_input_path, "test.csv"), index_col=0)<count_values>
train["Embarked"] = train["Embarked"].fillna("C") test["Embarked"] = test["Embarked"].fillna("C") train['Fare'].fillna(train['Fare'].median() , inplace = True) test['Fare'].fillna(test['Fare'].median() , inplace = True) train.Cabin.fillna("N", inplace=True) test.Cabin.fillna("N", inplace=True )
Titanic - Machine Learning from Disaster
2,944,068
df_train["Engine"].str[-3:].value_counts()<count_values>
train_title = [i.split(",")[1].split(".")[0].strip() for i in train["Name"]] train["Title"] = pd.Series(train_title) train["Title"].head() test_title = [i.split(",")[1].split(".")[0].strip() for i in test["Name"]] test["Title"] = pd.Series(test_title) test["Title"].head()
Titanic - Machine Learning from Disaster
2,944,068
df_test["Engine"].str[-3:].value_counts()<filter>
test_id = test['PassengerId']
Titanic - Machine Learning from Disaster
2,944,068
df_train[df_train[["Engine", "Power"]].isnull().sum(axis=1)>= 1]<define_variables>
dep_var = 'Survived' cat_names = ['Title', 'Sex', 'Ticket', 'Cabin', 'Embarked'] cont_names = [ 'Age', 'SibSp', 'Parch', 'Fare'] print("Categorical columns are : ", cat_names) print('Continuous numerical columns are :', cont_names) procs = [FillMissing, Categorify, Normalize]
Titanic - Machine Learning from Disaster
2,944,068
drop_origin_cols = []<data_type_conversions>
test = TabularList.from_df(test, cat_names=cat_names, cont_names=cont_names, procs=procs) data = (TabularList.from_df(train, path='.', cat_names=cat_names, cont_names=cont_names, procs=procs) .split_by_idx(list(range(0, 200))) .label_from_df(cols=dep_var) .add_test(test, label=0) .databunch())
Titanic - Machine Learning from Disaster
2,944,068
df_train["Engine_num"] = df_train["Engine"].str[: -3].astype(np.float16) df_test["Engine_num"] = df_test["Engine"].str[: -3].astype(np.float16) if "Engine" not in drop_origin_cols: drop_origin_cols.append("Engine" )<data_type_conversions>
data.show_batch(rows=10)
Titanic - Machine Learning from Disaster
2,944,068
df_train["Mileage_num"] = df_train["Mileage"].apply(lambda x: str(x ).split(" ")[0] ).astype(np.float) df_test["Mileage_num"] = df_test["Mileage"].apply(lambda x: str(x ).split(" ")[0] ).astype(np.float) if "Mileage" not in drop_origin_cols: drop_origin_cols.append("Mileage" )<count_values>
learn = tabular_learner(data, layers=[200,100], metrics=accuracy, emb_drop=0.1)
Titanic - Machine Learning from Disaster
2,944,068
df_train["Power"].str[-4: ].value_counts()<data_type_conversions>
learn.lr_find()
Titanic - Machine Learning from Disaster
2,944,068
df_train["Power_num"] = df_train["Power"].str[: -4].replace({"null": np.nan} ).astype(np.float) df_test["Power_num"] = df_test["Power"].str[: -4].replace({"null": np.nan} ).astype(np.float) if "Power" not in drop_origin_cols: drop_origin_cols.append("Power" )<drop_column>
learn.fit(15, slice(1e-01))
Titanic - Machine Learning from Disaster
2,944,068
df_train.drop(drop_origin_cols, axis=1, inplace=True) df_test.drop(drop_origin_cols, axis=1, inplace=True)<feature_engineering>
preds, targets = learn.get_preds() predictions = np.argmax(preds, axis = 1) pd.crosstab(predictions, targets)
Titanic - Machine Learning from Disaster
2,944,068
df_train["maker"] = df_train["Name"].apply(lambda x: x.split(" ")[0]) df_test["maker"] = df_test["Name"].apply(lambda x: x.split(" ")[0] )<categorify>
predictions, *_ = learn.get_preds(DatasetType.Test) labels = np.argmax(predictions, 1)
Titanic - Machine Learning from Disaster
2,944,068
<categorify><EOS>
submission = pd.DataFrame({'PassengerId': test_id, 'Survived': labels}) submission.to_csv('submission.csv', index=False) submission.head()
Titanic - Machine Learning from Disaster
6,526,349
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<categorify>
%matplotlib inline
Titanic - Machine Learning from Disaster
6,526,349
le = LabelEncoder() df_train["Location"] = le.fit_transform(df_train["Location"]) df_test["Location"] = le.transform(df_test["Location"]) df_train["Location"] = df_train["Location"].astype("category") df_test["Location"] = df_test["Location"].astype("category" )<categorify>
train = pd.read_csv(".. /input/titanic/train.csv") test = pd.read_csv(".. /input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
6,526,349
le = ce.OrdinalEncoder() df_train["Fuel_Type"] = le.fit_transform(df_train["Fuel_Type"]) df_test["Fuel_Type"] = le.transform(df_test["Fuel_Type"]) df_train["Fuel_Type"] = df_train["Fuel_Type"].astype("category") df_test["Fuel_Type"] = df_test["Fuel_Type"].astype("category")<categorify>
print(train.isnull().sum())
Titanic - Machine Learning from Disaster
6,526,349
le = LabelEncoder() df_train["Transmission"] = le.fit_transform(df_train["Transmission"]) df_test["Transmission"] = le.transform(df_test["Transmission"] )<data_type_conversions>
print(test.isnull().sum())
Titanic - Machine Learning from Disaster
6,526,349
df_train["Transmission"] = df_train["Transmission"].astype("category") df_test["Transmission"] = df_test["Transmission"].astype("category" )<categorify>
train['Title'] = train.Name.str.extract(r' ([A-Za-z]+)\.', expand=False) train['Title'] = train['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') train['Title'] = train['Title'].replace('Mlle', 'Miss') train['Title'] = train['Title'].replace('Ms', 'Miss') train['Title'] = train['Title'].replace('Mme', 'Mrs') Title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} train['Title'] = train['Title'].map(Title_mapping) train['Title'] = train['Title'].fillna(0) del train['Name'] train["Age"] = train["Age"].fillna(train["Age"].median()) train["Sex"] = train["Sex"].map({"male": 0, "female": 1}) train["Embarked"] = train["Embarked"].fillna("S") train["Embarked"] = train["Embarked"].map({"S": 0, "Q": 1, "C": 2}) train['Ticket_Lett'] = train['Ticket'].apply(lambda x: str(x)[0]) train['Ticket_Lett'] = train['Ticket_Lett'].apply(lambda x: str(x)) train['Ticket_Lett'] = np.where(train['Ticket_Lett'].isin(['1', '2', '3', 'S', 'P', 'C', 'A']), train['Ticket_Lett'], np.where(train['Ticket_Lett'].isin(['W', '4', '7', '6', 'L', '5', '8']), '0', '0')) train['Ticket_Lett'] = train['Ticket_Lett'].replace("1",1).replace("2",2).replace("3",3).replace("0",0).replace("S",3).replace("P",0).replace("C",3).replace("A",3) del train['Ticket'] train['Cabin_Lett'] = train['Cabin'].apply(lambda x: str(x)[0]) train['Cabin_Lett'] = train['Cabin_Lett'].apply(lambda x: str(x)) train['Cabin_Lett'] = np.where(train['Cabin_Lett'].isin(['F', 'E', 'D', 'C', 'B', 'A']), train['Cabin_Lett'], np.where(train['Cabin_Lett'].isin(['W', '4', '7', '6', 'L', '5', '8']), '0', '0')) train['Cabin_Lett'] = train['Cabin_Lett'].replace("A",1).replace("B",2).replace("C",1).replace("0",0).replace("D",2).replace("E",2).replace("F",1) del train['Cabin'] train["FamilySize"] = train["SibSp"] + train["Parch"] + 1 train
Titanic - Machine Learning from Disaster
6,526,349
le = LabelEncoder() df_train["Owner_Type"] = le.fit_transform(df_train["Owner_Type"]) df_test["Owner_Type"] = le.transform(df_test["Owner_Type"]) df_train["Owner_Type"] = df_train["Owner_Type"].astype("category") df_test["Owner_Type"] = df_test["Owner_Type"].astype("category" )<count_missing_values>
test['Title'] = test.Name.str.extract(r' ([A-Za-z]+)\.', expand=False) test['Title'] = test['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') test['Title'] = test['Title'].replace('Mlle', 'Miss') test['Title'] = test['Title'].replace('Ms', 'Miss') test['Title'] = test['Title'].replace('Mme', 'Mrs') Title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} test['Title'] = test['Title'].map(Title_mapping) test['Title'] = test['Title'].fillna(0) del test['Name'] test["Age"] = test["Age"].fillna(test["Age"].median()) test["Sex"] = test["Sex"].map({"male": 0, "female": 1}) test["Embarked"] = test["Embarked"].fillna("S") test["Embarked"] = test["Embarked"].map({"S": 0, "Q": 1, "C": 2}) test['Ticket_Lett'] = test['Ticket'].apply(lambda x: str(x)[0]) test['Ticket_Lett'] = test['Ticket_Lett'].apply(lambda x: str(x)) test['Ticket_Lett'] = np.where(test['Ticket_Lett'].isin(['1', '2', '3', 'S', 'P', 'C', 'A']), test['Ticket_Lett'], np.where(test['Ticket_Lett'].isin(['W', '4', '7', '6', 'L', '5', '8']), '0', '0')) test['Ticket_Lett'] = test['Ticket_Lett'].replace("1",1).replace("2",2).replace("3",3).replace("0",0).replace("S",3).replace("P",0).replace("C",3).replace("A",3) del test['Ticket'] test['Cabin_Lett'] = test['Cabin'].apply(lambda x: str(x)[0]) test['Cabin_Lett'] = test['Cabin_Lett'].apply(lambda x: str(x)) test['Cabin_Lett'] = np.where(test['Cabin_Lett'].isin(['F', 'E', 'D', 'C', 'B', 'A']), test['Cabin_Lett'], np.where(test['Cabin_Lett'].isin(['W', '4', '7', '6', 'L', '5', '8']), '0', '0')) test['Cabin_Lett'] = test['Cabin_Lett'].replace("A",1).replace("B",2).replace("C",1).replace("0",0).replace("D",2).replace("E",2).replace("F",1) del test['Cabin'] test["FamilySize"] = test["SibSp"] + test["Parch"] + 1 test["Fare"] = test["Fare"].fillna(test["Fare"].median()) test
Titanic - Machine Learning from Disaster
6,526,349
df_train.isnull().sum()<prepare_x_and_y>
train_x = train.drop(['PassengerId'], axis=1) trainx_corr = train_x.corr() plt.figure(figsize=(8, 8)) sns.heatmap(trainx_corr, cmap="Reds", annot=True, fmt="1.2f") train_x = train_x.drop(['Survived'], axis=1) train_y = train['Survived'] test_x = test.drop(['PassengerId'], axis=1)
Titanic - Machine Learning from Disaster
6,526,349
drop_cols = ["Name", "New_Price"] X = df_train.drop(drop_cols + ["Price"], axis=1) y = np.log1p(df_train["Price"]) X_test = df_test.drop(drop_cols, axis=1 )<define_variables>
stdsc = StandardScaler() trainx_std = stdsc.fit_transform(train_x) testx_std = stdsc.transform(test_x) X_train = np.reshape(trainx_std, (-1, 11, 1)) Y_train = to_categorical(train_y, num_classes=2) X_test = np.reshape(testx_std, (-1, 11, 1))
Titanic - Machine Learning from Disaster
6,526,349
cat_cols = [ind for ind, typ in (df_test.dtypes == "category").items() if typ]<choose_model_class>
model = Sequential() model.add(Conv1D(32, 5, padding='same', input_shape=(11, 1), activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Conv1D(32, 5, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(MaxPool1D(2, padding='same')) model.add(Conv1D(64, 3, padding='same', activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(2, activation='softmax')) model.summary()
Titanic - Machine Learning from Disaster
6,526,349
lgb_reg = lgb.LGBMRegressor() lgb_reg.fit(X, y) lgb_predict = np.expm1(lgb_reg.predict(X_test))<save_to_csv>
plot_model(model, to_file='model.png', show_layer_names=False, show_shapes=True)
Titanic - Machine Learning from Disaster
6,526,349
lgb_submit = pd.read_csv(os.path.join(_input_path, "sampleSubmission.csv")) lgb_submit["Price"] = lgb_predict lgb_submit.to_csv("lgb_submit.csv", index=False)<data_type_conversions>
model.compile(optimizer = "Adam" , loss='binary_crossentropy', metrics=["accuracy"] )
Titanic - Machine Learning from Disaster
6,526,349
cat_X = X.copy() cat_X[cat_cols] = cat_X[cat_cols].astype(np.int16) cat_X_test = X_test.copy() cat_X_test[cat_cols] = cat_X_test[cat_cols].astype(np.int16) cat_reg = cat.CatBoostRegressor() cat_reg.fit(cat_X, y, cat_features=cat_cols, verbose=False) cat_predict = np.expm1(cat_reg.predict(cat_X_test))<save_to_csv>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=2, verbose=1, factor=0.5, min_lr=0.00001)
Titanic - Machine Learning from Disaster
6,526,349
cat_submit = pd.read_csv(os.path.join(_input_path, "sampleSubmission.csv")) cat_submit["Price"] = cat_predict cat_submit.to_csv("cat_submit.csv", index=False)<load_from_csv>
history = model.fit(X_train, Y_train, epochs=30, verbose=2, validation_split=0.2, callbacks=[learning_rate_reduction])
Titanic - Machine Learning from Disaster
6,526,349
train = pd.read_csv('../input/train_final.csv') test = pd.read_csv('../input/test_final.csv')<define_variables>
pred_proba = model.predict(X_test)[:, 1] pred = np.where(pred_proba > 0.5, 1, 0)
Titanic - Machine Learning from Disaster
6,526,349
<drop_column><EOS>
submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': pred}) submission.to_csv('submission_titanic.csv', index=False)
Titanic - Machine Learning from Disaster
1,898,014
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<import_modules>
train_data = pd.read_csv('../input/train.csv') test_data = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
1,898,014
from sklearn.preprocessing import LabelEncoder<categorify>
def refineDataSets(data, isTest): data = data.drop(['PassengerId', 'Ticket', 'Cabin', 'Fare'], axis=1) for i in range(0, data.shape[0]): if('Mr.' in data['Name'][i]): data['Name'][i] = 0 elif('Misc.' in data['Name'][i]): data['Name'][i] = 1 elif('Master.' in data['Name'][i]): data['Name'][i] = 2 elif('Miss.' in data['Name'][i]): data['Name'][i] = 3 elif('Mrs.' in data['Name'][i]): data['Name'][i] = 4 else: data['Name'][i] = 5 if(data['Sex'][i] == 'female'): data['Sex'][i] = 1 else: data['Sex'][i] = 0 if(data['Embarked'][i] == 'S'): data['Embarked'][i] = 0 elif(data['Embarked'][i] == 'C'): data['Embarked'][i] = 1 else: data['Embarked'][i] = 2 if(math.isnan(data['Age'][i])) : data['Age'][i] = data['Age'].mean() if(math.isnan(data['Pclass'][i])) : data['Pclass'][i] = data['Pclass'].mean() if(math.isnan(data['SibSp'][i])) : data['SibSp'][i] = data['SibSp'].mean() data['Embarked'][0] = 0 x_data = data.values x_data = np.float32(np.transpose(x_data)) if isTest: return x_data p_data = np.array([data['Survived'].values]) p_data = np.float32(np.transpose(p_data)) return x_data, p_data
Titanic - Machine Learning from Disaster
1,898,014
le = LabelEncoder()<categorify>
x_train_data, p_train_data = refineDataSets(train_data, False) x_test_data = refineDataSets(test_data, True)
Titanic - Machine Learning from Disaster
1,898,014
cat_cols = [f for f in train.columns if train[f].dtype == 'object'] for col in tqdm(cat_cols): lbl = LabelEncoder() lbl.fit(list(train[col].values.astype('str')) + list(test[col].values.astype('str'))) train[col] = lbl.transform(list(train[col].values.astype('str'))) test[col] = lbl.transform(list(test[col].values.astype('str')))<prepare_x_and_y>
x_train_data = np.delete(x_train_data, 0, 0)
Titanic - Machine Learning from Disaster
1,898,014
x = train.drop('SALES_PRICE', axis=1) y = train.SALES_PRICE<train_model>
x_train_data = np.transpose(x_train_data) x_test_data = np.transpose(x_test_data)
Titanic - Machine Learning from Disaster
1,898,014
reg = LinearRegression() x.fillna(0, inplace=True) reg.fit(x, y)<save_to_csv>
scaler = MinMaxScaler(feature_range=(-1, 1)).fit(np.concatenate((x_train_data, x_test_data), axis=0)) x_train_data = scaler.transform(x_train_data) x_test_data = scaler.transform(x_test_data)
Titanic - Machine Learning from Disaster
1,898,014
price = reg.predict(test.fillna(0)) sub = pd.DataFrame(ids) sub['SALES_PRICE'] = price sub.to_csv('sample1.csv', index=None)<save_to_csv>
epoch_train = 1000 mlp = MLP(layer_size=[x_train_data.shape[1], 28, 28, 1], regularization=1, output_shrink=0.1, output_range=[0, 1], loss_type="hardmse") mlp.train(x_train_data, p_train_data, iteration_log=20000, rate_init=0.08, rate_decay=0.8, epoch_train=epoch_train, epoch_decay=1)
Titanic - Machine Learning from Disaster
1,898,014
def create_download_link(df, title="Download CSV file", filename="data.csv"): csv = df.to_csv() b64 = base64.b64encode(csv.encode()) payload = b64.decode() html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>' html = html.format(payload=payload, title=title, filename=filename) return HTML(html) df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD')) create_download_link(sub)<load_from_csv>
predictions = np.round(mlp.predict(x_test_data).reshape(-1))
Titanic - Machine Learning from Disaster
1,898,014
data_dir = "/kaggle/input/ykc-2nd/" train = pd.read_csv(data_dir+"train.csv", index_col=0) test = pd.read_csv(data_dir+"test.csv", index_col=0) sub = pd.read_csv(data_dir+"sample_submission.csv", index_col=0) df = pd.concat([train, test]) df["product_name"] = df["product_name"].map(lambda w: list( filter(None, list(filter(None, re.sub("'", "", re.sub(r"-|/|\\|\|"|\:|\;|\@|\^", " ", re.sub(r"[0-9]+|\+|\&|\?|!|%|\.|,|\)|\(|'s|®|™| "", w.lower().replace("\xa0", " ")))).split(" ")))))) df_train = df[~df.department_id.isna() ] df_test = df[df.department_id.isna() ]<define_variables>
predictions = np.round(mlp.predict(x_test_data).reshape(-1)) ids = list(test_data['PassengerId'].values) submission = pd.DataFrame(list(np.transpose([ids, predictions]))) submission.columns = ['PassengerId', 'Survived']
Titanic - Machine Learning from Disaster
1,898,014
n_department = 21 n_split = 5<feature_engineering>
submission['Survived'] = np.int32(np.round(submission['Survived'])) submission['PassengerId'] = np.int32(np.round(submission['PassengerId']))
Titanic - Machine Learning from Disaster
1,898,014
<compute_test_metric><EOS>
submission.to_csv('titanic-submission-ultimate-nn-1.csv', index=False)
Titanic - Machine Learning from Disaster
3,358,480
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<merge>
import pandas as pd import numpy as np import matplotlib.pyplot as plt
Titanic - Machine Learning from Disaster
3,358,480
tf_idf = df["product_name"].map(calculate_score) tf_idf = np.nan_to_num(np.array(tf_idf.tolist())) max_value, min_value = tf_idf.max(), tf_idf.min() tf_idf = (tf_idf - min_value) / (max_value - min_value) df = df.merge(pd.DataFrame(tf_idf.tolist(), columns=["dept_"+str(i) for i in range(n_department)]), left_index=True, right_index=True) df = df.reset_index()<load_pretrained>
dataset = pd.read_csv('../input/train.csv')
Titanic - Machine Learning from Disaster
3,358,480
model_ft = gensim.models.KeyedVectors.load_word2vec_format('../input/ykc-2nd/wiki-news-300d-1M.vec/wiki-news-300d-1M.vec')<install_modules>
dataset.isnull().sum()
Titanic - Machine Learning from Disaster
3,358,480
!pip install hyphenate<import_modules>
dataset['Age'] = dataset['Age'].fillna(dataset['Age'].median())
Titanic - Machine Learning from Disaster
3,358,480
from hyphenate import hyphenate_word<categorify>
dataset['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
3,358,480
unused_words = defaultdict(int) def to_vec(x, model_ft): v = np.zeros(model_ft.vector_size) for w in x: try: v += model_ft[w] except: hw = hyphenate_word(w) for w2 in hw: try: v += model_ft[w2] except: unused_words[w2] += 1 v = v /(np.sqrt(np.sum(v ** 2)) + 1e-16) return v def to_vec_max(x, model_ft): v = [np.zeros(model_ft.vector_size), np.zeros(model_ft.vector_size)] for w in x: try: v.append(model_ft[w]) except: hw = hyphenate_word(w) for w2 in hw: try: v.append(model_ft[w2]) except: pass v = np.max(v, axis=0) v = v /(np.sqrt(np.sum(v ** 2)) + 1e-16) return v vecs = df["product_name"].apply(lambda x : to_vec(x, model_ft)) vecs = np.vstack(vecs) vecs = np.nan_to_num(vecs) vecs_max = df["product_name"].apply(lambda x : to_vec_max(x, model_ft)) vecs_max = np.vstack(vecs_max) vecs_max = np.nan_to_num(vecs_max) fasttext_pretrain_cols = [f"fasttext_pretrain_vec{k}" for k in range(vecs.shape[1])] fasttext_pretrain_cols_max = [f"fasttext_pretrain_vec_max{k}" for k in range(vecs.shape[1])] vec_df = pd.DataFrame(vecs, columns=fasttext_pretrain_cols) vec_max_df = pd.DataFrame(vecs_max, columns=fasttext_pretrain_cols_max) df = pd.concat([df, vec_df], axis = 1) df = pd.concat([df, vec_max_df], axis = 1) df.head()<split>
dataset['Embarked'].fillna('S', inplace=True)
Titanic - Machine Learning from Disaster
3,358,480
df_train = df[~df.department_id.isna()] df_test = df[df.department_id.isna()]<define_variables>
dataset['Sex'].value_counts()
Titanic - Machine Learning from Disaster
3,358,480
np.random.seed(42) trainCols = fasttext_pretrain_cols + fasttext_pretrain_cols_max + ["order_rate", "order_dow_mode", "order_hour_of_day_mode"] trainCols2 = ["dept_"+str(i) for i in range(n_department)] + ["order_rate", "order_dow_mode", "order_hour_of_day_mode"] nn_cols = ["dept_"+str(i) for i in range(n_department)] nn_cols2 = fasttext_pretrain_cols + fasttext_pretrain_cols_max + ["dept_"+str(i) for i in range(n_department)] X_train = df_train[trainCols] X2_train = df_train[trainCols2] Xnn_train = df_train[nn_cols] Xnn2_train = df_train[nn_cols2] Y_train = df_train["department_id"] Ynn_train = pd.get_dummies(df_train, columns=["department_id"])[[f"department_id_{i}.0" for i in range(21)]] validInds = np.random.choice(X_train.index.values, 4000, replace=False) trainInds = np.setdiff1d(X_train.index.values, validInds)<init_hyperparams>
dataset['Sex'] = dataset['Sex'].apply(lambda x: 1 if x == 'male' else 0)
Titanic - Machine Learning from Disaster
3,358,480
params = { 'lambda_l1': 0.0, 'lambda_l2': 0.0, 'num_leaves': 200, 'feature_fraction': 0.9840000000000001, 'bagging_fraction': 1.0, 'bagging_freq': 0, 'min_child_samples': 20, 'task': 'train', 'boosting_type': 'dart', 'objective': 'multiclass', 'num_class': 21, 'metric': 'multi_error', 'verbose': 1, 'learning_rate': 0.1, 'num_iterations': 2000, 'max_depth': 7 }<train_model>
cat_columns = ['Embarked'] dataset = pd.get_dummies(dataset, prefix_sep='__', columns=cat_columns)
Titanic - Machine Learning from Disaster
3,358,480
lgb_train = lgb.Dataset(X_train.iloc[trainInds], Y_train.iloc[trainInds]) lgb_val = lgb.Dataset(X_train.iloc[validInds], Y_train.iloc[validInds]) best_params, history = {}, [] lgb_model = lgb.train(params, lgb_train, valid_sets=lgb_val, verbose_eval=10)<predict_on_test>
dataset.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
3,358,480
pred_val_lgb = lgb_model.predict(X_train.iloc[validInds]) score = { "logloss": log_loss(Y_train.iloc[validInds], pred_val_lgb), "f1_micro": f1_score(Y_train.iloc[validInds], np.argmax(pred_val_lgb, axis=1), average="micro")} pred_test_lgb = lgb_model.predict(df_test[trainCols])<init_hyperparams>
x = dataset.iloc[:, 1:].values y = dataset.iloc[:,:1].values
Titanic - Machine Learning from Disaster
3,358,480
params = { 'lambda_l1': 0.0, 'lambda_l2': 0.0, 'num_leaves': 111, 'feature_fraction': 0.9840000000000001, 'bagging_fraction': 1.0, 'bagging_freq': 0, 'min_child_samples': 20, 'task': 'train', 'boosting_type': 'dart', 'objective': 'multiclass', 'num_class': 21, 'metric': 'multi_error', 'verbose': 1, 'learning_rate': 0.05, 'num_iterations': 1000 }<train_model>
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
Titanic - Machine Learning from Disaster
3,358,480
lgb_train2 = lgb.Dataset(X2_train.iloc[trainInds], Y_train.iloc[trainInds]) lgb_val2 = lgb.Dataset(X2_train.iloc[validInds], Y_train.iloc[validInds]) best_params, history = {}, [] lgb_model2 = lgb.train(params, lgb_train2, valid_sets=lgb_val2, verbose_eval=10)<predict_on_test>
N,D = x.shape M = 10
Titanic - Machine Learning from Disaster
3,358,480
pred_val_lgb2 = lgb_model2.predict(X2_train.iloc[validInds]) score = { "logloss": log_loss(Y_train.iloc[validInds], pred_val_lgb2), "f1_micro": f1_score(Y_train.iloc[validInds], np.argmax(pred_val_lgb2, axis=1), average="micro")} pred_test_lgb2 = lgb_model2.predict(df_test[trainCols2])<choose_model_class>
w1= np.random.randn(D,M)/np.sqrt(D+M) b1 = np.zeros(M) w2 = np.random.randn(M,1)/np.sqrt(M+1) b2 = 0
Titanic - Machine Learning from Disaster
3,358,480
nn_model1 = tf.keras.Sequential([tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(32, activation='relu'), tf.keras.layers.Dense(21, activation='softmax')])<choose_model_class>
def sigmoid(z): return 1/(1+np.exp(-z))
Titanic - Machine Learning from Disaster
3,358,480
nn_model1.compile(optimizer=tf.optimizers.Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy']) es_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')<train_model>
d_relu = lambda x: (x > 0).astype(x.dtype)
Titanic - Machine Learning from Disaster
3,358,480
history = nn_model1.fit(Xnn2_train.iloc[trainInds], Ynn_train.iloc[trainInds], validation_data=(Xnn2_train.iloc[validInds], Ynn_train.iloc[validInds]), epochs=100, batch_size=16, verbose=2, callbacks=[es_cb])<predict_on_test>
def forward(x, w1, b1, w2, b2): z = np.maximum(x.dot(w1) + b1, 0) return sigmoid(z.dot(w2) + b2), z
Titanic - Machine Learning from Disaster
3,358,480
pred_val_nn1 = nn_model1.predict(Xnn2_train.iloc[validInds]) pred_test_nn1 = nn_model1.predict(df_test[nn_cols2])<compute_test_metric>
def cross_en(T,Y): return -np.mean(T*np.log(Y)+(1-T)*np.log(1-Y))
Titanic - Machine Learning from Disaster
3,358,480
log_loss(Y_train.iloc[validInds], pred_val_nn1), f1_score(Y_train.iloc[validInds], np.argmax(pred_val_nn1, axis=1), average="micro")<choose_model_class>
cost_train = [] cost_test = [] lr = 0.001
Titanic - Machine Learning from Disaster
3,358,480
nn_model = tf.keras.Sequential([tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.Dense(21, activation='softmax')])<choose_model_class>
for i in range(100000): yprep, ztrain = forward(x_train, w1, b1, w2, b2) train_cost = cross_en(y_train, yprep) yprep_test, ztest = forward(x_test, w1, b1, w2, b2) test_cost = cross_en(y_test, yprep_test) if i % 5000 == 0: print('train_cost = {}'.format(train_cost)) print('test_cost = {}'.format(test_cost)) E = yprep - y_train w2 -= lr*(ztrain.T.dot(E)/len(x_train)) b2 -= lr*(E.sum()/len(x_train)) dz = E.dot(w2.T)*d_relu(ztrain) w1 -= lr*(x_train.T.dot(dz)/len(x_train)) b1 -= lr*(dz.sum()/len(x_train)) cost_train.append(train_cost) cost_test.append(test_cost)
Titanic - Machine Learning from Disaster
3,358,480
nn_model.compile(optimizer=tf.optimizers.Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy']) es_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')<prepare_x_and_y>
test_dataset = pd.read_csv('../input/test.csv')
Titanic - Machine Learning from Disaster
3,358,480
nn_cols = ["dept_"+str(i) for i in range(n_department)] Xnn_train = df_train[nn_cols] Y_train = df_train["department_id"]<train_model>
test_dataset.isnull().sum()
Titanic - Machine Learning from Disaster
3,358,480
history = nn_model.fit(Xnn_train.iloc[trainInds], Ynn_train.iloc[trainInds], validation_data=(Xnn_train.iloc[validInds], Ynn_train.iloc[validInds]), epochs=100, batch_size=16, verbose=2, callbacks=[es_cb])<predict_on_test>
test_dataset['Age'] = test_dataset['Age'].fillna(test_dataset['Age'].median()) test_dataset['Fare'] = test_dataset['Fare'].fillna(test_dataset['Fare'].median())
Titanic - Machine Learning from Disaster
3,358,480
pred_val_nn = nn_model.predict(Xnn_train.iloc[validInds]) Xnn_test = df_test[nn_cols] pred_test_nn = nn_model.predict(Xnn_test)<compute_test_metric>
test_dataset['Sex'] = test_dataset['Sex'].apply(lambda x: 1 if x == 'male' else 0)
Titanic - Machine Learning from Disaster