kernel_id (int64: 24.2k–23.3M) | prompt (string: lengths 8–1.85M) | completion (string: lengths 1–182k) | comp_name (string: lengths 5–57) |
|---|---|---|---|
3,358,480 | log_loss(Y_train.iloc[validInds], pred_val_nn), f1_score(Y_train.iloc[validInds], np.argmax(pred_val_nn, axis = 1), average = "micro" )<train_model> | test_dataset = pd.get_dummies(test_dataset,prefix_sep = '__', columns = cat_columns ) | Titanic - Machine Learning from Disaster |
3,358,480 | rfc = RandomForestClassifier(random_state=0, n_estimators=1000, n_jobs=-1, verbose=1)
rfc.fit(Xnn_train.iloc[trainInds], Y_train.iloc[trainInds] )<predict_on_test> | passengerID = test_dataset['PassengerId']
test_dataset.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
3,358,480 | pred_val_rfc = rfc.predict_proba(Xnn_train.iloc[validInds])
pred_test_rfc = rfc.predict_proba(Xnn_test )<compute_test_metric> | test_data_pred,z_test_data = forward(test_dataset,w1,b1,w2,b2)
test_data_pred =(np.round(test_data_pred))
submission = pd.DataFrame({'PassengerId': passengerID, 'Survived':test_data_pred[0]} ).astype(int ) | Titanic - Machine Learning from Disaster |
3,358,480 | log_loss(Y_train.iloc[validInds], pred_val_rfc), f1_score(Y_train.iloc[validInds], np.argmax(pred_val_rfc, axis = 1), average = "micro" )<train_model> | submission.to_csv('submission.csv', index=False)
| Titanic - Machine Learning from Disaster |
11,170,139 | rfc2 = RandomForestClassifier(random_state=0, n_estimators=1000, n_jobs=-1, verbose=1)
rfc2.fit(X_train.iloc[trainInds], Y_train.iloc[trainInds] )<predict_on_test> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train = train_data.copy()
test = test_data.copy()
train.shape
y_train = train['Survived']
Id = pd.DataFrame(test['PassengerId'])
train.drop(['PassengerId'], axis = 1, inplace=True)
test.drop(['PassengerId'], axis = 1, inplace=True)
train.drop(['Survived'], axis = 1, inplace=True)
train.drop(['Ticket'], axis = 1, inplace=True)
test.drop(['Ticket'], axis = 1, inplace=True ) | Titanic - Machine Learning from Disaster |
11,170,139 | pred_val_rfc2 = rfc2.predict_proba(X_train.iloc[validInds])
pred_test_rfc2 = rfc2.predict_proba(df_test[trainCols])
log_loss(Y_train.iloc[validInds], pred_val_rfc2), f1_score(Y_train.iloc[validInds], np.argmax(pred_val_rfc2, axis = 1), average = "micro" )<train_model> | train.isnull().sum().sort_values(ascending=False)[0:20]
| Titanic - Machine Learning from Disaster |
11,170,139 | svc_clf = make_pipeline(StandardScaler() , SVC(gamma='auto', probability=True))
svc_clf.fit(Xnn_train.iloc[trainInds], Y_train.iloc[trainInds] )<predict_on_test> | for i in list(train.columns):
dtype = train[i].dtype
values = 0
if(dtype == float or dtype == int):
method = 'mean'
else:
method = 'mode'
if(train[i].notnull().sum() / 891 <=.5):
train.drop(i, axis = 1, inplace=True)
elif method == 'mean':
train[i]=train[i].fillna(train[i].mean())
else:
train[i]=train[i].fillna(train[i].mode() [0])
for i in list(test.columns):
dtype = test[i].dtype
values = 0
if(dtype == float or dtype == int):
method = 'mean'
else:
method = 'mode'
if(test[i].notnull().sum() / 418 <=.5):
test.drop(i, axis = 1, inplace=True)
elif method == 'mean':
test[i]=test[i].fillna(test[i].mean())
else:
test[i]=test[i].fillna(test[i].mode() [0])
| Titanic - Machine Learning from Disaster |
11,170,139 | pred_val_svc = svc_clf.predict_proba(Xnn_train.iloc[validInds])
pred_test_svc = svc_clf.predict_proba(Xnn_test )<compute_test_metric> | train_test_data = [train, test]
for dataset in train_test_data:
dataset['Title'] = dataset['Name'].str.extract(r'([A-Za-z]+)\.', expand=False)
title_mapping = {"Mr": 0, "Miss": 1, "Mrs": 1,
"Master": 0, "Dr": 1, "Rev": 0, "Col": 0, "Major": 0, "Mlle": 1,"Countess": 1,
"Ms": 1, "Lady": 1, "Jonkheer": 1, "Don": 0, "Dona" : 1, "Mme": 0,"Capt": 0,"Sir": 0 }
for dataset in train_test_data:
dataset['Title'] = dataset['Title'].map(title_mapping)
| Titanic - Machine Learning from Disaster |
11,170,139 | log_loss(Y_train.iloc[validInds], pred_val_svc), f1_score(Y_train.iloc[validInds], np.argmax(pred_val_svc, axis = 1), average = "micro" )<train_model> | sex_mapping = {"male": 0, "female":1}
for dataset in train_test_data:
dataset['Sex'] = dataset['Sex'].map(sex_mapping)
| Titanic - Machine Learning from Disaster |
11,170,139 | svc_clf2 = make_pipeline(StandardScaler() , SVC(gamma='auto', probability=True))
svc_clf2.fit(X_train.iloc[trainInds], Y_train.iloc[trainInds] )<predict_on_test> | Pclass1 = train_data[train_data['Pclass'] == 1]['Embarked'].value_counts()
Pclass2 = train_data[train_data['Pclass'] == 2]['Embarked'].value_counts()
Pclass3 = train_data[train_data['Pclass'] == 3]['Embarked'].value_counts()
df = pd.DataFrame([Pclass1, Pclass2, Pclass3])
df.index = ['1st class','2nd class', '3rd class']
df.plot(kind='bar',stacked=True, figsize=(10,5)) | Titanic - Machine Learning from Disaster |
11,170,139 | pred_val_svc2 = svc_clf2.predict_proba(X_train.iloc[validInds] )<compute_train_metric> | for data in train_test_data:
data['Embarked'] = data['Embarked'].fillna("S")
embarked_mapping = {"S": 0, "C": 1, "Q": 2}
for dataset in train_test_data:
dataset['Embarked'] = dataset['Embarked'].map(embarked_mapping ) | Titanic - Machine Learning from Disaster |
11,170,139 | pred_test_svc2 = svc_clf2.predict_proba(df_test[trainCols])
log_loss(Y_train.iloc[validInds], pred_val_svc2), f1_score(Y_train.iloc[validInds], np.argmax(pred_val_svc2, axis = 1), average = "micro" )<concatenate> | train["FamilySize"] = train['SibSp'] + train['Parch'] + 1
test["FamilySize"] = test['SibSp'] + test['Parch'] + 1 | Titanic - Machine Learning from Disaster |
11,170,139 | val_preds_all = np.hstack([pred_val_lgb2, pred_val_lgb, pred_val_nn, pred_val_nn1, pred_val_svc, pred_val_svc2, pred_val_rfc, pred_val_rfc2])
test_preds_all = np.hstack([pred_test_lgb2, pred_test_lgb, pred_test_nn, pred_test_nn1, pred_test_svc, pred_test_svc2, pred_test_rfc, pred_test_rfc2] )<train_model> | train.drop(['Name'], axis = 1, inplace=True)
test.drop(['Name'], axis = 1, inplace=True ) | Titanic - Machine Learning from Disaster |
11,170,139 | clf = LogisticRegression(multi_class="ovr" ).fit(val_preds_all, Y_train.iloc[validInds] )<predict_on_test> | from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D,MaxPool2D
import keras | Titanic - Machine Learning from Disaster |
11,170,139 | prediction = clf.predict(test_preds_all)
prediction<save_to_csv> | continuous = ['Age', 'Fare', 'Parch', 'Pclass', 'SibSp', 'FamilySize']
scaler = StandardScaler()
for var in continuous:
train[var] = train[var].astype('float64')
train[var] = scaler.fit_transform(train[var].values.reshape(-1, 1))
for var in continuous:
test[var] = test[var].astype('float64')
test[var] = scaler.fit_transform(test[var].values.reshape(-1, 1)) | Titanic - Machine Learning from Disaster |
11,170,139 | sub["department_id"] = prediction.astype(int)
sub.to_csv("submission.csv", index = False)
sub.head()<save_to_csv> | adam_opt = tf.keras.optimizers.Adam(
learning_rate=0.003, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False,
name='Adam',
)
model = Sequential()
model.add(Flatten())
model.add(Dense(32, input_dim=train.shape[1],kernel_initializer = 'uniform', activation='relu'))
model.add(Dense(32, kernel_initializer = 'uniform', activation = 'relu'))
model.add(Dropout(0.4))
model.add(Dense(32,kernel_initializer = 'uniform', activation = 'relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=adam_opt, metrics=['accuracy'])
| Titanic - Machine Learning from Disaster |
11,170,139 | sub.to_csv('submission.csv' )<save_to_csv> | history = model.fit(train, y_train, epochs=20, batch_size=50, validation_split = 0.2)
print("%s: %.2f%%" %('val_acc', val_acc*100)) | Titanic - Machine Learning from Disaster |
11,170,139 | df.to_csv("dataframe_all.csv")
np.savetxt("val_preds_all.csv", val_preds_all, delimiter=",")
np.savetxt("test_preds_all.csv", test_preds_all, delimiter=",")
np.save("trainInds", trainInds)
np.save("validInds", validInds )<save_to_csv> | scores = model.evaluate(train, y_train, batch_size=32 ) | Titanic - Machine Learning from Disaster |
11,170,139 | df_test.to_csv("dataframe_test.csv" )<load_from_csv> | y_pred = model.predict(test)
y_final =(y_pred > 0.5 ).astype(int ).reshape(test.shape[0])
output = pd.DataFrame({'PassengerId': test_data['PassengerId'], 'Survived': y_final})
output.to_csv('prediction-ann.csv', index=False ) | Titanic - Machine Learning from Disaster |
5,560,938 | train = pd.read_csv("../input/ykc-cup-2nd/train.csv")
test = pd.read_csv("../input/ykc-cup-2nd/test.csv")
sub = pd.read_csv("../input/ykc-cup-2nd/sample_submission.csv")
train.shape, test.shape, sub.shape<concatenate> | dataset_train = pd.read_csv("/kaggle/input/titanic/train.csv")
dataset_train.head() | Titanic - Machine Learning from Disaster |
5,560,938 | df = pd.concat([train, test])
df = df.reset_index(drop=True)
df.shape<choose_model_class> | dataset_test = pd.read_csv("/kaggle/input/titanic/test.csv")
dataset_test.head() | Titanic - Machine Learning from Disaster |
5,560,938 | target = "department_id"
n_split = 5
kfold = StratifiedKFold(n_splits=n_split, shuffle = True, random_state=42 )<feature_engineering> | gender = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
gender.head() | Titanic - Machine Learning from Disaster |
5,560,938 | df["product_name"] = df["product_name"].apply(lambda words : words.lower().replace(",", "" ).replace("&", "" ).split(" "))
df.head()<define_variables> | dataset_train.isnull().sum() | Titanic - Machine Learning from Disaster |
5,560,938 | model_names = ["crawl-300d-2M", "crawl-300d-2M-subword", "wiki-news-300d-1M", "wiki-news-300d-1M-subword"]<load_pretrained> | dataset_test.isnull().sum() | Titanic - Machine Learning from Disaster |
5,560,938 | fasttext_pretrain_cols = []
unused_words = defaultdict(int)
for model_name in model_names:
model = pd.read_pickle(f"../input/fasttext/{model_name}.pkl")
def to_mean_vec(x, model):
v = np.zeros(model.vector_size)
for w in x:
try:
v += model[w]
except KeyError:
unused_words[w] += 1
v = v /(np.sqrt(np.sum(v ** 2)) + 1e-16)
return v
def to_max_vec(x, model):
v = np.zeros(model.vector_size)
for w in x:
try:
v = np.maximum(v, model[w])
except KeyError:
pass
return v
mean_vecs = df["product_name"].apply(lambda x : to_mean_vec(x, model))
mean_vecs = np.vstack(mean_vecs)
cols = [f"fasttext_pretrain_{model_name}_mean_vec{k}" for k in range(mean_vecs.shape[1])]
fasttext_pretrain_cols += cols
mean_vec_df = pd.DataFrame(mean_vecs, columns=cols)
df = pd.concat([df, mean_vec_df], axis = 1)
max_vecs = df["product_name"].apply(lambda x : to_max_vec(x, model))
max_vecs = np.vstack(max_vecs)
cols = [f"fasttext_pretrain_{model_name}_max_vec{k}" for k in range(max_vecs.shape[1])]
fasttext_pretrain_cols += cols
max_vec_df = pd.DataFrame(max_vecs, columns=cols)
df = pd.concat([df, max_vec_df], axis = 1)
df.head()<split> | dataset_train['Age'] = dataset_train['Age'].fillna(dataset_train['Age'].mean())
mean_train = int(dataset_train['Age'].mean())
mean_test = int(dataset_test['Age'].mean())
dataset_train['Embarked'] = dataset_train['Embarked'].fillna(np.random.choice(['C','Q','S']))
dataset_test['Age'] = dataset_test['Age'].fillna(dataset_test['Age'].mean())
dataset_test['Fare'] = dataset_test['Fare'].fillna(dataset_test['Fare'].mean() ) | Titanic - Machine Learning from Disaster |
5,560,938 | train = df[~df[target].isna() ]
test = df[df[target].isna() ]<feature_engineering> | temp_title.value_counts() | Titanic - Machine Learning from Disaster |
5,560,938 | def to_weighted_count_vec(x, word_sets):
v = np.zeros(21)
for w in x:
hits = []
for i in range(21):
if w in word_sets[i]:
hits.append(i)
for i in hits:
v[i] += 1.0 / len(hits)
return v
weighted_count_cols = [f"weighted_count_vec{k}" for k in range(21)]<create_dataframe> | fare_cabin = dataset_train[['Cabin', 'Fare']].groupby(by = 'Cabin' ).mean()
fare_cabin = fare_cabin.sort_values(by = 'Fare', ascending = False)
fare_cabin = fare_cabin[:16][:]
fare_cabin = fare_cabin.reset_index()
fare_cabin = fare_cabin.sort_values(by = 'Cabin', ascending = True)
def label_encoder(string):
num = re.findall(r'\d+', string)
alpha = string[0]
if len(num)>1:
label = alpha + '(' + '-'.join(num)+ ')'
else:
label = alpha + '-'.join(num)
return label
fare_cabin['Cabin'] = fare_cabin['Cabin'].apply(label_encoder ) | Titanic - Machine Learning from Disaster |
5,560,938 | train_weighted_count = pd.DataFrame(index=train.index, columns=weighted_count_cols, dtype=np.float32)
for i_fold,(train_idx, valid_idx)in enumerate(kfold.split(train, train[target])) :
tr = train.loc[train_idx]
va = train.loc[valid_idx]
word_sets = [set(sum(tr[tr["department_id"] == i]["product_name"], [])) for i in range(21)]
vecs = va["product_name"].apply(lambda x : to_weighted_count_vec(x, word_sets))
vecs = np.vstack(vecs)
vec_df = pd.DataFrame(vecs, index=va.index, columns=weighted_count_cols)
train_weighted_count.loc[valid_idx, :] = vec_df
train = pd.concat([train, train_weighted_count], axis=1 )<create_dataframe> | dataset_train['Ticket'] = dataset_train['Ticket'].apply(lambda x: len(x))
dataset_train['Title'] = dataset_train['Name'].apply(get_title)
dataset_train['Title'] = dataset_train.apply(replace_titles, axis = 1)
drop_cols = ['PassengerId','Name','Cabin', 'Title']
encode_cols = ['Sex','Embarked', 'Title']
encode_after = pd.get_dummies(dataset_train[encode_cols])
fin_data = dataset_train.copy()
fin_data = fin_data.drop(drop_cols, axis = 1)
fin_data = pd.concat([fin_data, encode_after], axis = 1)
print(fin_data.columns)
fin_data.drop(['Sex', 'Embarked'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
5,560,938 | test_weighted_count = pd.DataFrame(index=test.index, columns=weighted_count_cols, dtype=np.float32)
word_sets = [set(sum(train[train["department_id"] == i]["product_name"], [])) for i in range(21)]
vecs = test["product_name"].apply(lambda x : to_weighted_count_vec(x, word_sets))
vecs = np.vstack(vecs)
test_weighted_count.loc[:, :] = pd.DataFrame(vecs, index=test.index, columns=weighted_count_cols)
test = pd.concat([test, test_weighted_count], axis=1 )<define_variables> | dataset_test['Title'] = dataset_test['Name'].apply(get_title)
dataset_test['Ticket'] = dataset_test['Ticket'].apply(lambda x: str(x))
dataset_test['Ticket'] = dataset_test['Ticket'].apply(lambda x: len(x))
dataset_test['Title'] = dataset_test.apply(replace_titles, axis = 1)
encode_cols_test = pd.get_dummies(dataset_test[encode_cols])
fin_data_test = dataset_test.copy()
fin_data_test = fin_data_test.drop(['PassengerId','Name','Cabin', 'Embarked','Sex'],axis =1)
fin_data_test = pd.concat([fin_data_test, encode_cols_test], axis = 1)
fin_data_test = fin_data_test[[ 'Pclass', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare',
'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q',
'Embarked_S', 'Title_Master', 'Title_Miss', 'Title_Mr',
'Title_Mrs']] | Titanic - Machine Learning from Disaster |
5,560,938 | features = fasttext_pretrain_cols + weighted_count_cols + ["order_rate", "order_dow_mode", "order_hour_of_day_mode"]<drop_column> | X_train = fin_data.values[:,1:]
Y_train = fin_data.values[:,0]
X_test = fin_data_test.values[:,:]
Y_test = gender.values[:,1:] | Titanic - Machine Learning from Disaster |
5,560,938 | train = reduce_mem_usage(train)
test = reduce_mem_usage(test )<train_model> | params = {'penalty':['l1','l2'], 'C':[0.01,0.1,1,10,100]}
lr = LogisticRegression(solver = 'liblinear')
grid = GridSearchCV(lr, param_grid=params, scoring ='f1', cv = 10, n_jobs=-1)
grid.fit(X_train, Y_train)
grid.best_params_ | Titanic - Machine Learning from Disaster |
5,560,938 | preds_test = []
scores = []
oof = np.zeros(( len(train), 21))
for i_fold,(train_idx, valid_idx)in enumerate(kfold.split(train, train[target])) :
print(f"--------fold {i_fold}-------")
x_tr = train.loc[train_idx, features]
y_tr = train.loc[train_idx, target]
x_va = train.loc[valid_idx, features]
y_va = train.loc[valid_idx, target]
lgb = LGBMClassifier(objective='multiclass', colsample_bytree=0.2, n_estimators=500)
lgb.fit(x_tr, y_tr, eval_set=[(x_va, y_va)], early_stopping_rounds=10, verbose=10)
def predict_proba(x):
return lgb.predict_proba(x)
pred_val = predict_proba(x_va)
oof[valid_idx] += pred_val
score = {
"logloss" : log_loss(y_va, pred_val),
"f1_micro" : f1_score(y_va, np.argmax(pred_val, axis = 1), average = "micro")}
print(score)
scores.append(score)
pred_test = predict_proba(test[features])
preds_test.append(pred_test )<create_dataframe> | lr = LogisticRegression(C = 1, penalty='l2', solver='liblinear')
lr.fit(X_train, Y_train)
predict_lr = lr.predict(X_test ) | Titanic - Machine Learning from Disaster |
5,560,938 | score_df = pd.DataFrame(scores)
score_df<save_to_csv> | print("Accuracy = {0}%".format(round(accuracy_score(Y_test, predict_lr)*100, 2)))
print(classification_report(Y_test, predict_lr))
print("Score = {0}".format(f1_score(Y_test, predict_lr)))
print(confusion_matrix(Y_test, predict_lr)) | Titanic - Machine Learning from Disaster |
5,560,938 | oof_df = pd.DataFrame(oof)
oof_df.to_csv("oof_lgb.csv", index = False )<save_to_csv> | rf = RandomForestClassifier()
params_rf = {'n_estimators':list(range(1,20)) , 'max_depth':list(range(1,10)) , 'criterion':['gini', 'entropy']}
grid_rf = GridSearchCV(rf, param_grid=params_rf, cv = 5, scoring='accuracy', n_jobs = -1)
grid_rf.fit(X_train,Y_train)
print('HyperParameter optimization')
grid_rf.best_params_ | Titanic - Machine Learning from Disaster |
5,560,938 | for i in range(len(preds_test)) :
pred_df = pd.DataFrame(preds_test[i])
pred_df.to_csv(f"pred_{i}_lgb.csv", index = False )<prepare_output> | rf = RandomForestClassifier(criterion = 'entropy', max_depth = 7, n_estimators = 16, bootstrap=False)
rf.fit(X_train, Y_train)
predict_rf = rf.predict(X_test)
print("Accuracy Score = {} %".format(rf.score(X_test, Y_test)*100))
print(classification_report(Y_test, predict_rf))
print('F1 Score = {}'.format(f1_score(Y_test, predict_rf)) ) | Titanic - Machine Learning from Disaster |
5,560,938 | pred_test_final = np.array(preds_test ).mean(axis = 0)
pred_test_final = np.argmax(pred_test_final, axis = 1 )<save_to_csv> | def model() :
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(120, activation = tf.nn.relu, kernel_initializer = 'normal'))
model.add(tf.keras.layers.Dense(120, activation = tf.nn.relu, kernel_initializer = 'normal'))
model.add(tf.keras.layers.Dense(120, activation = tf.nn.relu, kernel_initializer = 'normal'))
model.add(tf.keras.layers.Dense(1, activation = tf.nn.sigmoid))
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
return model | Titanic - Machine Learning from Disaster |
5,560,938 | sub["department_id"] = pred_test_final
sub.to_csv("submission_lgb.csv", index = False)
sub.head()<set_options> | model = model()
model.fit(X_train, Y_train, epochs = 500, verbose = 0 ) | Titanic - Machine Learning from Disaster |
5,560,938 | %matplotlib inline
plt.style.use('seaborn' )<load_from_csv> | val_loss, val_acc = model.evaluate(X_train, Y_train ) | Titanic - Machine Learning from Disaster |
5,560,938 | print(check_output(["ls", "../input/adultb"] ).decode("utf8"))
df_train = pd.read_csv("../input/adultb/train_data.csv", na_values = '?')
df_train.set_index('Id',inplace=True)
df_train.head()<sort_values> | val_loss, val_acc = model.evaluate(X_test, Y_test ) | Titanic - Machine Learning from Disaster |
5,560,938 | total = df_train.isnull().sum().sort_values(ascending = False)
percent =(( df_train.isnull().sum() /df_train.isnull().count())*100 ).sort_values(ascending = False)
missing_data = pd.concat([total, percent], axis = 1, keys = ['Total', '%'])
missing_data.head()<sort_values> | predict_nn = model.predict([X_test])
predict_nn_fin = []
for i in range(len(predict_nn)) :
if predict_nn[i]>=0.7:
predict_nn_fin.append(1)
else:
predict_nn_fin.append(0 ) | Titanic - Machine Learning from Disaster |
5,560,938 | total = df_train.isnull().sum().sort_values(ascending = False)
percent =(( df_train.isnull().sum() /df_train.isnull().count())*100 ).sort_values(ascending = False)
missing_data = pd.concat([total, percent], axis = 1, keys = ['Total', '%'])
missing_data.head()<count_values> | print("Accuracy = {0}%".format(round(accuracy_score(Y_test, predict_nn_fin)*100, 2)))
print(classification_report(Y_test, predict_nn_fin))
print("Score = {0}".format(f1_score(Y_test, predict_nn_fin)))
print(confusion_matrix(Y_test, predict_nn_fin)) | Titanic - Machine Learning from Disaster |
5,560,938 | <define_variables><EOS> | Titanic - Machine Learning from Disaster | |
9,024,077 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<filter> | %matplotlib inline
data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
data.describe() | Titanic - Machine Learning from Disaster |
9,024,077 | female = df_train[df_train['sex'] == 'Female']
male = df_train[df_train['sex'] == 'Male']<count_values> | columnFilter = ["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch"]
filteredData = data[columnFilter]
filteredTestData = test_data[columnFilter[1:]]
filteredData.head() | Titanic - Machine Learning from Disaster |
9,024,077 | white = df_train[df_train['race'] == 'White'].count() [0]
black = df_train[df_train['race'] == 'Black'].count() [0]
amer = df_train[df_train['race'] == 'Amer-Indian-Eskimo'].count() [0]
other = df_train[df_train['race'] == 'Other'].count() [0]
asian = df_train[df_train['race'] == 'Asian-Pac-Islander'].count() [0]
white_income = []
temp = df_train[df_train['race'] == 'White']
white_income.append(temp[temp['income'] == '>50K'].count() [0])
white_income.append(white-white_income[0])
black_income = []
temp = df_train[df_train['race'] == 'Black']
black_income.append(temp[temp['income'] == '>50K'].count() [0])
black_income.append(black-black_income[0])
amer_income = []
temp = df_train[df_train['race'] == 'Amer-Indian-Eskimo']
amer_income.append(temp[temp['income'] == '>50K'].count() [0])
amer_income.append(amer-amer_income[0])
asian_income = []
temp = df_train[df_train['race'] == 'Asian-Pac-Islander']
asian_income.append(temp[temp['income'] == '>50K'].count() [0])
asian_income.append(asian-asian_income[0])
other_income = []
temp = df_train[df_train['race'] == 'Other']
other_income.append(temp[temp['income'] == '>50K'].count() [0])
other_income.append(other-other_income[0] )<filter> | def oneHotEncode(dataToEncode, column):
onehot = pd.get_dummies(dataToEncode[column])
dataToEncode = dataToEncode.join(onehot)
dataToEncode = dataToEncode.drop(columns=column)
return dataToEncode
filteredData = oneHotEncode(filteredData, "Pclass")
filteredData.head() | Titanic - Machine Learning from Disaster |
9,024,077 | white = df_train[df_train['race'] == 'White']
black = df_train[df_train['race'] == 'Black']<filter> | def sex_to_numerical(d):
sex = d["Sex"]
if sex == "male":
return 1
return 0
filteredData['Sex'] = filteredData.apply(sex_to_numerical, axis=1)
filteredData.head() | Titanic - Machine Learning from Disaster |
9,024,077 | over_50k = df_train[df_train['income'] == '>50K']
under_50k = df_train[df_train['income'] == '<=50K']<count_values> | def normalize_age(dataToNormalize):
scaler = MinMaxScaler()
dataToNormalize["Age"] = scaler.fit_transform(dataToNormalize["Age"].values.reshape(-1,1))
return dataToNormalize
filteredData.Age.fillna(( filteredData['Age'].mean()), inplace=True)
filteredData = normalize_age(filteredData)
filteredData.head() | Titanic - Machine Learning from Disaster |
9,024,077 | df_train['income'].value_counts()<define_variables> | X = filteredData[filteredData.columns[1:]]
y = filteredData[filteredData.columns[0]]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train = X_train.to_numpy()
X_test = X_test.to_numpy()
y_train = y_train.to_numpy()
y_test = y_test.to_numpy() | Titanic - Machine Learning from Disaster |
9,024,077 | base = df_train<define_variables> | model = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(16, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=100, validation_data=(X_test, y_test)) | Titanic - Machine Learning from Disaster |
9,024,077 | quantitative_columns = ['age', 'fnlwgt', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']
qualitative_columns = ['education', 'marital.status', 'occupation', 'relationship', 'race',
'sex', 'native.country', 'income']<compute_test_metric> | filteredTestData = oneHotEncode(filteredTestData, "Pclass")
filteredTestData['Sex'] = filteredTestData.apply(sex_to_numerical, axis=1)
filteredTestData.Age.fillna(( filteredTestData['Age'].mean()), inplace=True)
filteredTestData = normalize_age(filteredTestData ) | Titanic - Machine Learning from Disaster |
9,024,077 | def isprivate(value):
if value == 'Private':
return 1
return 0
def catg(value, categories, ordenation = None):
if ordenation is None:
ordenation = np.arange(0, len(categories))
for pos in ordenation:
if value == categories[pos]:
return pos
return -1
def equals(value, x):
for v in x:
if v == value:
return 1
return 0<create_dataframe> | model.fit(X, y, epochs=100 ) | Titanic - Machine Learning from Disaster |
9,024,077 | private = pd.DataFrame({'private': base['workclass'].apply(isprivate)} )<count_values> | predictions = model.predict(filteredTestData)
predictions = np.where(predictions > 0.5, 1, 0)
predictions | Titanic - Machine Learning from Disaster |
9,024,077 | <create_dataframe><EOS> | predictions = pd.DataFrame(predictions ).rename(columns={0: "Survived"})
predictions.index.name ="PassengerId"
predictions.index += 892
predictions.to_csv("predictions.csv")
predictions | Titanic - Machine Learning from Disaster |
5,074,309 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<prepare_output> | %matplotlib inline
sns.set(style="ticks", color_codes=True)
| Titanic - Machine Learning from Disaster |
5,074,309 | edu_order = [15, 11, 5, 12, 10, 1, 14, 7, 2, 8, 4, 13, 0, 3, 6, 9]
args = [base['education'].unique() , edu_order]
education_classes = pd.DataFrame({'education.classes': base['education'].apply(catg, args = args)} )<define_variables> | seed = 7
np.random.seed(seed ) | Titanic - Machine Learning from Disaster |
5,074,309 | median = np.median(base[base['capital.gain'] > 0]['capital.gain'])
aux = pd.cut(base['capital.gain'],
bins = [-1, 0, median, base['capital.gain'].max() +1],
labels = [0, 1, 2])
capital_gain_clusters = pd.DataFrame({'capital.gain.clusters': aux})
capital_gain_clusters = capital_gain_clusters.astype(int)
median = np.median(base[base['capital.loss'] > 0]['capital.loss'])
aux = pd.cut(base['capital.loss'],
bins = [-1, 0, median, base['capital.loss'].max() +1],
labels = [0, 1, 2])
capital_loss_clusters = pd.DataFrame({'capital.loss.clusters': aux})
capital_loss_clusters = capital_loss_clusters.astype(int )<concatenate> | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_copy = train.copy()
test_copy = test.copy()
all_data = pd.concat([train, test], sort = False)
train.head() | Titanic - Machine Learning from Disaster |
5,074,309 | new_data = pd.concat([sul_global, private, education_classes,
hours_per_week_clusters, capital_gain_clusters,
capital_loss_clusters], axis = 1 )<create_dataframe> | train.groupby('Pclass' ).Survived.mean() | Titanic - Machine Learning from Disaster |
5,074,309 | aux = base['income'].apply(equals, args = [['>50K']])
aux = pd.concat([new_data, pd.DataFrame({'income': aux})], axis = 1)
new = aux.astype(int)
aux.head()<drop_column> | train.groupby('Sex' ).Survived.mean() | Titanic - Machine Learning from Disaster |
5,074,309 | base = base.drop(['fnlwgt', 'education', 'sex', 'native.country', 'workclass', 'marital.status'], axis = 1)
base.columns<concatenate> | train.groupby(['SibSp'] ).Survived.mean() | Titanic - Machine Learning from Disaster |
5,074,309 | base = pd.concat([new_data, base], axis = 1 )<categorify> | train.groupby(['Parch'] ).Survived.mean() | Titanic - Machine Learning from Disaster |
5,074,309 | names = ['occupation', 'relationship', 'race']
enc_x = []
for i in range(len(names)) :
enc_x.append(prep.LabelEncoder())
enc_y = prep.LabelEncoder()<categorify> | all_data['Title'] = all_data['Name'].str.extract(r'([A-Za-z]+)\.', expand=True ) | Titanic - Machine Learning from Disaster |
5,074,309 | i = 0
for name in names:
base[name] = enc_x[i].fit_transform(base[name])
i += 1
base['income'] = enc_y.fit_transform(base['income'] )<drop_column> | all_data['Title'].value_counts() | Titanic - Machine Learning from Disaster |
5,074,309 | unselected_columns = []
unselected_columns.append('capital.loss')
unselected_columns.append('capital.gain')
unselected_columns.append('sul.global')
unselected_columns.append('private')
unselected_columns.append('education.classes')
unselected_columns.append('hours.per.week.clusters')
base = base.drop(unselected_columns, axis = 1)
base.head()<data_type_conversions> | mappings = {'Dr':'Respected_Male', 'Col':'Respected_Male', 'Major':'Respected_Male', 'Capt':'Respected_Male',
'Mme':'Noble', 'Mlle':'Noble', 'Countess': 'Noble', 'Lady': 'Noble', 'Sir':'Noble',
'Ms' : 'Miss', 'Rev': 'Other', 'Jonkheer': 'Other', 'Dona': 'Other', 'Don': 'Other'
}
all_data.replace({'Title': mappings}, inplace = True ) | Titanic - Machine Learning from Disaster |
5,074,309 | aux = base.astype(int )<import_modules> | all_data['Title'].value_counts() | Titanic - Machine Learning from Disaster |
5,074,309 | from sklearn.preprocessing import StandardScaler<prepare_x_and_y> | one_hot_encoding_list = []
one_hot_encoding_list.append('Title' ) | Titanic - Machine Learning from Disaster |
5,074,309 | X = base.drop(['income'], axis = 1)
y = base['income']<normalization> | all_data['Age'].isnull().sum() | Titanic - Machine Learning from Disaster |
5,074,309 | scaler_x = StandardScaler()
X = scaler_x.fit_transform(X )<import_modules> | title_grouped = all_data.groupby(['Title'])
for title in all_data.Title.unique() :
all_data.loc[(all_data.Age.isnull())&(all_data.Title == title), 'Age'] = title_grouped.get_group(title ).Age.mean() | Titanic - Machine Learning from Disaster |
5,074,309 | from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score<find_best_params> | all_data['AgeBin'] = pd.qcut(all_data['Age'], 5 ) | Titanic - Machine Learning from Disaster |
5,074,309 | scores_mean = []
scores_std = []
k_lim_inf = 1
k_lim_sup = 30
folds = 5
k_max = None
max_std = 0
max_acc = 0
i = 0
print('Finding best k...')
for k in range(k_lim_inf, k_lim_sup):
KNNclf = KNeighborsClassifier(n_neighbors=k, p = 1)
score = cross_val_score(KNNclf, X, y, cv = folds)
scores_mean.append(score.mean())
scores_std.append(score.std())
if scores_mean[i] > max_acc:
k_max = k
max_acc = scores_mean[i]
max_std = scores_std[i]
i += 1
if not(k%3):
print(' K = {0} | Best CV acc = {1:2.2f}% +/-{3:4.2f}%(best k = {2})'.format(k, max_acc*100, k_max, max_std*100))
print('\nBest k: {}'.format(k_max))<train_model> | label = preprocessing.LabelEncoder()
all_data['AgeBin'] = label.fit_transform(all_data['AgeBin'])
cat_plot('AgeBin', data = all_data ) | Titanic - Machine Learning from Disaster |
5,074,309 | k = k_max
KNNclf = KNeighborsClassifier(n_neighbors=k, p = 1)
KNNclf.fit(X, y )<load_from_csv> | all_data['FamilySize'] = all_data['SibSp'] + all_data['Parch'] + 1 | Titanic - Machine Learning from Disaster |
df_test = pd.read_csv('../input/adultb/test_data.csv', na_values='?')
df_test.set_index('Id', inplace = True)
df_test.head()<concatenate> | all_data['FamilySize'].value_counts(dropna = False ) | Titanic - Machine Learning from Disaster |
5,074,309 | features = ['age', 'education.num', 'occupation', 'relationship', 'race', 'hours.per.week']
base_test = pd.concat([new_data, df_test[features]], axis = 1 )<sort_values> | all_data['Alone'] = 0;
all_data.loc[all_data['FamilySize'] == 1, 'Alone'] = 1
all_data['SmallFamily'] = 0;
all_data.loc[(all_data['FamilySize'] > 1)&(all_data['FamilySize'] <= 4), 'SmallFamily'] = 1
all_data['BigFamily'] = 0;
all_data.loc[all_data['FamilySize'] > 4, 'BigFamily'] = 1 | Titanic - Machine Learning from Disaster |
5,074,309 | total = base_test.isnull().sum().sort_values(ascending = False)
percent =(( base_test.isnull().sum() /base_test.isnull().count())*100 ).sort_values(ascending = False)
missing_data = pd.concat([total, percent], axis = 1, keys = ['Total', '%'])
missing_data.head()<sort_values> | all_data.Cabin.isnull().sum() | Titanic - Machine Learning from Disaster |
5,074,309 | total = base_test.isnull().sum().sort_values(ascending = False)
percent =(( base_test.isnull().sum() /base_test.isnull().count())*100 ).sort_values(ascending = False)
missing_data = pd.concat([total, percent], axis = 1, keys = ['Total', '%'])
missing_data.head()<categorify> | all_data['HaveCabin'] = 0;
all_data.loc[all_data['Cabin'].notnull() , 'HaveCabin'] = 1 | Titanic - Machine Learning from Disaster |
5,074,309 | names = ['occupation', 'relationship', 'race']
i = 0
for name in names:
base_test[name] = enc_x[i].transform(base_test[name])
i += 1<normalization> | all_data['AnyoneSurvivedFromGroup'] = 0.5
for _, grp_df in all_data.groupby('Ticket'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['AnyoneSurvivedFromGroup'] == 0)|(row['AnyoneSurvivedFromGroup']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
all_data.loc[all_data['PassengerId'] == passID, 'AnyoneSurvivedFromGroup'] = 1
elif(smin == 0.0):
all_data.loc[all_data['PassengerId'] == passID, 'AnyoneSurvivedFromGroup'] = 0 | Titanic - Machine Learning from Disaster |
5,074,309 | X_prev = scaler_x.transform(base_test.values )<predict_on_test> | all_data[all_data['AnyoneSurvivedFromGroup'] != 0.5]['PassengerId'].count() | Titanic - Machine Learning from Disaster |
5,074,309 | temp = KNNclf.predict(X_prev)
temp = enc_y.inverse_transform(temp)
temp = {'Income': temp}
predictions = pd.DataFrame(temp )<save_to_csv> | for _, grp_df in all_data.groupby('Cabin'):
if(len(grp_df)!= 1):
for ind, row in grp_df.iterrows() :
if(row['AnyoneSurvivedFromGroup'] == 0)|(row['AnyoneSurvivedFromGroup']== 0.5):
smax = grp_df.drop(ind)['Survived'].max()
smin = grp_df.drop(ind)['Survived'].min()
passID = row['PassengerId']
if(smax == 1.0):
all_data.loc[all_data['PassengerId'] == passID, 'AnyoneSurvivedFromGroup'] = 1
elif(smin == 0.0):
all_data.loc[all_data['PassengerId'] == passID, 'AnyoneSurvivedFromGroup'] = 0 | Titanic - Machine Learning from Disaster |
5,074,309 | predictions.to_csv("submission.csv", index = True, index_label = 'Id' )<import_modules> | all_data[all_data['AnyoneSurvivedFromGroup'] != 0.5]['PassengerId'].count() | Titanic - Machine Learning from Disaster |
5,074,309 | import pandas as pd
import sklearn<load_from_csv> | all_data.Fare.isnull().sum() | Titanic - Machine Learning from Disaster |
adult = pd.read_csv("../input/adult-db/train_data.csv",header=0, index_col=0, na_values="?" )<count_values> | pclass_grouped = all_data.groupby(['Pclass'])
for pclass in all_data.Pclass.unique() :
all_data.loc[(all_data.Fare.isnull())&(all_data.Pclass == pclass), 'Fare'] = pclass_grouped.get_group(pclass ).Fare.mean() | Titanic - Machine Learning from Disaster |
5,074,309 | adult["native.country"].value_counts()<set_options> | all_data['FareBin'] = pd.qcut(all_data['Fare'], 4)
label = preprocessing.LabelEncoder()
all_data['FareBin'] = label.fit_transform(all_data['FareBin'] ) | Titanic - Machine Learning from Disaster |
5,074,309 | %matplotlib inline<count_values> | all_data.isnull().sum() | Titanic - Machine Learning from Disaster |
5,074,309 | adult["sex"].value_counts()<categorify> | all_data.loc[all_data['Embarked'].isnull() , 'Embarked'] = all_data['Embarked'].mode() [0] | Titanic - Machine Learning from Disaster |
5,074,309 | nadult = adult.dropna()
adult_fill = adult.fillna(-1)
for col in [1,3,5,6,7,8,9,13]:
nadult = adult.iloc[:,col].dropna()
label_encoder = LabelEncoder().fit(nadult)
nadult_encoded = label_encoder.transform(nadult)
adult_fill.iloc[np.where(adult_fill.iloc[:,col].values==-1)[0],col] = label_encoder.inverse_transform([int(mode(nadult_encoded)) ])
for col in [0,2,4,10,11,12]:
adult_fill.iloc[np.where(adult_fill.iloc[:,col].values==-1)[0],col] = int(np.mean(adult.iloc[:,col].dropna().values))
adult_fill<prepare_x_and_y> | one_hot_encoding_list.append('Embarked' ) | Titanic - Machine Learning from Disaster |
5,074,309 | minmaxscaler = MinMaxScaler()
col_inds = [0,1,4,5,6,7,8,10,11,12]
Xadult_unscaled = adult_fill.iloc[:,col_inds].apply(LabelEncoder().fit_transform)
Xadult = minmaxscaler.fit_transform(Xadult_unscaled)
Yadult = adult_fill.income
print(Xadult_unscaled.columns.values )<find_best_params> | all_data.drop(['PassengerId', 'Name', 'Ticket', 'Cabin', 'Age', 'Fare', 'SibSp', 'Parch', 'FamilySize', 'FareBin'], axis = 1, inplace = True ) | Titanic - Machine Learning from Disaster |
5,074,309 | score_medio = np.zeros(50)
std_score = np.zeros(50)
for i in range(1,51):
knn = KNeighborsClassifier(n_neighbors=i, p=1)
scores = cross_val_score(knn, Xadult, Yadult, cv=10)
score_medio[i-1]=np.mean(scores)
std_score[i-1]=np.std(scores)
print(np.argmax(score_medio)+1)
print(np.amax(score_medio))<load_from_csv> | sex_mappings = {'male': 0,'female': 1}
all_data['Sex'].replace(sex_mappings, inplace = True ) | Titanic - Machine Learning from Disaster |
testAdult = pd.read_csv("../input/adult-db/test_data.csv",header=0, index_col=0, na_values="?")
testAdult.shape<categorify> | one_hot_encoding_list.append('Pclass' ) | Titanic - Machine Learning from Disaster |
5,074,309 | testAdult_fill = testAdult.fillna(-1)
for col in [1,3,5,6,7,8,9,13]:
nTestAdult = testAdult.iloc[:,col].dropna()
label_encoder = LabelEncoder().fit(nTestAdult)
ntestAdult_encoded = label_encoder.transform(nTestAdult)
testAdult_fill.iloc[np.where(testAdult_fill.iloc[:,col].values==-1)[0],col] = label_encoder.inverse_transform([int(mode(ntestAdult_encoded)) ])
for col in [0,2,4,10,11,12]:
testAdult_fill.iloc[np.where(testAdult_fill.iloc[:,col].values==-1)[0],col] = int(np.mean(testAdult.iloc[:,col].dropna().values))
testAdult_fill<categorify> | all_data = pd.get_dummies(data = all_data, columns = one_hot_encoding_list ) | Titanic - Machine Learning from Disaster |
5,074,309 | XtestAdult_unscaled = testAdult_fill.iloc[:,col_inds].apply(LabelEncoder().fit_transform)
XtestAdult = minmaxscaler.transform(XtestAdult_unscaled )<train_model> | train_data = all_data[:len(train)]
test_data = all_data[len(train):] | Titanic - Machine Learning from Disaster |
5,074,309 | knn = KNeighborsClassifier(n_neighbors=34,p=1)
knn.fit(Xadult,Yadult )<predict_on_test> | X = train_data.iloc[: , 1:].values
y = train_data.iloc[:, 0].values
test_data = test_data.iloc[: , 1:]
X_test = test_data.values
print(str(X.shape))
print(str(X_test.shape)) | Titanic - Machine Learning from Disaster |
5,074,309 | YtestAdult = knn.predict(XtestAdult)
YtestAdult<create_dataframe> | def create_model() :
model = Sequential()
model.add(Dense(16, input_dim = X.shape[1], activation = 'relu', kernel_regularizer=l2(0.01)))
model.add(Dense(16, activation = 'relu', kernel_regularizer=l2(0.01)))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss='binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
return model | Titanic - Machine Learning from Disaster |
5,074,309 | prediction = pd.DataFrame(testAdult.index)
prediction["income"] = YtestAdult<save_to_csv> | num_epochs = max_val_acc_epoch
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
cvscores = []
for train, test in kfold.split(X, y):
model = create_model()
history = model.fit(X[train], y[train], epochs=num_epochs, batch_size=10, verbose = 0)
scores = model.evaluate(X[test], y[test], verbose=0)
print("%s: %.2f%%" %(model.metrics_names[1], scores[1]*100))
cvscores.append(scores[1] * 100)
print("%.2f%%(+/- %.2f%%)" %(np.mean(cvscores), np.std(cvscores)) ) | Titanic - Machine Learning from Disaster |
5,074,309 | prediction.to_csv("adult_prediction_5.csv", index=False )<set_options> | model = create_model()
history = model.fit(X, y, epochs=num_epochs, batch_size=10, verbose = 0 ) | Titanic - Machine Learning from Disaster |
5,074,309 | %matplotlib inline
<install_modules> | prediction = model.predict(X_test ) | Titanic - Machine Learning from Disaster |
5,074,309 | !pip install -q -U tf-hub-nightly
!pip install -q tfds-nightly
<define_variables> | submission = pd.DataFrame(test_copy[['PassengerId']])
submission['Survived'] = prediction
submission['Survived'] = submission['Survived'].apply(lambda x: 0 if x < 0.5 else 1 ) | Titanic - Machine Learning from Disaster |
5,074,309 | <load_from_csv><EOS> | submission.to_csv('submission.csv', index = False ) | Titanic - Machine Learning from Disaster |
929,171 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables> | %matplotlib inline
warnings.filterwarnings('ignore')
| Titanic - Machine Learning from Disaster |
929,171 | batch_size = 64
IMG_HEIGHT = 224
IMG_WIDTH = 224<define_variables> | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv" ) | Titanic - Machine Learning from Disaster |
929,171 | train_image_generator = ImageDataGenerator(rescale=1./255, validation_split=0.2, horizontal_flip=True, rotation_range=5, shear_range=0.2, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2)
validation_image_generator = ImageDataGenerator(rescale=1./255)
<categorify> | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv" ) | Titanic - Machine Learning from Disaster |
929,171 | def oh_to_class(Y):
result = np.zeros(( Y.shape[0], 1))
for i in range(Y.shape[0]):
result[i]=np.argmax(Y[i])
return result<define_variables> | train.Embarked.value_counts() /train.shape[0]
train["Embarked"] = train["Embarked"].fillna("S")
test["Embarked"] = test["Embarked"].fillna("S" ) | Titanic - Machine Learning from Disaster |