kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
22,892,834 | X_train1 = X_train
X_train2 = X_train
X_train3 = X_train
X_test1 = X_test
X_test2 = X_test
X_test3 = X_test<drop_column> | one_hot_encoder = OneHotEncoder(sparse=False)
def encode_embarked(data):
encoded = pd.DataFrame(one_hot_encoder.fit_transform(data[['Embarked']]))
encoded.columns = one_hot_encoder.get_feature_names(['Embarked'])
data.drop(['Embarked'], axis=1, inplace=True)
data = data.join(encoded)
return data
train_data = encode_embarked(train_data)
test_data = encode_embarked(test_data) | Titanic - Machine Learning from Disaster |
22,892,834 | drop_col = ['emp_title']
X_train1 = X_train1.drop(columns=drop_col)
X_test1 = X_test1.drop(columns=drop_col)<drop_column> | X = train_data.drop(['Survived', 'PassengerId'], axis=1)
y = train_data['Survived']
test_X = test_data.drop(['PassengerId'], axis=1) | Titanic - Machine Learning from Disaster |
22,892,834 | drop_col = ['emp_title']
X_train2 = X_train2.drop(columns=drop_col)
X_test2 = X_test2.drop(columns=drop_col)<normalization> | best_models = {}
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
def print_best_parameters(hyperparameters, best_parameters):
value = "Best parameters: "
for key in hyperparameters:
value += str(key) + ": " + str(best_parameters[key]) + ", "
if hyperparameters:
print(value[:-2])
def get_best_model(estimator, hyperparameters):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
grid_search = GridSearchCV(estimator=estimator, param_grid=hyperparameters,
n_jobs=-1, cv=cv, scoring="accuracy")
best_model = grid_search.fit(train_X, train_y)
best_parameters = best_model.best_estimator_.get_params()
print_best_parameters(hyperparameters, best_parameters)
return best_model
def evaluate_model(model, name):
print("Accuracy score:", accuracy_score(train_y, model.predict(train_X)))
best_models[name] = model | Titanic - Machine Learning from Disaster |
22,892,834 | ratio_PCA = 0.95
sequence_col = ['sub_grade'
,'loan_amnt'
,'annual_inc'
,'dti'
,'Asset']
X_train_sequence = X_train2[sequence_col]
X_test_sequence = X_test2[sequence_col]
scaler = StandardScaler()
scaler.fit(X_train_sequence)
X_train_sequence[sequence_col] = scaler.transform(X_train_sequence[sequence_col])
X_test_sequence[sequence_col] = scaler.transform(X_test_sequence[sequence_col])
X_train_sequence.fillna(X_train_sequence.median(), inplace=True)
X_test_sequence.fillna(X_test_sequence.median(), inplace=True)
pca = PCA(n_components=X_train_sequence.shape[1])
pca.fit(X_train_sequence)
sorted_variance = sorted(pca.explained_variance_ratio_, reverse=True)
cum_sum = np.cumsum(sorted_variance)
ratio = cum_sum / np.sum(sorted_variance)
col_num = len(ratio) - len(ratio[ratio > ratio_PCA]) + 1
if col_num >= X_train_sequence.shape[1]:
col_num = X_train_sequence.shape[1]-1
sorted_variance = sorted(pca.explained_variance_ratio_,reverse=True)
np.set_printoptions(suppress=True)
svd = TruncatedSVD(n_components=col_num, n_iter=7, random_state=42)
svd.fit(X_train_sequence)
X_train_sequence = svd.transform(X_train_sequence)
X_test_sequence = svd.transform(X_test_sequence)
column_name = []
for c in range(0,col_num):
column_name.append('PCA' + str(c))
X_train_sequence = pd.DataFrame(X_train_sequence,columns = column_name)
X_test_sequence = pd.DataFrame(X_test_sequence,columns = column_name)
X_train_sequence.index = X_train.index
X_test_sequence.index = X_test.index
drop_col = sequence_col
X_train2 = X_train2.drop(columns=drop_col)
X_test2 = X_test2.drop(columns=drop_col)
X_train2 = pd.concat([X_train2, X_train_sequence], axis=1)
X_test2 = pd.concat([X_test2, X_test_sequence], axis=1)<drop_column> | class MyXGBClassifier(XGBClassifier):
def fit(self, X, y=None):
return super(XGBClassifier, self).fit(X, y,
verbose=False,
early_stopping_rounds=40,
eval_metric='logloss',
eval_set=[(val_X, val_y)]) | Titanic - Machine Learning from Disaster |
22,892,834 | drop_col = ['emp_title4']
X_train3 = X_train3.drop(columns=drop_col)
X_test3 = X_test3.drop(columns=drop_col)<train_model> | randomForest = RandomForestClassifier(random_state=1, n_estimators=20, max_features='auto',
criterion='gini', max_depth=4, min_samples_split=2,
min_samples_leaf=3)
xgbClassifier = MyXGBClassifier(seed=1, tree_method='gpu_hist', predictor='gpu_predictor',
use_label_encoder=False, learning_rate=0.4, gamma=0.4,
max_depth=4, reg_lambda=0, reg_alpha=0.1)
lgbmClassifier = LGBMClassifier(random_state=1, device='gpu', boosting_type='dart',
num_leaves=8, learning_rate=0.1, n_estimators=100,
reg_alpha=1, reg_lambda=1)
classifiers = [
('randomForest', randomForest),
('xgbClassifier', xgbClassifier),
('lgbmClassifier', lgbmClassifier)
] | Titanic - Machine Learning from Disaster |
22,892,834 | clf = LGBMClassifier(boosting_type='gbdt', class_weight='balanced')
clf.fit(X_train1, y_train, eval_metric='auc')
y_pred1 = clf.predict_proba(X_test1)[:,1]
clf = LGBMClassifier(boosting_type='gbdt', class_weight='balanced')
clf.fit(X_train2, y_train, eval_metric='auc')
y_pred2 = clf.predict_proba(X_test2)[:,1]
clf = LGBMClassifier(boosting_type='gbdt', class_weight='balanced')
clf.fit(X_train3, y_train, eval_metric='auc')
y_pred3 = clf.predict_proba(X_test3)[:,1]
y_pred = (y_pred1 + y_pred2 + y_pred3) / 3<save_to_csv> | hyperparameters = {
'n_jobs': [-1],
'voting': ['hard', 'soft'],
'weights': [(1, 1, 1),
(2, 1, 1),(1, 2, 1),(1, 1, 2),
(2, 2, 1),(1, 2, 2),(2, 1, 2),
(3, 2, 1),(1, 3, 2),(2, 1, 3),(3, 1, 2)]
}
estimator = VotingClassifier(estimators=classifiers)
best_model_voting = get_best_model(estimator, hyperparameters) | Titanic - Machine Learning from Disaster |
22,892,834 | submission = pd.read_csv('/kaggle/input/homework-for-students3/sample_submission.csv', index_col=0)
submission.loan_condition = y_pred
submission.to_csv('submission.csv')<import_modules> | evaluate_model(best_model_voting.best_estimator_, 'voting') | Titanic - Machine Learning from Disaster |
22,892,834 | import numpy as np
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
import sklearn as sk
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import cross_validate<load_from_csv> | for model in best_models:
predictions = best_models[model].predict(test_X)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission_' + model + '.csv', index=False) | Titanic - Machine Learning from Disaster |
22,893,677 | o_train = pd.read_csv("../input/train.csv")
o_valid = pd.read_csv("../input/valid.csv")
train = pd.read_csv("../input/train.csv")
valid = pd.read_csv("../input/valid.csv")
data = pd.concat([train, valid], sort=False)
example_sub = pd.read_csv("../input/sample_submission.csv")<filter> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv') | Titanic - Machine Learning from Disaster |
22,893,677 | train[train.ID == 5022].article_link<filter> | features = ['Pclass','Sex','SibSp','Parch','Fare','Age']
x = pd.get_dummies(train_data[features])
x_test = pd.get_dummies(test_data[features])
y = train_data["Survived"]
| Titanic - Machine Learning from Disaster |
22,893,677 | sarcastic = len(train[train.is_sarcastic == 1])
non_sarcastic = len(train[train.is_sarcastic == 0])
sarcastic / (non_sarcastic + sarcastic)<filter> | x['Fare'].fillna(x['Fare'].mode()[0], inplace=True)
x_test['Fare'].fillna(x_test['Fare'].mode()[0], inplace=True)
x['Age'].fillna(x['Age'].mode()[0], inplace=True)
x_test['Age'].fillna(x_test['Age'].mode()[0], inplace=True)
| Titanic - Machine Learning from Disaster |
22,893,677 | print(np.where(pd.isnull(train)))
print(np.where(pd.isna(train)))
np.where(train.applymap(lambda x: x == ''))<choose_model_class> | param_grid = {'alpha': sp_rand()}
model = Ridge()
rsearch = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100)
rsearch.fit(x, y)
print(rsearch) | Titanic - Machine Learning from Disaster |
22,893,677 | sarcasm_classfication = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('classify', LinearSVC(C=1))
])<split> | output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_pred})
output.to_csv('submission3.csv', index=False)
print("Your submission was successfully saved!") | Titanic - Machine Learning from Disaster |
22,745,657 | X_train, X_test, y_train, y_test = train_test_split(train.headline, train.is_sarcastic)<train_model> | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") | Titanic - Machine Learning from Disaster |
22,745,657 | sarcasm_classfication.fit(X_train, y_train)<compute_train_metric> | temp = np.where(np.isnan(train_data.Age))
len(temp[0]) | Titanic - Machine Learning from Disaster |
22,745,657 | print(roc_auc_score(y_train, sarcasm_classfication.decision_function(X_train)))
print(roc_auc_score(y_test, sarcasm_classfication.decision_function(X_test)))
cross_validate(sarcasm_classfication, train.headline, train.is_sarcastic, cv=5, scoring='roc_auc')<feature_engineering> | for item in temp[0]:
train_data.Age.at[item] = np.mean(train_data.Age)
temp = np.where(np.isnan(train_data.Age))
len(temp[0]) | Titanic - Machine Learning from Disaster |
22,745,657 | def remove_punctuation(dataframe):
rgx = "('s|[!?,.:;'$])"
tmp = dataframe.copy()
tmp['headline'] = tmp['headline'].str.replace(rgx, '')
return tmp<find_best_model_class> | temp = np.where(np.isnan(test_data.Fare))
for item in temp[0]:
test_data.Fare.at[item] = np.mean(test_data.Fare)
temp = np.where(np.isnan(test_data.Fare))
len(temp[0]) | Titanic - Machine Learning from Disaster |
22,745,657 | tmp = train.copy()
tmp = remove_punctuation(tmp)
X_train, X_test, y_train, y_test = train_test_split(tmp.headline, tmp.is_sarcastic)
sarcasm_classfication.fit(X_train, y_train)
print(roc_auc_score(y_train, sarcasm_classfication.decision_function(X_train)))
print(roc_auc_score(y_test, sarcasm_classfication.decision_function(X_test)))
cross_validate(sarcasm_classfication, X_train, y_train, cv=5, scoring='roc_auc')<feature_engineering> | temp = np.where(np.isnan(test_data.Age))
for item in temp[0]:
test_data.Age.at[item] = np.mean(train_data.Age)
temp = np.where(np.isnan(test_data.Age))
len(temp[0]) | Titanic - Machine Learning from Disaster |
22,745,657 | def get_top_n_words(corpus, n=None):
vec = CountVectorizer().fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx], idx) for word, idx in vec.vocabulary_.items()]
words_freq = sorted(words_freq, key=lambda x: x[1], reverse=True)
words = [x[0] for x in words_freq]
count = [x[1] for x in words_freq]
return pd.DataFrame({'Words': words[:n], 'Amount': count[:n]})<find_best_model_class> | y = train_data['Survived']
features = ['Pclass', 'Sex', 'SibSp', 'Parch','Fare', 'Age']
x = pd.get_dummies(train_data[features])
x_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(x, y)
predictions = model.predict(x_test) | Titanic - Machine Learning from Disaster |
22,745,657 | t = train.copy()
for word in tmp.Words:
t.headline.str.replace(word, '')
X_train, X_test, y_train, y_test = train_test_split(t.headline, t.is_sarcastic)
sarcasm_classfication.fit(X_train, y_train)
print(roc_auc_score(y_train, sarcasm_classfication.decision_function(X_train)))
print(roc_auc_score(y_test, sarcasm_classfication.decision_function(X_test)))
cross_validate(sarcasm_classfication, X_train, y_train, cv=5, scoring='roc_auc')<feature_engineering> | output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!") | Titanic - Machine Learning from Disaster |
22,798,211 | def remove_double_links(series):
rgx = '(https?(?!.+https?).+)'
tmp = series.copy()
tmp['article_link'] = tmp['article_link'].str.extract(rgx)
return tmp<feature_engineering> | df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv') | Titanic - Machine Learning from Disaster |
22,798,211 | def get_source(series):
rgx = '((?!https?:)\/\/.+?\..+?\/)'
tmp = series.copy()
tmp['article_link'] = tmp['article_link'].str.extract(rgx)
tmp['article_link'] = tmp['article_link'].str.strip(to_strip="/w")
tmp['article_link'] = tmp['article_link'].str.strip(to_strip=r'^\.')
return tmp<create_dataframe> | df['Sex'].value_counts() | Titanic - Machine Learning from Disaster |
22,798,211 | def rename_article_link(dataframe):
return dataframe.rename(columns={'article_link': 'source'})<feature_engineering> | complete_df = pd.concat([df, test_df]) | Titanic - Machine Learning from Disaster |
22,798,211 | train['headline_and_source'] = train.source + ' ' + train.headline<split> | complete_df.isnull().sum() | Titanic - Machine Learning from Disaster |
22,798,211 | X_train, X_test, y_train, y_test = train_test_split(train['headline_and_source'], train.is_sarcastic)
sarcasm_classfication.fit(X_train, y_train)<compute_train_metric> | complete_df[complete_df['Embarked'].isnull()] | Titanic - Machine Learning from Disaster |
22,798,211 | print(roc_auc_score(y_train, sarcasm_classfication.decision_function(X_train)))
print(roc_auc_score(y_test, sarcasm_classfication.decision_function(X_test)))
cross_validate(sarcasm_classfication, train.headline_and_source, train.is_sarcastic, cv=25, scoring='roc_auc', return_train_score=True)<train_model> | complete_df['Embarked'] = complete_df['Embarked'].fillna('C') | Titanic - Machine Learning from Disaster |
22,798,211 | tmp = train.copy()
X_tmp_train, X_tmp_test, y_tmp_train, y_tmp_test = train_test_split(tmp.source, tmp.is_sarcastic)
tmp_model = sarcasm_classfication
tmp_model.fit(X_tmp_train, y_tmp_train)
print(roc_auc_score(y_tmp_train, tmp_model.decision_function(X_tmp_train)))
print(roc_auc_score(y_tmp_test, tmp_model.decision_function(X_tmp_test)))
cross_validate(sarcasm_classfication, X_tmp_train, y_tmp_train, cv=5, scoring='roc_auc', return_train_score=True)<find_best_model_class> | complete_df[complete_df['Fare'].isnull()] | Titanic - Machine Learning from Disaster |
22,798,211 | tmp = train.copy()
tmp = tmp.drop(columns=['ID', 'headline', 'headline_and_source'])
tmp = pd.get_dummies(data=tmp, columns=['source'])
X_tmp_train, X_tmp_test, y_tmp_train, y_tmp_test = train_test_split(tmp.drop(columns=['is_sarcastic']), tmp.is_sarcastic)
tmp_model = RandomForestClassifier(n_jobs=-1, n_estimators=100)
tmp_model.fit(X_tmp_train, y_tmp_train)
prob = tmp_model.predict_proba(X_tmp_train)
prob = prob[:, 1]
print(roc_auc_score(y_tmp_train, prob))
prob = tmp_model.predict_proba(X_tmp_test)
prob = prob[:, 1]
print(roc_auc_score(y_tmp_test, prob))
cross_validate(tmp_model, X_tmp_train, y_tmp_train, cv=5, scoring='roc_auc', return_train_score=True)<drop_column> | complete_df['Fare'] = complete_df.groupby('Pclass')['Fare'].transform(lambda val: val.fillna(val.median())) | Titanic - Machine Learning from Disaster |
22,798,211 | valid = remove_double_links(valid)
valid = get_source(valid)
valid = valid.rename(columns={'article_link': 'source'})<save_to_csv> | complete_df.loc[complete_df['Sex']=='female','Age'] = complete_df[complete_df['Sex']=='female']['Age'].transform(lambda val: val.fillna(val.median()))
complete_df.loc[complete_df['Sex']=='male', 'Age'] = complete_df[complete_df['Sex']=='male']['Age'].transform(lambda val: val.fillna(val.median())) | Titanic - Machine Learning from Disaster |
22,798,211 | predicted = sarcasm_classfication.predict(valid.source)
prediction_dataframe = pd.DataFrame({'ID': valid.ID, 'is_sarcastic': predicted})
prediction_dataframe.to_csv('output.csv', index=False)<import_modules> | complete_df.isnull().sum() | Titanic - Machine Learning from Disaster |
22,798,211 | from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, roc_auc_score
from sklearn.tree import DecisionTreeClassifier<load_from_csv> | X = complete_df[:891].drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
X | Titanic - Machine Learning from Disaster |
22,798,211 | X_train = pd.read_csv("../input/nctu-bdalab-2020-onboard/data_train.csv", index_col="index")
Y_train = pd.read_csv("../input/nctu-bdalab-2020-onboard/answer_train.csv", index_col="index")
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.2)<train_model> | X = pd.get_dummies(X)
X | Titanic - Machine Learning from Disaster |
22,798,211 | model = DecisionTreeClassifier()
model.fit(X_train, Y_train)<compute_train_metric> | y = complete_df[:891]['Survived'] | Titanic - Machine Learning from Disaster |
22,798,211 | train_pred = model.predict(X_train)
print(classification_report(Y_train, train_pred))
print(roc_auc_score(Y_train, train_pred))<predict_on_test> | from sklearn.model_selection import train_test_split | Titanic - Machine Learning from Disaster |
22,798,211 | test_pred = model.predict(X_test)
print(classification_report(Y_test, test_pred))
print(roc_auc_score(Y_test, test_pred))<save_to_csv> | from sklearn.model_selection import train_test_split | Titanic - Machine Learning from Disaster |
22,798,211 | X_submit = pd.read_csv("../input/nctu-bdalab-2020-onboard/data_test.csv", index_col="index")
pred = model.predict(X_submit)
pred_df = pd.DataFrame(data={'default.payment.next.month': pred}).reset_index()
pred_df.to_csv("./pred.csv", index=False)<define_variables> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42) | Titanic - Machine Learning from Disaster |
22,798,211 | nrows = None<load_from_csv> | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV | Titanic - Machine Learning from Disaster |
22,798,211 | %%time
train = pd.read_csv("../input/ykc-cup-2nd/train.csv", nrows=nrows)
test = pd.read_csv("../input/ykc-cup-2nd/test.csv", nrows=nrows)
sub = pd.read_csv("../input/ykc-cup-2nd/sample_submission.csv")<concatenate> | param_grid = {'max_depth':[4,5,6,7,8,9,10]} | Titanic - Machine Learning from Disaster |
22,798,211 | df = pd.concat([train, test])
df = df.reset_index(drop=True)
df.shape<feature_engineering> | forest = RandomForestClassifier(random_state=42)
grid = GridSearchCV(forest, param_grid, cv=10)
grid.fit(X_train, y_train) | Titanic - Machine Learning from Disaster |
22,798,211 | def clear(x, punct, rep=""):
for p in punct:
x = x.replace(p, rep)
return x
punct = ["ñ","\",<categorify> | grid.best_params_ | Titanic - Machine Learning from Disaster |
22,798,211 | unuse_words = defaultdict(int)
def get_vec(x):
vs = []
for xx in x:
if len(vs) >= n_length:
break
try:
vs.append(model.wv[xx])
except:
flg = False
for i in range(1, len(xx)):
try:
v1 = model.wv[xx[:i]]
v2 = model.wv[xx[i:]]
vs.append(v1)
vs.append(v2)
flg = True
break
except:
pass
if flg == False:
for i in range(1, len(xx)):
try:
v1 = model.wv[xx[:i]]
vs.append(v1)
break
except:
pass
if len(vs) >= n_length:
break
for i in range(1, len(xx)):
try:
v2 = model.wv[xx[i:]]
vs.append(v2)
break
except:
pass
unuse_words[xx] += 1
if len(vs) < n_length:
for i in range(n_length - len(vs)):
vs.append(np.zeros(model.vector_size))
if len(vs) > n_length:
vs = vs[:n_length]
vs = np.array(vs)
if len(vs) == 0:
vs = np.zeros([1, model.vector_size])
return vs<load_pretrained> | from sklearn.metrics import accuracy_score, classification_report | Titanic - Machine Learning from Disaster |
22,798,211 | %%time
model = pd.read_pickle("../input/ykc-cup-2nd-save-fasttext/fasttext_gensim_model.pkl")<categorify> | test_predictions = grid.predict(X_test)
print(accuracy_score(y_test, test_predictions)) | Titanic - Machine Learning from Disaster |
22,798,211 | def to_vec(x):
vs = get_vec(x)
return vs
n_length = 15
vecs = df["product_name"].apply(lambda x: to_vec(x))
vecs = np.stack(vecs)
del model<sort_values> | print(classification_report(y_test, test_predictions)) | Titanic - Machine Learning from Disaster |
22,798,211 | sorted(unuse_words.items(), key=lambda x: x[1], reverse=True)[:100]<categorify> | X_final = complete_df[891:].drop(['PassengerId', 'Survived', 'Name', 'Ticket', 'Cabin'], axis=1)
X_final = pd.get_dummies(X_final) | Titanic - Machine Learning from Disaster |
22,798,211 | additional_feats = []
additional_words = [k for k, v in unuse_words.items() if v >= 5 and len(k) > 2]
print(additional_words)
additional_word_cols = []
for w in additional_words:
c = f"is_{w}"
df[c] = df["product_name"].apply(lambda x: w in x)
additional_word_cols.append(c)
additional_feats.append(df[additional_word_cols])
additional_feats.append(pd.get_dummies(df["order_dow_mode"]))
additional_feats.append(pd.get_dummies(df["order_hour_of_day_mode"]))
additional_feats.append(StandardScaler().fit_transform(df[["order_rate"]]))
additional_feats = np.hstack(additional_feats)
additional_feats.shape<prepare_x_and_y> | forest = RandomForestClassifier(max_depth=6, random_state=42)
forest.fit(X, y)
final_preds = forest.predict(X_final) | Titanic - Machine Learning from Disaster |
22,798,211 | target = "department_id"
train_wv = vecs[~df[target].isna()]
test_wv = vecs[df[target].isna()]
train_x = additional_feats[~df[target].isna()]
test_x = additional_feats[df[target].isna()]
y_train = df[~df[target].isna()][target].values
y_train_ohe = to_categorical(y_train, num_classes=None)
del vecs, additional_feats, df
train_wv.shape, test_wv.shape, train_x.shape, test_x.shape, y_train.shape, y_train_ohe.shape<import_modules> | submission = pd.read_csv('../input/titanic/gender_submission.csv')
submission | Titanic - Machine Learning from Disaster |
22,798,211 | from tensorflow import keras
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import *
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam, SGD, Nadam
from tensorflow.keras import backend as K
<predict_on_test> | submission['Survived'] = final_preds
submission | Titanic - Machine Learning from Disaster |
22,798,211 | class F1(keras.callbacks.Callback):
def __init__(self, model, inputs, targets, epoch_max):
self.model = model
self.inputs = inputs
self.targets = targets
self.epoch = 0
self.epoch_max = epoch_max
self.eval_freq = 5
def on_epoch_end(self, epoch, logs):
if (self.epoch % self.eval_freq == 0) or (self.epoch_max - self.epoch < 3):
p = self.model.predict(self.inputs)
score = get_score(self.targets, p)
print(f"{self.epoch}, : ")
print(score)
self.epoch += 1
def get_score(t, p):
score = {
"logloss": log_loss(t, p),
"f1_micro": f1_score(t, np.argmax(p, axis=1), average="micro")}
return score<compute_test_metric> | submission['Survived'] = submission['Survived'].astype(int)
submission | Titanic - Machine Learning from Disaster |
22,798,211 | def get_model(param):
inp_wv = Input((n_length, 300))
inp = Input((train_x.shape[1],))
mask = Lambda(lambda x: 1 / (1 + K.exp(-100 * K.std(x, axis=2, keepdims=True))))(inp_wv)
def calc_mean(x, mask, axis=1, keepdims=False):
return K.sum(x * mask, axis=axis, keepdims=keepdims) / K.sum(mask, axis=axis, keepdims=keepdims)
aggs = []
mean = Lambda(lambda x, mask: calc_mean(x, mask, axis=1))(inp_wv, mask)
maximum = Lambda(lambda x: K.max(x, axis=1))(inp_wv)
aggs.append(BatchNormalization()(mean))
aggs.append(BatchNormalization()(maximum))
h = inp_wv
for f in param["n_units"]:
h = keras.layers.Dense(f, "relu")(h)
h = Dropout(param["p_drop"])(h)
mean = Lambda(lambda x, mask: calc_mean(x, mask, axis=1))(h, mask)
maximum = Lambda(lambda x: K.max(x, axis=1))(h)
aggs.append(BatchNormalization()(mean))
aggs.append(BatchNormalization()(maximum))
h = Concatenate()(aggs)
h = Dropout(param["p_drop_fc"])(h)
h = Concatenate()([h, inp])
for f in param["n_units_fc"]:
h = Dense(f, "relu")(h)
h = Dropout(param["p_drop_fc"])(h)
out = Dense(21, "softmax")(h)
model = keras.models.Model(inputs=[inp_wv, inp], outputs=out)
return model
<train_model> | submission.to_csv('submission.csv', index=False) | Titanic - Machine Learning from Disaster |
22,798,211 | def trainNN(param, x_tr, y_tr, x_va, y_va, y_va_label, verbose = 0):
param_base = {
"lr": 1e-3,
"lr_min": 1e-5,
"lr_reduce_factor": 0.5,
"lr_reduce_patience": 5,
"epochs": 100,
}
param.update(param_base)
param["n_units"] = [int(param["n_unit"] * (param["n_unit_scale"] ** k)) for k in range(param["n_layer"])]
param["n_units_fc"] = [int(param["n_unit_fc"] * (param["n_unit_fc_scale"] ** k)) for k in range(param["n_layer_fc"])]
param["n_units_fc"] = [n for n in param["n_units_fc"] if n >= 64]
print(param)
model = get_model(param)
model.compile(loss=keras.losses.CategoricalCrossentropy(), optimizer=keras.optimizers.Adam(learning_rate=param["lr"], decay=param["decay"]))
cb_schedule = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=param["lr_reduce_factor"], patience=param["lr_reduce_patience"],
verbose=1, mode='auto', min_delta=0.0001, cooldown=0, min_lr=param["lr_min"])
cb_earlystop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
cb_save = keras.callbacks.ModelCheckpoint(f'model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
callbacks = [cb_schedule, cb_earlystop, cb_save, F1(model, x_va, y_va_label, param["epochs"])]
model.fit(x_tr, y_tr,
batch_size = param["batch_size"],
epochs=param["epochs"],
callbacks = callbacks,
validation_data=(x_va, y_va), verbose = verbose)
loss_history = model.history.history["val_loss"]
return model<split> | submission.to_csv('submission.csv', index=False) | Titanic - Machine Learning from Disaster |
22,487,239 | def run_cv(param, n_split = 3, verbose = 1):
preds_test = []
scores = []
oof = np.zeros([len(train), 21])
kfold = StratifiedKFold(n_splits=n_split, shuffle = True, random_state=42)
for i_fold, (train_idx, valid_idx) in enumerate(kfold.split(train, y_train)):
print(f"--------fold {i_fold}-------")
x_tr_wv = train_wv[train_idx]
x_tr = train_x[train_idx]
y_tr = y_train_ohe[train_idx]
x_va_wv = train_wv[valid_idx]
x_va = train_x[valid_idx]
y_va = y_train_ohe[valid_idx]
y_va_label = y_train[valid_idx]
model = trainNN(param, [x_tr_wv, x_tr], y_tr, [x_va_wv, x_va], y_va, y_va_label, verbose = verbose)
pred_val = model.predict([x_va_wv, x_va])
oof[valid_idx] = pred_val
score = get_score(y_va_label, pred_val)
print(score)
scores.append(score)
pred_test = model.predict([test_wv, test_x])
preds_test.append(pred_test)
del model
score_df = pd.DataFrame(scores)
return score_df, oof, preds_test<categorify> | train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv') | Titanic - Machine Learning from Disaster |
22,487,239 |
<compute_train_metric> | df = pd.concat([train, test], axis=0)
df = df.set_index('PassengerId')
df.info() | Titanic - Machine Learning from Disaster |
22,487,239 | params = {'decay': 0.0002561250212980425, 'batch_size': 276, 'n_unit': 2048, 'n_layer': 1, 'n_unit_scale': 0.6165090828302822, 'n_unit_fc': 225, 'n_layer_fc': 1, 'n_unit_fc_scale': 0.5835791877627263, 'p_drop': 0.57113050869184264, 'p_drop_fc': 0.3911374259982809}
score_df, oof, preds_test = run_cv(params, n_split=10, verbose=False)<prepare_output> | df = df.drop(['Name', 'Ticket', 'Cabin'], axis=1)
| Titanic - Machine Learning from Disaster |
22,487,239 | pred_test_mean = np.array(preds_test).mean(axis=0)
pred_test_final = np.argmax(pred_test_mean, axis=1)<save_to_csv> | df = df.drop('Fare', axis=1) | Titanic - Machine Learning from Disaster |
22,487,239 | sub["department_id"] = pred_test_final
sub.to_csv("submission.csv", index=False)
sub.head()<categorify> | def fillna_using_knn_imputer(df):
imputer = KNNImputer(n_neighbors=5,
weights='uniform',
metric='nan_euclidean')
df['Sex'] = df['Sex'].factorize()[0]
df['Embarked'] = df['Embarked'].factorize()[0]
df['Kind'] = df['Kind'].factorize()[0]
X_train = df[~df['Survived'].isnull()].drop('Survived', axis=1)
X_test = df[df['Survived'].isnull()].drop('Survived', axis=1)
X_train_trans = pd.DataFrame(imputer.fit_transform(X_train), columns=X_train.columns, index=X_train.index)
X_test_trans = pd.DataFrame(imputer.transform(X_test), columns=X_test.columns, index=X_test.index)
dff = pd.concat([X_train_trans, X_test_trans], axis=0)
dff['Survived'] = df['Survived']
dff = dff.sort_index()
return dff
df = fillna_using_knn_imputer(df)
df.info() | Titanic - Machine Learning from Disaster |
22,487,239 | pd.to_pickle(
{"train": train, "test": test, "oof": oof, "pred_test": pred_test_mean, "sub": sub}, "data.pkl")<load_from_csv> | def encode_cols(df, cols=['Pclass', 'Embarked']):
for col in cols:
dumm = pd.get_dummies(data=df[col], prefix=col)
df = pd.concat([df, dumm], axis=1)
df = df.drop(col, axis=1)
return df
def scale_all_features(df):
X = df.drop('Survived', axis=1)
scaler = MinMaxScaler()
X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index)
df = pd.concat([X, df['Survived']], axis=1)
return df, scaler
df = encode_cols(df)
df, _ = scale_all_features(df)
df.info() | Titanic - Machine Learning from Disaster |
22,487,239 | train = pd.read_csv("../input/ykc-cup-2nd/train.csv")
test = pd.read_csv("../input/ykc-cup-2nd/test.csv")
sub = pd.read_csv("../input/ykc-cup-2nd/sample_submission.csv")
train.shape, test.shape, sub.shape<concatenate> | def split_to_train_test_X_y(df):
X = df[~df['Survived'].isna()].drop('Survived', axis=1)
y = df[~df['Survived'].isna()]['Survived']
X_test = df[df['Survived'].isna()].drop('Survived', axis=1)
return X, y, X_test
X, y, X_test = split_to_train_test_X_y(df) | Titanic - Machine Learning from Disaster |
22,487,239 | df = pd.concat([train, test])
df = df.reset_index(drop=True)
df.shape<choose_model_class> | class ML_Classifier_Switcher(object):
def pick_model(self, model_name):
self.param_grid = None
method_name = str(model_name)
method = getattr(self, method_name, lambda: "Invalid ML Model")
return method()
def SVM(self):
self.param_grid = {'kernel': ['rbf', 'sigmoid', 'linear'],
'C': np.logspace(-2, 2, 10),
'gamma': np.logspace(-5, 1, 14)}
return SVC()
def XGR(self):
self.param_grid = {'gamma': np.logspace(-5, 1, 7),
'subsample': [0.5, 0.75, 1.0],
'colsample_bytree': [0.5, 0.75, 1.0],
'eta': [0.1, 0.5, 0.9],
'max_depth': [3, 5]}
return XGBClassifier(random_state=42, nthread=7, use_label_encoder=False,
eval_metric='error', tree_method="hist")
def RF(self):
self.param_grid = {
'n_estimators': [50, 100, 200, 300],
'max_features': ['auto'],
'criterion': ['entropy'],
'max_depth': [5, 10],
'min_samples_split': [5],
'min_samples_leaf': [1]
}
return RandomForestClassifier(random_state=42, n_jobs=-1)
def LR(self):
self.param_grid = {'solver': ['newton-cg', 'lbfgs', 'liblinear'],
'penalty': ['l2'],
'C': [100, 10, 1.0, 0.1, 0.01]}
return LogisticRegression(n_jobs=None, random_state=42)
def KNN(self):
self.param_grid = {
'n_neighbors': list(range(1, 5)),
'weights': ['uniform', 'distance'],
'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'],
'leaf_size': list(range(1, 10)),
'p': [1, 2]
}
return KNeighborsClassifier() | Titanic - Machine Learning from Disaster |
22,487,239 | target = "department_id"
n_split = 5
kfold = StratifiedKFold(n_splits=n_split, shuffle = True, random_state=42 )<feature_engineering> | def cross_validate(X, y, model_name='RF', cv=5, scoring='accuracy',
gridsearch=True):
switcher = ML_Classifier_Switcher()
model = switcher.pick_model(model_name)
if gridsearch:
gr = GridSearchCV(model, switcher.param_grid,
scoring=scoring, cv=cv, n_jobs=-1)
gr.fit(X, y)
model = gr.best_estimator_
cvr = cross_val_score(model, X, y, cv=cv, scoring=scoring)
cvr = pd.DataFrame(cvr).T
cvr.index = [model_name]
cvr.columns = ['fold_{}'.format(x+1) for x in cvr.columns]
cvr['mean'] = cvr.mean(axis=1)
cvr['std'] = cvr.std(axis=1)
return cvr, model | Titanic - Machine Learning from Disaster |
22,487,239 | df["product_name"] = df["product_name"].apply(lambda words: words.lower().replace(",", "").replace("&", "").split(" "))
df.head()<define_variables> | def cross_validate_models(X, y, cv=5, scoring='accuracy'):
models = ['LR', 'KNN', 'SVM', 'RF']
cvrs = []
bests = []
for model_name in models:
print('Optimizing {} model'.format(model_name))
cvr, best = cross_validate(X, y, model_name=model_name, cv=cv, scoring=scoring,
gridsearch=True)
cvrs.append(cvr)
bests.append(best)
cvr = pd.concat(cvrs, axis=0)
return cvr, bests | Titanic - Machine Learning from Disaster |
22,487,239 | model_names = ["crawl-300d-2M", "crawl-300d-2M-subword", "wiki-news-300d-1M", "wiki-news-300d-1M-subword"]<load_pretrained> | cvr, bests = cross_validate_models(X, y)
print(cvr) | Titanic - Machine Learning from Disaster |
22,487,239 | fasttext_pretrain_cols = []
unused_words = defaultdict(int)
for model_name in model_names:
model = pd.read_pickle(f"../input/fasttext/{model_name}.pkl")
def to_mean_vec(x, model):
v = np.zeros(model.vector_size)
for w in x:
try:
v += model[w]
except:
unused_words[w] += 1
v = v / (np.sqrt(np.sum(v ** 2)) + 1e-16)
return v
def to_max_vec(x, model):
v = np.zeros(model.vector_size)
for w in x:
try:
v = np.maximum(v, model[w])
except:
pass
return v
mean_vecs = df["product_name"].apply(lambda x: to_mean_vec(x, model))
mean_vecs = np.vstack(mean_vecs)
cols = [f"fasttext_pretrain_{model_name}_mean_vec{k}" for k in range(mean_vecs.shape[1])]
fasttext_pretrain_cols += cols
mean_vec_df = pd.DataFrame(mean_vecs, columns=cols)
df = pd.concat([df, mean_vec_df], axis=1)
max_vecs = df["product_name"].apply(lambda x: to_max_vec(x, model))
max_vecs = np.vstack(max_vecs)
cols = [f"fasttext_pretrain_{model_name}_max_vec{k}" for k in range(max_vecs.shape[1])]
fasttext_pretrain_cols += cols
max_vec_df = pd.DataFrame(max_vecs, columns=cols)
df = pd.concat([df, max_vec_df], axis=1)
df.head()<split> | def predict_on_test(X_test, model, submission_df, target='Survived'):
preds_test = model.predict(X_test)
submission_df.loc[:, target] = [int(x) for x in preds_test]
submission_df.to_csv('submission.csv', index=False)
return
predict_on_test(X_test, bests[-1], submission_df) | Titanic - Machine Learning from Disaster |
22,480,454 | train = df[~df[target].isna()]
test = df[df[target].isna()]<feature_engineering> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv') | Titanic - Machine Learning from Disaster |
22,480,454 | def to_weighted_count_vec(x, word_sets):
v = np.zeros(21)
for w in x:
hits = []
for i in range(21):
if w in word_sets[i]:
hits.append(i)
for i in hits:
v[i] += 1.0 / len(hits)
return v
weighted_count_cols = [f"weighted_count_vec{k}" for k in range(21)]<create_dataframe> | women = train_data.loc[train_data.Sex == 'female']['Survived']
print('Women survived', sum(women)/len(women))
men = train_data.loc[train_data.Sex == 'male']['Survived']
print('Men survived', sum(men)/len(men)) | Titanic - Machine Learning from Disaster |
22,480,454 | train_weighted_count = pd.DataFrame(index=train.index, columns=weighted_count_cols, dtype=np.float32)
for i_fold, (train_idx, valid_idx) in enumerate(kfold.split(train, train[target])):
tr = train.loc[train_idx]
va = train.loc[valid_idx]
word_sets = [set(sum(tr[tr["department_id"] == i]["product_name"], [])) for i in range(21)]
vecs = va["product_name"].apply(lambda x: to_weighted_count_vec(x, word_sets))
vecs = np.vstack(vecs)
vec_df = pd.DataFrame(vecs, index=va.index, columns=weighted_count_cols)
train_weighted_count.loc[valid_idx, :] = vec_df
train = pd.concat([train, train_weighted_count], axis=1)<create_dataframe> | train_data['female'] = pd.get_dummies(train_data['Sex'])['female']
test_data['female'] = pd.get_dummies(test_data['Sex'])['female'] | Titanic - Machine Learning from Disaster |
22,480,454 | test_weighted_count = pd.DataFrame(index=test.index, columns=weighted_count_cols, dtype=np.float32)
word_sets = [set(sum(train[train["department_id"] == i]["product_name"], [])) for i in range(21)]
vecs = test["product_name"].apply(lambda x: to_weighted_count_vec(x, word_sets))
vecs = np.vstack(vecs)
test_weighted_count.loc[:, :] = pd.DataFrame(vecs, index=test.index, columns=weighted_count_cols)
test = pd.concat([test, test_weighted_count], axis=1)<define_variables> | sum(train_data['Age'].isnull())
train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean())
test_data['Age'] = test_data['Age'].fillna(test_data['Age'].mean()) | Titanic - Machine Learning from Disaster |
22,480,454 | features = fasttext_pretrain_cols + weighted_count_cols + ["order_rate", "order_dow_mode", "order_hour_of_day_mode"]<normalization> | high_fare = train_data.loc[train_data.Fare > 100]['Survived']
print('High fare survivors', sum(high_fare)/len(high_fare))
low_fare = train_data.loc[train_data.Fare < 32]['Survived']
print('Low fare survivors', sum(low_fare)/len(low_fare))
22,480,454 | scaler = preprocessing.StandardScaler()
train[features] = scaler.fit_transform(train[features])
test[features] = scaler.transform(test[features])<set_options> | pclass1 = train_data.loc[train_data.Pclass == 1]['Survived']
print('Class1', sum(pclass1)/len(pclass1))
pclass2 = train_data.loc[train_data.Pclass == 2]['Survived']
print('Class2', sum(pclass2)/len(pclass2))
pclass3 = train_data.loc[train_data.Pclass == 3]['Survived']
print('Class3', sum(pclass3)/len(pclass3)) | Titanic - Machine Learning from Disaster |
22,480,454 | def seed_everything(seed: int) -> NoReturn:
random.seed(seed)
np.random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
tf.random.set_seed(seed)
seed_everything(1220)
params = {
'hidden_layers': 1,
'hidden_units': 128,
'hidden_activation': 'relu',
'lr': 1e-4,
'batch_size': 128,
'epochs': 100
}
def nn_model(L: int):
n_neuron = params['hidden_units']
inputs = layers.Input(shape=(L,), dtype='float32')
x = layers.Dense(n_neuron, activation=params['hidden_activation'])(inputs)
for i in np.arange(params['hidden_layers'] - 1):
x = layers.Dense(n_neuron // (2 * (i+1)), activation=params['hidden_activation'])(x)
out = layers.Dense(21, activation='softmax', name='out')(x)
model = models.Model(inputs=inputs, outputs=out)
model.compile(loss="categorical_crossentropy", optimizer=optimizers.Adam(lr=params['lr']))
return model
history = model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR],
epochs=params['epochs'], batch_size=params['batch_size'],
validation_data=(val_set['X'], val_set['y']))
return model, fi
<prepare_x_and_y> | sum(test_data.Pclass.isna()) | Titanic - Machine Learning from Disaster |
22,480,454 | preds_test = []
scores = []
oof = np.zeros((len(train), 21))
for i_fold, (train_idx, valid_idx) in enumerate(kfold.split(train, train[target])):
print(f"--------fold {i_fold}-------")
x_tr = train.loc[train_idx, features]
y_tr = train.loc[train_idx, target]
x_va = train.loc[valid_idx, features]
y_va = train.loc[valid_idx, target]
ohe = preprocessing.OneHotEncoder(sparse=False, categories='auto')
y_tr_ohe = ohe.fit_transform(y_tr.values.reshape(-1, 1))
y_va_ohe = ohe.transform(y_va.values.reshape(-1, 1))
nn = nn_model(len(features))
er = callbacks.EarlyStopping(patience=8, min_delta=params['lr'], restore_best_weights=True, monitor='val_loss')
ReduceLR = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1, epsilon=params['lr'], mode='min')
checkpoint_filepath = ''
model_checkpoint_callback = callbacks.ModelCheckpoint(filepath=f'mybestweight_fold{i_fold}.hdf5', save_weights_only=True, monitor='val_loss', save_best_only=True)
nn.fit(x_tr.values, y_tr_ohe, callbacks=[er, ReduceLR, model_checkpoint_callback], epochs=params['epochs'], batch_size=params['batch_size'],
validation_data=(x_va.values, y_va_ohe))
def predict_proba(x):
return nn.predict(x)
pred_val = predict_proba(x_va)
oof[valid_idx] += pred_val
score = {
"logloss": log_loss(y_va, pred_val),
"f1_micro": f1_score(y_va, np.argmax(pred_val, axis=1), average="micro")}
print(score)
scores.append(score)
pred_test = predict_proba(test[features])
preds_test.append(pred_test)<create_dataframe> | train_data['class1'] = pd.get_dummies(train_data.Pclass)[1]
test_data['class1'] = pd.get_dummies(test_data.Pclass)[1]
train_data['class2'] = pd.get_dummies(train_data.Pclass)[2]
test_data['class2'] = pd.get_dummies(test_data.Pclass)[2] | Titanic - Machine Learning from Disaster |
22,480,454 | score_df = pd.DataFrame(scores)
score_df<save_to_csv> | sum(test_data.SibSp.isna()) | Titanic - Machine Learning from Disaster |
22,480,454 | oof_df = pd.DataFrame(oof)
oof_df.to_csv("oof_nn.csv", index=False)<save_to_csv> | sibs = train_data.loc[train_data.SibSp <= 1]['Survived']
print(sum(sibs)/len(sibs))
train_data['many_sibs'] = (train_data.SibSp > 1) * 1
test_data['many_sibs'] = (test_data.SibSp > 1) * 1 | Titanic - Machine Learning from Disaster |
22,480,454 | for i in range(len(preds_test)):
pred_df = pd.DataFrame(preds_test[i])
pred_df.to_csv(f"pred_{i}_nn.csv", index=False)<prepare_output> | young = train_data.loc[train_data.Age <= 15]['Survived']
print(sum(young)/len(young))
old = train_data.loc[train_data.Age >= 40]['Survived']
print(sum(old)/len(old)) | Titanic - Machine Learning from Disaster |
22,480,454 | pred_test_final = np.array(preds_test).mean(axis=0)
pred_test_final = np.argmax(pred_test_final, axis=1)<save_to_csv> | bins = [0.42, 15, 30, 50, 80]
train_data['bin_age'] = pd.cut(x=train_data.Age, bins=bins)
test_data['bin_age'] = pd.cut(x=test_data.Age, bins=bins) | Titanic - Machine Learning from Disaster |
22,480,454 | sub["department_id"] = pred_test_final
sub.to_csv("submission_nn.csv", index=False)
sub.head()<set_options> | train_data['young'] = pd.get_dummies(train_data.bin_age).iloc[:,0]
test_data['young'] = pd.get_dummies(test_data.bin_age).iloc[:,0]
train_data['senior'] = pd.get_dummies(train_data.bin_age).iloc[:,3]
test_data['senior'] = pd.get_dummies(test_data.bin_age).iloc[:,3] | Titanic - Machine Learning from Disaster |
22,480,454 | %matplotlib inline
warnings.simplefilter(action="ignore", category=FutureWarning)<load_pretrained> | from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix | Titanic - Machine Learning from Disaster |
22,480,454 | shutil.copyfile(src="../input/redcarpet.py", dst="../working/redcarpet.py")
<load_pretrained> | X = train_data[features]
y = train_data.Survived
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0) | Titanic - Machine Learning from Disaster |
22,480,454 | item_file = "../input/talent.pkl"
item_records, COLUMN_LABELS, READABLE_LABELS, ATTRIBUTES = pickle.load(open(item_file, "rb"))
item_df = pd.DataFrame(item_records)[ATTRIBUTES + COLUMN_LABELS].fillna(value=0)
ITEM_NAMES = item_df["name"].values
ITEM_IDS = item_df["id"].values
s_items = mat_to_sets(item_df[COLUMN_LABELS].values)
assert len(item_df) == len(s_items), "Item matrix is not the same length as item category set list."
print("Talent:", len(item_df))
print("Categories:", len(COLUMN_LABELS))
item_df.head()<load_pretrained> | log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)
y_pred | Titanic - Machine Learning from Disaster |
22,480,454 | csr_train, csr_test, csr_input, csr_hidden = pickle.load(open("../input/train_test_mat.pkl", "rb"))
m_split = [np.array(csr.todense()) for csr in [csr_train, csr_test, csr_input, csr_hidden]]
m_train, m_test, m_input, m_hidden = m_split
s_train, s_test, s_input, s_hidden = pickle.load(open("../input/train_test_set.pkl", "rb"))
assert len(m_train) == len(s_train), "Train matrix is not the same length as train sets."
assert len(m_test) == len(s_test), "Test matrix is not the same length as test sets."
assert len(m_input) == len(s_input), "Input matrix is not the same length as input sets."
assert len(m_hidden) == len(s_hidden), "Hidden matrix is not the same length as hidden sets."
print("Train Users", len(m_train))
print("Test Users", len(m_test))
print("Minimum Test Items per User:", min(m_test.sum(axis=1)))
print("Minimum Input Items per User:", min(m_input.sum(axis=1)))
print("Minimum Hidden Items per User:", min(m_hidden.sum(axis=1)))
like_df = pd.DataFrame(m_train, columns=ITEM_NAMES)
like_df.head()<import_modules> | accuracy_score(y_pred, y_test ) | Titanic - Machine Learning from Disaster |
22,480,454 | from redcarpet import mapk_score, uhr_score<import_modules> | confusion_matrix(y_pred, y_test) | Titanic - Machine Learning from Disaster |
22,480,454 | from redcarpet import jaccard_sim, cosine_sim<import_modules> | model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X_train, y_train)
y_pred = model.predict(X_test) | Titanic - Machine Learning from Disaster |
22,480,454 | from redcarpet import collaborative_filter, content_filter, weighted_hybrid<feature_engineering> | accuracy_score(y_pred, y_test) | Titanic - Machine Learning from Disaster |
22,480,454 | collab_jac10 = collaborative_filter(s_train, s_input, j=30, sim_fn=jaccard_sim, threshold=0.05, k=10)<import_modules> | confusion_matrix(y_pred, y_test) | Titanic - Machine Learning from Disaster |
22,480,454 | from redcarpet import get_recs
from redcarpet import show_user_recs, show_item_recs, show_user_detail
from redcarpet import show_apk_dist, show_hit_dist, show_score_dist<define_variables> | test_data.Fare = test_data.Fare.fillna(test_data.Fare.mean()) | Titanic - Machine Learning from Disaster |
22,480,454 | k_top = 10<compute_train_metric> | param_grid = {
'n_estimators': [200, 500],
'max_features': ['auto', 'sqrt', 'log2'],
'max_depth' : [4,5,6,7,8],
'criterion' :['gini', 'entropy']
}
| Titanic - Machine Learning from Disaster |
22,480,454 | print("Model: Collaborative Filtering with Jaccard Similarity (j=10)")
collab_jac10 = collaborative_filter(s_train, s_input, sim_fn=jaccard_sim, j=50, threshold=0.005, k=k_top)
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(collab_jac10), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(collab_jac10), k=k_top)))<compute_train_metric> | rfc1 = RandomForestClassifier(random_state=42, max_features='log2', n_estimators=200, max_depth=6, criterion='entropy')
rfc1.fit(X_train, y_train) | Titanic - Machine Learning from Disaster |
22,480,454 | <compute_test_metric><EOS> | predictions = rfc1.predict(test_data[features])
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission.csv', index=False)
print("Your submission was successfully saved!") | Titanic - Machine Learning from Disaster |
22,470,622 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric> | from typing import Tuple, Dict, Any
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras.metrics import Mean
import tensorflow.keras.backend as K
import tensorflow.keras.layers | Titanic - Machine Learning from Disaster |
22,470,622 | print("Model: Hybrid Collaborative Filtering")
print("Similarity: Hybrid(0.2 * Jaccard + 0.8 * Cosine)")
collab_hybrid = weighted_hybrid([
(collab_jac10, 0.5),
(collab_cos10, 0.5)
])
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(collab_hybrid), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(collab_hybrid), k=k_top)))<compute_train_metric> | def extract_titanic_features(
source: pd.DataFrame,
):
features = {
name: np.array(value, np.float32)
for name, value in source[["Age", "Fare", "SibSp", "Parch", "Pclass"]].items()
}
for age in [4, 8, 12, 16, 22, 26, 30, 35, 40, 45, 50, 60]:
features['at_least_' + str(age)] = np.array(source['Age'] >= age, np.float32)
features['name_len'] = np.array(source["Name"].str.len(), np.float32)
for title in ['Miss.', 'Master', 'Mr.', 'Mrs.']:
features['is_' + title] = np.array(source['Name'].str.contains(title), np.float32)
features["Sex"] = np.array(source["Sex"], np.str)
features["Embarked"] = np.array(source["Embarked"], np.str)
return features
train_labels = titanic_train_df["Survived"]
train_features = extract_titanic_features(titanic_train_df)
test_features = extract_titanic_features(titanic_test_df)
inputs = {
k: tf.keras.Input(shape=(1,), name=k, dtype=v.dtype)
for k, v in train_features.items()
}
preprocessed_inputs = []
numeric_inputs = {k: v for k, v in inputs.items() if v.dtype == tf.float32}
def nan_mask(
layer: tf.keras.layers.Layer,
) -> Tuple[tf.keras.layers.Layer, tf.keras.layers.Layer]:
is_nan = tf.math.is_nan(layer)
finite_input = tf.where(is_nan, tf.zeros_like(layer), layer)
norm_input = tf.keras.layers.BatchNormalization()(finite_input)
mask = tf.cast(tf.math.logical_not(is_nan), tf.float32)
return mask, norm_input
finite_numeric_inputs = {}
numeric_masks = {}
for k, v in numeric_inputs.items():
mask_v, finite_v = nan_mask(v)
preprocessed_inputs.append(mask_v)
preprocessed_inputs.append(finite_v)
finite_numeric_inputs[k] = finite_v
numeric_masks[k] = mask_v
accomp_count = tf.math.add(
finite_numeric_inputs["Parch"], finite_numeric_inputs["SibSp"]
)
preprocessed_inputs.append(accomp_count)
gender_vocab = np.unique(train_features["Sex"])
gender_lookup = tf.keras.layers.experimental.preprocessing.StringLookup(
vocabulary=gender_vocab
)
gender_onehot = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=gender_lookup.vocab_size()
)
gender_input = gender_onehot(gender_lookup(inputs["Sex"]))
preprocessed_inputs.append(gender_input)
embarked_vocab = np.array(("C", "Q", "S"))
embarked_lookup = tf.keras.layers.experimental.preprocessing.StringLookup(
vocabulary=embarked_vocab
)
embarked_onehot = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
max_tokens=embarked_lookup.vocab_size()
)
embarked_input = embarked_onehot(embarked_lookup(inputs["Embarked"]))
preprocessed_inputs.append(embarked_input)
x = tf.keras.layers.Concatenate()(preprocessed_inputs)
preprocess_model = tf.keras.Model(inputs, x)
tf.keras.utils.plot_model(
preprocess_model,
to_file="preprocess_model.png",
rankdir="LR",
show_shapes=True,
show_layer_names=True,
) | Titanic - Machine Learning from Disaster |
22,470,622 | print("Model: Collaborative Filtering with Cosine Similarity (j=10)")
collab_cos10 = collaborative_filter(s_train, s_input, sim_fn=cosine_sim, j=10, k=k_top)
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(collab_cos10), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(collab_cos10), k=k_top)))<define_variables> | head = preprocess_model(inputs)
columns = []
activation="relu"
for i in range(4):
c = head
c = tf.keras.layers.Dense(64, activation=activation)(c)
c = tf.keras.layers.Dropout(0.5)(c)
c = tf.keras.layers.Dense(64, activation=activation)(c)
c = tf.keras.layers.BatchNormalization()(c)
c = tf.keras.layers.Dropout(0.2)(c)
c = tf.keras.layers.Dense(32, activation=activation)(c)
columns.append(c)
if len(columns) == 1:
tail = columns[0]
else:
tail = tf.keras.layers.Add()(columns)
tail = tf.keras.layers.Dense(16, activation="relu")(tail)
tail = tf.keras.layers.Dense(1, activation="sigmoid")(tail)
model = tf.keras.Model(inputs, tail)
model.compile(
loss=tf.losses.BinaryCrossentropy(),
optimizer=tfa.optimizers.Lookahead(tf.keras.optimizers.Nadam()),
metrics=["accuracy"],
)
if False:
model.summary()
tf.keras.utils.plot_model(
model,
to_file="final_model.png",
rankdir="LR",
show_shapes=True,
)
callbacks = [
tf.keras.callbacks.ProgbarLogger(),
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=10,
),
]
fit_params = dict(
batch_size=128,
validation_freq=1,
validation_split=0.1,
epochs=300,
verbose=0,
callbacks=[callbacks],
)
submit = True
if submit:
fit_params.update(dict(
validation_freq=0,
validation_split=0,
epochs=125,
verbose=0,
))
history = model.fit(
train_features,
train_labels.to_numpy(),
**fit_params,
)
print("accuracy:", history.history['accuracy'][-1])
if not submit:
print("val_accuracy:", history.history['val_accuracy'][-1])
plt.plot(history.history['accuracy'][10:])
plt.plot(history.history['loss'][10:])
if not submit:
plt.plot(history.history['val_accuracy'][10:])
plt.plot(history.history['val_loss'][10:])
plt.xlabel('epoch')
plt.show() | Titanic - Machine Learning from Disaster |
22,470,622 | results = [
(collab_jac10, "Jaccard(j=10)"),
(collab_cos10, "Cosine(j=10)")
]<compute_test_metric> | test_predictions = model(test_features)
test_ids = titanic_test_df["PassengerId"].to_numpy()
test_hard_predictions = (
np.floor(np.array(test_predictions) + 0.5).astype("int").reshape(-1)
)
pred_df = pd.Series(data=test_hard_predictions, name="Survived", index=test_ids)
pred_df.to_csv(
"submission.csv",
index_label="PassengerId",
header=["Survived"],
) | Titanic - Machine Learning from Disaster |
22,340,798 | show_hit_dist(s_hidden, results, k=k_top)<compute_test_metric> | train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
train_data.info() | Titanic - Machine Learning from Disaster |
22,340,798 | print("Model: Hybrid Collaborative Filtering")
print("Similarity: Hybrid(0.2 * Jaccard + 0.8 * Cosine)")
collab_hybrid = weighted_hybrid([
(collab_jac10, 0.2),
(collab_cos10, 0.8)
])
print("MAP = {0:.3f}".format(mapk_score(s_hidden, get_recs(collab_hybrid), k=k_top)))
print("UHR = {0:.3f}".format(uhr_score(s_hidden, get_recs(collab_hybrid), k=k_top)))<import_modules> | missing_train_total = train_data.isnull().sum().sort_values(ascending=False)
missing_train_percentage = (train_data.isnull().sum() / train_data.count()).sort_values(ascending=False)
missing_train_data = pd.concat([missing_train_total, missing_train_percentage], axis=1, keys=['Total', 'Percent'])
missing_train_data.head(10) | Titanic - Machine Learning from Disaster |
22,340,798 | from redcarpet import write_kaggle_recs, download_kaggle_recs<load_pretrained> | missing_test_total = test_data.isnull().sum().sort_values(ascending=False)
missing_test_percentage = (test_data.isnull().sum() / test_data.count()).sort_values(ascending=False)
missing_test_data = pd.concat([missing_test_total, missing_test_percentage], axis=1, keys=['Total', 'Percent'])
missing_test_data.head(10) | Titanic - Machine Learning from Disaster |
22,340,798 | s_hold_input = pickle.load(open("../input/hold_set.pkl", "rb"))
print("Hold Out Set: N = {}".format(len(s_hold_input)))
s_all_input = s_input + s_hold_input
print("All Input: N = {}".format(len(s_all_input)))<choose_model_class> | train_data.groupby('Pclass')['Age'].mean() | Titanic - Machine Learning from Disaster |
22,340,798 | print("Final Model")
print("Strategy: Collaborative")
print("Similarity: Cosine(j=10)")
final_scores = collaborative_filter(s_train, s_all_input, sim_fn=cosine_sim, j=41, threshold=0.08, k=10)
final_recs = get_recs(final_scores)<save_to_csv> | train_data.groupby(['Pclass','Sex'])['Sex'].count() | Titanic - Machine Learning from Disaster |
22,340,798 | outfile = "kaggle_submission_hybrid_collab.csv"
n_lines = write_kaggle_recs(final_recs, outfile)
print("Wrote predictions for {} users to {}.".format(n_lines, outfile))
download_kaggle_recs(final_recs, outfile)<import_modules> | train_data.groupby('Sex')['Sex'].count() | Titanic - Machine Learning from Disaster |
22,340,798 | import pandas as pd
import numpy as np
import sklearn
import os
from matplotlib import pyplot as plt<define_variables> | mean = train_data.groupby(['Pclass','Sex'])['Age'].mean()
median = train_data.groupby(['Pclass','Sex'])['Age'].median()
age_sex_Pclass = pd.concat([mean, median], axis=1, keys=['Age mean', 'Age median'])
age_sex_Pclass.head(6)
| Titanic - Machine Learning from Disaster |
22,340,798 | filepath = "../input/adult-dataset/train_data.csv"<load_from_csv> | train_data.Embarked.value_counts() | Titanic - Machine Learning from Disaster |
22,340,798 | adult = pd.read_csv(filepath,
names=[
"Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
"Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
"Hours per week", "Country", "Target"],
sep=r'\s*,\s*',
engine='python',
na_values="?")<count_missing_values> | features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
X = train_data[features]
y = train_data.Survived
Pclass_Sex_Age_median = X.groupby(['Pclass','Sex']).Age.transform('median')
X.Age.fillna(Pclass_Sex_Age_median, inplace=True)
Pclass_Fare_median = X.groupby('Pclass').Fare.transform('median')
X.Fare.fillna(Pclass_Fare_median, inplace=True)
missing_test_total = X.isnull().sum().sort_values(ascending=False)
X.Embarked.fillna('S', inplace=True)
X['Embarked'].replace({'S': 0, 'Q': 1, 'C': 2}, inplace=True)
X['Sex'].replace({'female': 0, 'male': 1}, inplace=True)
X.info()
print(X.Sex) | Titanic - Machine Learning from Disaster |