kernel_id: int64 (24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completetion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
9,293,880
%load_ext autoreload %autoreload 2 %matplotlib inline<import_modules>
dtree = tree.DecisionTreeClassifier(random_state = 0) base_results = model_selection.cross_validate(dtree, data1[data1_x_bin], data1[Target], cv = cv_split) dtree.fit(data1[data1_x_bin], data1[Target]) print('BEFORE DT Parameters: ', dtree.get_params()) print("BEFORE DT Test w/bin score mean: {:.2f}".format(base_results['test_score'].mean() * 100)) print("BEFORE DT Test w/bin score 3*std: +/- {:.2f}".format(base_results['test_score'].std() * 100 * 3)) print('-'*10) param_grid = {'criterion': ['gini', 'entropy'], 'max_depth': [2,4,6,8,10,None], 'random_state': [0]} tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring = 'roc_auc', cv = cv_split) tune_model.fit(data1[data1_x_bin], data1[Target]) print('AFTER DT Parameters: ', tune_model.best_params_) print("AFTER DT Test w/bin score mean: {:.2f}".format(tune_model.cv_results_['mean_test_score'][tune_model.best_index_]*100)) print("AFTER DT Test w/bin score 3*std: +/- {:.2f}".format(tune_model.cv_results_['std_test_score'][tune_model.best_index_]*100*3)) print('-'*10)
Titanic - Machine Learning from Disaster
9,293,880
from fastai.imports import * from fastai.structured import * from pandas_summary import DataFrameSummary from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier from IPython.display import display from sklearn import metrics<load_from_csv>
print('BEFORE DT RFE Training Shape Old: ', data1[data1_x_bin].shape) print('BEFORE DT RFE Training Columns Old: ', data1[data1_x_bin].columns.values) print("BEFORE DT RFE Test w/bin score mean: {:.2f}".format(base_results['test_score'].mean() * 100)) print("BEFORE DT RFE Test w/bin score 3*std: +/- {:.2f}".format(base_results['test_score'].std() * 100 * 3)) print('-'*10) dtree_rfe = feature_selection.RFECV(dtree, step = 1, scoring = 'accuracy', cv = cv_split) dtree_rfe.fit(data1[data1_x_bin], data1[Target]) X_rfe = data1[data1_x_bin].columns.values[dtree_rfe.get_support()] rfe_results = model_selection.cross_validate(dtree, data1[X_rfe], data1[Target], cv = cv_split) print('AFTER DT RFE Training Shape New: ', data1[X_rfe].shape) print('AFTER DT RFE Training Columns New: ', X_rfe) print("AFTER DT RFE Test w/bin score mean: {:.2f}".format(rfe_results['test_score'].mean() * 100)) print("AFTER DT RFE Test w/bin score 3*std: +/- {:.2f}".format(rfe_results['test_score'].std() * 100 * 3)) print('-'*10) rfe_tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring = 'roc_auc', cv = cv_split) rfe_tune_model.fit(data1[X_rfe], data1[Target]) print('AFTER DT RFE Tuned Parameters: ', rfe_tune_model.best_params_) print("AFTER DT RFE Tuned Test w/bin score mean: {:.2f}".format(rfe_tune_model.cv_results_['mean_test_score'][rfe_tune_model.best_index_]*100)) print("AFTER DT RFE Tuned Test w/bin score 3*std: +/- {:.2f}".format(rfe_tune_model.cv_results_['std_test_score'][rfe_tune_model.best_index_]*100*3)) print('-'*10)
Titanic - Machine Learning from Disaster
9,293,880
df_raw = pd.read_csv('../input/train_V2.csv')<correct_missing_values>
vote_est = [ ('ada', ensemble.AdaBoostClassifier()), ('bc', ensemble.BaggingClassifier()), ('etc', ensemble.ExtraTreesClassifier()), ('gbc', ensemble.GradientBoostingClassifier()), ('rfc', ensemble.RandomForestClassifier()), ('gpc', gaussian_process.GaussianProcessClassifier()), ('lr', linear_model.LogisticRegressionCV()), ('bnb', naive_bayes.BernoulliNB()), ('gnb', naive_bayes.GaussianNB()), ('knn', neighbors.KNeighborsClassifier()), ('svc', svm.SVC(probability=True)), ('xgb', XGBClassifier()) ] vote_hard = ensemble.VotingClassifier(estimators = vote_est, voting = 'hard') vote_hard_cv = model_selection.cross_validate(vote_hard, data1[data1_x_bin], data1[Target], cv = cv_split) vote_hard.fit(data1[data1_x_bin], data1[Target]) print("Hard Voting Test w/bin score mean: {:.2f}".format(vote_hard_cv['test_score'].mean() * 100)) print("Hard Voting Test w/bin score 3*std: +/- {:.2f}".format(vote_hard_cv['test_score'].std() * 100 * 3)) print('-'*10) vote_soft = ensemble.VotingClassifier(estimators = vote_est, voting = 'soft') vote_soft_cv = model_selection.cross_validate(vote_soft, data1[data1_x_bin], data1[Target], cv = cv_split) vote_soft.fit(data1[data1_x_bin], data1[Target]) print("Soft Voting Test w/bin score mean: {:.2f}".format(vote_soft_cv['test_score'].mean() * 100)) print("Soft Voting Test w/bin score 3*std: +/- {:.2f}".format(vote_soft_cv['test_score'].std() * 100 * 3)) print('-'*10)
Titanic - Machine Learning from Disaster
9,293,880
df_raw = df_raw.dropna()<feature_engineering>
grid_hard = ensemble.VotingClassifier(estimators = vote_est, voting = 'hard') grid_hard_cv = model_selection.cross_validate(grid_hard, data1[data1_x_bin], data1[Target], cv = cv_split) grid_hard.fit(data1[data1_x_bin], data1[Target]) print("Hard Voting w/Tuned Hyperparameters Test w/bin score mean: {:.2f}".format(grid_hard_cv['test_score'].mean() * 100)) print("Hard Voting w/Tuned Hyperparameters Test w/bin score 3*std: +/- {:.2f}".format(grid_hard_cv['test_score'].std() * 100 * 3)) print('-'*10) grid_soft = ensemble.VotingClassifier(estimators = vote_est, voting = 'soft') grid_soft_cv = model_selection.cross_validate(grid_soft, data1[data1_x_bin], data1[Target], cv = cv_split) grid_soft.fit(data1[data1_x_bin], data1[Target]) print("Soft Voting w/Tuned Hyperparameters Test w/bin score mean: {:.2f}".format(grid_soft_cv['test_score'].mean() * 100)) print("Soft Voting w/Tuned Hyperparameters Test w/bin score 3*std: +/- {:.2f}".format(grid_soft_cv['test_score'].std() * 100 * 3)) print('-'*10)
Titanic - Machine Learning from Disaster
9,293,880
<sort_values><EOS>
print(data_val.info()) print("-"*10) data_val['Survived'] = mytree(data_val).astype(int) data_val['Survived'] = grid_hard.predict(data_val[data1_x_bin]) submit = data_val[['PassengerId','Survived']] submit.to_csv("../working/submit.csv", index=False) print('Validation Data Distribution: ', data_val['Survived'].value_counts(normalize = True)) submit.sample(10)
Titanic - Machine Learning from Disaster
10,545,899
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<sort_values>
%matplotlib inline warnings.filterwarnings('ignore')
Titanic - Machine Learning from Disaster
10,545,899
df_raw = df_raw.sort_values(['matchId'])<prepare_x_and_y>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv") train.describe(include="all" ).T
Titanic - Machine Learning from Disaster
10,545,899
train_cats(df_raw) df, y, nas = proc_df(df_raw, 'winPlacePerc', max_n_cat=5)<split>
print(pd.isnull(train).sum())
Titanic - Machine Learning from Disaster
10,545,899
def split_vals(a,n): return a[:n].copy(), a[n:].copy() n_valid = 444647 n_trn = len(df)-n_valid<compute_test_metric>
train = train.drop(['Cabin'], axis = 1) test = test.drop(['Cabin'], axis = 1) print('Feature Dropped!!')
Titanic - Machine Learning from Disaster
10,545,899
def rmse(x,y): return math.sqrt(((x-y)**2).mean()) def print_score(m): res = [rmse(m.predict(X_train), y_train), rmse(m.predict(X_valid), y_valid), m.score(X_train, y_train), m.score(X_valid, y_valid)] if hasattr(m, 'oob_score_'): res.append(m.oob_score_) print(res)<define_variables>
train = train.drop(['Ticket'], axis = 1) test = test.drop(['Ticket'], axis = 1) print('Feature Dropped!!')
Titanic - Machine Learning from Disaster
10,545,899
set_rf_samples(100000)<define_variables>
train = train.fillna({"Embarked": "S"}) train.head()
Titanic - Machine Learning from Disaster
10,545,899
drop_cols = ['totalDistance', 'walkDistance', 'groupId', 'matchId', 'Id']<drop_column>
for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
Titanic - Machine Learning from Disaster
10,545,899
df = df.drop(drop_cols, axis=1)<split>
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Royal": 5, "Rare": 6} for dataset in combine: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) train.head()
Titanic - Machine Learning from Disaster
10,545,899
X_train, X_valid = split_vals(df, n_trn) y_train, y_valid = split_vals(y, n_trn) X_train.shape<train_model>
mr_age = train[train["Title"] == 1]["AgeGroup"].mode() miss_age = train[train["Title"] == 2]["AgeGroup"].mode() mrs_age = train[train["Title"] == 3]["AgeGroup"].mode() master_age = train[train["Title"] == 4]["AgeGroup"].mode() royal_age = train[train["Title"] == 5]["AgeGroup"].mode() rare_age = train[train["Title"] == 6]["AgeGroup"].mode() age_title_mapping = {1: "Young Adult", 2: "Student", 3: "Adult", 4: "Baby", 5: "Adult", 6: "Adult"} for x in range(len(train["AgeGroup"])) : if train["AgeGroup"][x] == "Unknown": train["AgeGroup"][x] = age_title_mapping[train["Title"][x]] for x in range(len(test["AgeGroup"])) : if test["AgeGroup"][x] == "Unknown": test["AgeGroup"][x] = age_title_mapping[test["Title"][x]]
Titanic - Machine Learning from Disaster
10,545,899
m = RandomForestRegressor(n_estimators=100, max_features=0.5, min_samples_leaf=3, n_jobs=-1) %time m.fit(X_train, y_train) print_score(m)<compute_test_metric>
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7} train['AgeGroup'] = train['AgeGroup'].map(age_mapping) test['AgeGroup'] = test['AgeGroup'].map(age_mapping) train.head() train = train.drop(['Age'], axis = 1) test = test.drop(['Age'], axis = 1) print('Dropped!!')
Titanic - Machine Learning from Disaster
10,545,899
fi = rf_feat_importance(m, df); fi[:]<features_selection>
train = train.drop(['Name'], axis = 1) test = test.drop(['Name'], axis = 1) print('Dropped!!')
Titanic - Machine Learning from Disaster
10,545,899
to_keep = fi[fi.imp>0.0025].cols; len(to_keep)<split>
sex_mapping = {"male": 0, "female": 1} train['Sex'] = train['Sex'].map(sex_mapping) test['Sex'] = test['Sex'].map(sex_mapping) train.head()
Titanic - Machine Learning from Disaster
10,545,899
df_keep = df[to_keep].copy() X_train, X_valid = split_vals(df_keep, n_trn)<save_model>
test = test.fillna({"Embarked": "S"}) embarked_mapping = {"S": 1, "C": 2, "Q": 3} train['Embarked'] = train['Embarked'].map(embarked_mapping) test['Embarked'] = test['Embarked'].map(embarked_mapping) train.head()
Titanic - Machine Learning from Disaster
10,545,899
np.save('keep_cols.npy', np.array(df_keep.columns))<load_pretrained>
predictors = train.drop(['Survived', 'PassengerId'], axis=1) target = train["Survived"] x_train, x_val, y_train, y_val = train_test_split(predictors, target, test_size = 0.22, random_state = 0)
Titanic - Machine Learning from Disaster
10,545,899
keep_cols = np.load('keep_cols.npy') df_keep = df[keep_cols]<split>
gaussian = GaussianNB() gaussian.fit(x_train, y_train) y_pred = gaussian.predict(x_val) acc_gaussian = round(accuracy_score(y_pred, y_val) * 100, 2)
Titanic - Machine Learning from Disaster
10,545,899
X_train, X_valid = split_vals(df_keep, n_trn)<train_model>
print('gaussian accuracy score is:', acc_gaussian)
Titanic - Machine Learning from Disaster
10,545,899
m = RandomForestRegressor(n_estimators=70, min_samples_leaf=3, max_features=0.5, n_jobs=-1, oob_score=True) %time m.fit(X_train, y_train) print_score(m)<load_from_csv>
logreg = LogisticRegression() logreg.fit(x_train, y_train) y_pred = logreg.predict(x_val) acc_logreg = round(accuracy_score(y_pred, y_val) * 100, 2) print('logreg accuracy score is:', acc_logreg)
Titanic - Machine Learning from Disaster
10,545,899
df_test = pd.read_csv('../input/test_V2.csv')<sort_values>
svc = SVC() svc.fit(x_train, y_train) y_pred = svc.predict(x_val) acc_svc = round(accuracy_score(y_pred, y_val) * 100, 2) print('svc accuracy score is:', acc_svc)
Titanic - Machine Learning from Disaster
10,545,899
df_test['totalDistance'] = df_test['rideDistance'] + df_test['walkDistance'] + df_test['swimDistance'] ranking_cats = ['killPlace', 'damageDealt', 'kills', 'walkDistance', 'rankPoints', 'weaponsAcquired', 'totalDistance'] for c in ranking_cats: df_test[c+'_ranking'] = df_test.groupby('matchId')[c].rank(ascending=False) df_test.head()<concatenate>
linear_svc = LinearSVC() linear_svc.fit(x_train, y_train) y_pred = linear_svc.predict(x_val) acc_linear_svc = round(accuracy_score(y_pred, y_val) * 100, 2) print('linear svc accuracy score is:', acc_linear_svc)
Titanic - Machine Learning from Disaster
10,545,899
train_cats(df_test)<compute_test_metric>
perceptron = Perceptron() perceptron.fit(x_train, y_train) y_pred = perceptron.predict(x_val) acc_perceptron = round(accuracy_score(y_pred, y_val) * 100, 2) print('perceptron accuracy score is:', acc_perceptron)
Titanic - Machine Learning from Disaster
10,545,899
df_test, y, nas = proc_df(df=df_test, y_fld=None)<load_from_csv>
decisiontree = DecisionTreeClassifier() decisiontree.fit(x_train, y_train) y_pred = decisiontree.predict(x_val) acc_decisiontree = round(accuracy_score(y_pred, y_val) * 100, 2) print('decision tree accuracy score is:', acc_decisiontree)
Titanic - Machine Learning from Disaster
10,545,899
df_submit = df_test[keep_cols] a = m.predict(df_submit) a = pd.Series(a) submission = pd.Series(pd.read_csv('../input/test_V2.csv', low_memory=False)['Id']) submission = pd.concat([submission, a], axis=1) submission = submission.rename(columns={0:'winPlacePerc'})<save_to_csv>
randomforest = RandomForestClassifier() randomforest.fit(x_train, y_train) y_pred = randomforest.predict(x_val) acc_randomforest = round(accuracy_score(y_pred, y_val) * 100, 2) print('random forest accuracy score is:', acc_randomforest)
Titanic - Machine Learning from Disaster
10,545,899
submission.to_csv('submission.csv', index=False)<save_to_csv>
knn = KNeighborsClassifier() knn.fit(x_train, y_train) y_pred = knn.predict(x_val) acc_knn = round(accuracy_score(y_pred, y_val) * 100, 2) print('knn accuracy score is:', acc_knn)
Titanic - Machine Learning from Disaster
10,545,899
submission.to_csv('submission.csv', index=False)<set_options>
sgd = SGDClassifier() sgd.fit(x_train, y_train) y_pred = sgd.predict(x_val) acc_sgd = round(accuracy_score(y_pred, y_val) * 100, 2) print('stochastic gradient descent accuracy score is:', acc_sgd)
Titanic - Machine Learning from Disaster
10,545,899
%matplotlib inline<load_from_csv>
gbk = GradientBoostingClassifier() gbk.fit(x_train, y_train) y_pred = gbk.predict(x_val) acc_gbk = round(accuracy_score(y_pred, y_val) * 100, 2) print('gradient boosting accuracy score is:', acc_gbk)
Titanic - Machine Learning from Disaster
10,545,899
train_df = pd.read_csv('../input/train.csv') test_df = pd.read_csv('../input/test.csv') submission = pd.read_csv('../input/sample_submission.csv')<count_missing_values>
models = pd.DataFrame({ 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron', 'Linear SVC', 'Decision Tree', 'Stochastic Gradient Descent', 'Gradient Boosting Classifier'], 'Score': [acc_svc, acc_knn, acc_logreg, acc_randomforest, acc_gaussian, acc_perceptron, acc_linear_svc, acc_decisiontree, acc_sgd, acc_gbk]}) models.sort_values(by='Score', ascending=False)
Titanic - Machine Learning from Disaster
10,545,899
<count_missing_values><EOS>
ids = test['PassengerId'] predictions = gbk.predict(test.drop('PassengerId', axis=1)) output = pd.DataFrame({'PassengerId': ids, 'Survived': predictions}) output.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
8,810,978
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<count_unique_values>
%matplotlib inline plt.style.use('fivethirtyeight')
Titanic - Machine Learning from Disaster
8,810,978
train_df.nunique()<count_unique_values>
df_train = pd.read_csv(".. /input/titanic/train.csv", header=0) df_train.head()
Titanic - Machine Learning from Disaster
8,810,978
test_df.nunique()<concatenate>
df_test = pd.read_csv(".. /input/titanic/test.csv", header=0) df_test.head()
Titanic - Machine Learning from Disaster
8,810,978
concatenated = pd.concat([train_df, test_df]) concatenated.shape<count_unique_values>
print("Training data size:{}".format(df_train.shape)) print("Test data size:{}".format(df_test.shape))
Titanic - Machine Learning from Disaster
8,810,978
concatenated.nunique()<count_values>
df_train.isnull().sum()
Titanic - Machine Learning from Disaster
8,810,978
th10 = pd.DataFrame(train_df.landmark_id.value_counts().head(10)) th10.reset_index(level=0, inplace=True) th10.columns = ['landmark_id','count'] th10<create_dataframe>
df_test.isnull().sum()
Titanic - Machine Learning from Disaster
8,810,978
tb10 = pd.DataFrame(train_df.landmark_id.value_counts().tail(10)) tb10.reset_index(level=0, inplace=True) tb10.columns = ['landmark_id','count'] tb10<feature_engineering>
df_train["Ticket"].value_counts()
Titanic - Machine Learning from Disaster
8,810,978
ll = list() for path in train_df['url']: ll.append((path.split('//', 1)[1]).split('/', 1)[0]) train_df['site'] = ll ll = list() for path in test_df['url']: ll.append((path.split('//', 1)[1]).split('/', 1)[0]) test_df['site'] = ll<create_dataframe>
df_train["Cabin"].value_counts()
Titanic - Machine Learning from Disaster
8,810,978
train_site = pd.DataFrame(train_df.site.value_counts()) test_site = pd.DataFrame(test_df.site.value_counts())<save_to_csv>
df_train["Age"].fillna(np.mean(df_train["Age"]), inplace=True) df_train["Fare"].fillna(np.mean(df_train["Fare"]), inplace=True) df_train["Embarked"].fillna('S', inplace=True) df_train.dropna(axis=1, inplace=True) df_test["Age"].fillna(np.mean(df_train["Age"]), inplace=True) df_test["Fare"].fillna(np.mean(df_train["Fare"]), inplace=True) df_test.dropna(axis=1, inplace=True )
Titanic - Machine Learning from Disaster
8,810,978
freq_label = train_df['landmark_id'].value_counts() / train_df['landmark_id'].value_counts().sum() submission['landmarks'] = '%d %2.2f' % (freq_label.index[0], freq_label.values[0]) submission.to_csv('submission.csv', index=False) np.random.seed(2018) r_idx = lambda: np.random.choice(freq_label.index, p = freq_label.values) r_score = lambda idx: '%d %2.4f' % (freq_label.index[idx], freq_label.values[idx]) submission['landmarks'] = submission.id.map(lambda _: r_score(r_idx())) submission.to_csv('rand_submission.csv', index=False)<import_modules>
def sex(x): if x["Sex"] == "male": res = 0 else : res = 1 return res df_train["Sex_cate"] = df_train.apply(sex, axis=1) df_test["Sex_cate"] = df_test.apply(sex, axis=1) def age_band(x): if x["Age"] <= 10: res = 0 elif x["Age"] <= 20 and x["Age"] > 10: res = 1 elif x["Age"] <= 30 and x["Age"] > 20: res = 2 elif x["Age"] <= 40 and x["Age"] > 30: res = 3 elif x["Age"] <= 50 and x["Age"] > 40: res = 4 elif x["Age"] <= 60 and x["Age"] > 50: res = 5 else : res = 6 return res df_train["Age_band"] = df_train.apply(age_band, axis=1) df_test["Age_band"] = df_test.apply(age_band, axis=1) def fare_band(x): if x["Fare"] <= 25: res = 0 elif x["Fare"] <= 50 and x["Fare"] > 25: res = 1 elif x["Fare"] <= 75 and x["Fare"] > 50: res = 2 elif x["Fare"] <= 100 and x["Fare"] > 75: res = 3 elif x["Fare"] <= 125 and x["Fare"] > 100: res = 4 else : res = 5 return res df_train["Fare_band"] = df_train.apply(fare_band, axis=1) df_test["Fare_band"] = df_test.apply(fare_band, axis=1) def embarked_flg(x): if x["Embarked"] == 'S': res = 0 elif x["Embarked"] == 'C': res = 1 else: res = 2 return res df_train["Embarked_flg"] = df_train.apply(embarked_flg, axis=1) df_test["Embarked_flg"] = df_test.apply(embarked_flg, axis=1 )
Titanic - Machine Learning from Disaster
8,810,978
import pandas as pd import numpy as np<import_modules>
X = df_train[['Pclass','SibSp','Parch','Sex_cate', 'Age_band', 'Fare_band', 'Embarked_flg']] y = df_train[['Survived']] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) sc = StandardScaler() sc.fit(X_train) X_train_std = sc.transform(X_train) X_test_std = sc.transform(X_test)
Titanic - Machine Learning from Disaster
8,810,978
import pandas as pd import numpy as np<load_from_csv>
lr = LogisticRegression(solver='liblinear') param_range = [0.001, 0.01, 0.1, 1.0] penalty = ['l1', 'l2'] param_grid = [{"C":param_range, "penalty":penalty}] gs_lr = GridSearchCV(estimator=lr, param_grid=param_grid, scoring="accuracy", cv=10, n_jobs=-1) gs_lr = gs_lr.fit(X_train_std, y_train) print(gs_lr.best_score_.round(3)) print(gs_lr.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
train = pd.read_csv('../input/competitive-data-science-predict-future-sales/sales_train.csv') test = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv')<rename_columns>
svm = SVC(random_state=10, probability=True) param_range = [0.001, 0.01, 0.1, 1.0] param_grid = [{'C':param_range, 'kernel':['linear']}] gs_svm = GridSearchCV(estimator=svm, param_grid=param_grid, scoring='accuracy', cv=10, n_jobs=-1) gs_svm = gs_svm.fit(X_train_std, y_train) print(gs_svm.best_score_.round(3)) print(gs_svm.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
train.columns = ['date', 'date_block_num', 'shop_id', 'item_id', 'item_price', 'item_cnt_day'] train<sort_values>
knn = KNeighborsClassifier(metric='minkowski') param_range = [5, 10, 15, 20] param_grid = [{"n_neighbors":param_range, "p":[1,2]}] gs_knn = GridSearchCV(estimator=knn, param_grid=param_grid, scoring="accuracy", cv=10, n_jobs=-1) gs_knn = gs_knn.fit(X_train_std, y_train) print(gs_knn.best_score_.round(3)) print(gs_knn.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
train[train.date_block_num==0].sort_values(['shop_id','item_id'], ascending=True)<feature_engineering>
tree = DecisionTreeClassifier(max_depth=4, random_state=10) param_range = [3, 6, 9, 12] leaf = [10, 15, 20] criterion = ["entropy", "gini"] param_grid = [{"max_depth":param_range, "criterion":criterion, "max_leaf_nodes":leaf}] gs_tree = GridSearchCV(estimator=tree, param_grid=param_grid, scoring="accuracy", cv=10, n_jobs=-1) gs_tree = gs_tree.fit(X_train, y_train) print(gs_tree.best_score_.round(3)) print(gs_tree.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
train['date_block_num'] += 1 train['date_block_num']<load_from_csv>
forest = RandomForestClassifier(n_estimators=100, random_state=10) param_range = [5, 10, 15] criterion = ["entropy", "gini"] param_grid = [{"n_estimators":param_range, "criterion":criterion}] gs_forest = GridSearchCV(estimator=forest, param_grid=param_grid, scoring="accuracy", cv=10, n_jobs=-1) gs_forest = gs_forest.fit(X_train, y_train) print(gs_forest.best_score_.round(3)) print(gs_forest.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
shops = pd.read_csv('../input/competitive-data-science-predict-future-sales/shops.csv'); print(shops.shape) shops.head()<load_from_csv>
xgbc = xgb.XGBClassifier(random_state=10) max_depth = [10, 15, 20, 25] min_samples_leaf = [1,3,5] min_samples_split = [1,2,4] param_grid = [{"max_depth":max_depth, "min_samples_leaf":min_samples_leaf, "min_samples_split":min_samples_split}] gs_xgb = GridSearchCV(estimator=xgbc, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1) gs_xgb = gs_xgb.fit(X_train, y_train) print(gs_xgb.best_score_) print(gs_xgb.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
items = pd.read_csv('../input/competitive-data-science-predict-future-sales/items.csv'); print(items.shape) items.head()<load_from_csv>
lgbm = lgb.LGBMClassifier() max_depth = [3, 5, 10] min_samples_leaf = [1,3,5,7] min_samples_split = [2, 4, 6, 8] param_grid = [{"max_depth":max_depth, "min_samples_leaf":min_samples_leaf, "min_samples_split":min_samples_split}] gs_lgbm = GridSearchCV(estimator=lgbm, param_grid=param_grid, scoring="accuracy", cv=5, n_jobs=-1) gs_lgbm = gs_lgbm.fit(X_train, y_train) print(gs_lgbm.best_score_.round(3)) print(gs_lgbm.best_params_)
Titanic - Machine Learning from Disaster
8,810,978
cats = pd.read_csv('../input/competitive-data-science-predict-future-sales/item_categories.csv'); print(cats.shape) cats.head()<load_from_csv>
print("-"*50) y_pred = gs_lr.best_estimator_.predict(X_test_std) print("Logistic Regression Result") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50) y_pred = gs_svm.best_estimator_.predict(X_test_std) print("Support vector machine, SVM") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50) y_pred = gs_knn.best_estimator_.predict(X_test_std) print("KNeithborsClassfier") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50) y_pred = gs_tree.best_estimator_.predict(X_test) print("Decision tree") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50) y_pred = gs_forest.best_estimator_.predict(X_test) print("Random Forest") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50) y_pred = gs_xgb.best_estimator_.predict(X_test) print("XGB") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50) y_pred = gs_lgbm.best_estimator_.predict(X_test) print("LGBM") print("confusion_matrix = ", confusion_matrix(y_true=y_test, y_pred=y_pred)) print("accuracy = %.3f" % accuracy_score(y_true=y_test, y_pred=y_pred)) print("precision = %.3f" % precision_score(y_true=y_test, y_pred=y_pred)) print("recall = %.3f" % recall_score(y_true=y_test, y_pred=y_pred)) print("f1_score = %.3f" % f1_score(y_true=y_test, y_pred=y_pred)) print("-"*50 )
Titanic - Machine Learning from Disaster
8,810,978
sample_submission = pd.read_csv('../input/competitive-data-science-predict-future-sales/sample_submission.csv'); print(sample_submission.shape) sample_submission.head()<filter>
test = df_test[['Pclass', 'SibSp', 'Parch', 'Sex_cate', 'Age_band', 'Fare_band', 'Embarked_flg']] y_pred_test = gs_tree.best_estimator_.predict(test) submit = pd.DataFrame({"PassengerId":df_test["PassengerId"], "Survived":y_pred_test})
Titanic - Machine Learning from Disaster
8,810,978
train = train[train.item_price<100000] train = train[train.item_cnt_day<1001]<filter>
submit.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
8,810,978
train[train.item_price<0]<feature_engineering>
y_pred_test_lgbm = gs_lgbm.best_estimator_.predict(test) y_pred_test_xgb = gs_xgb.best_estimator_.predict(test) y_pred_test_en = (y_pred_test*0.3 + y_pred_test_lgbm*0.4 + y_pred_test_xgb*0.3) submit_en = pd.DataFrame({"PassengerId":df_test["PassengerId"], "Survived":y_pred_test_en}).round(0) submit_en["Survived"] = [int(i) for i in submit_en["Survived"]]
Titanic - Machine Learning from Disaster
8,810,978
median = train[(train.date_block_num==4)&(train.shop_id==32)&(train.item_id==2973)&(train.item_price>0)].item_price.median() train.loc[train.item_price<0, 'item_price'] = median train[train.item_price<0]<feature_engineering>
submit_en.to_csv('my_submission_en.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
9,122,235
train.loc[train.shop_id == 0, 'shop_id'] = 57 test.loc[test.shop_id == 0, 'shop_id'] = 57 train.loc[train.shop_id == 1, 'shop_id'] = 58 test.loc[test.shop_id == 1, 'shop_id'] = 58 train.loc[train.shop_id == 10, 'shop_id'] = 11 test.loc[test.shop_id == 10, 'shop_id'] = 11<feature_engineering>
data_train = pd.read_csv("/kaggle/input/titanic/train.csv") data_train data_train.info()
Titanic - Machine Learning from Disaster
9,122,235
shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"' shops['city'] = shops['shop_name'].str.split(' ').map(lambda x: x[0]) shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск' shops['city_code'] = LabelEncoder().fit_transform(shops['city']) shops = shops[['shop_id','city_code']]<categorify>
def set_missing_ages(df): age_df = df[['Age','Fare', 'Parch', 'SibSp', 'Pclass']] known_age = age_df[age_df.Age.notnull()].values unknown_age = age_df[age_df.Age.isnull()].values y = known_age[:, 0] X = known_age[:, 1:] rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1) rfr.fit(X, y) predictedAges = rfr.predict(unknown_age[:, 1:]) df.loc[(df.Age.isnull()), 'Age'] = predictedAges return df, rfr def set_Cabin_type(df): df.loc[(df.Cabin.notnull()), 'Cabin'] = "Yes" df.loc[(df.Cabin.isnull()), 'Cabin'] = "No" return df data_train, rfr = set_missing_ages(data_train) data_train = set_Cabin_type(data_train)
Titanic - Machine Learning from Disaster
9,122,235
cats['split'] = cats['item_category_name'].str.split('-') cats['type'] = cats['split'].map(lambda x: x[0].strip()) cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x) > 1 else x[0].strip()) cats['type_code'] = LabelEncoder().fit_transform(cats['type']) cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype']) cats = cats[['item_category_id','type_code', 'subtype_code']]<drop_column>
dummies_Cabin = pd.get_dummies(data_train['Cabin'], prefix= 'Cabin') dummies_Embarked = pd.get_dummies(data_train['Embarked'], prefix= 'Embarked') dummies_Sex = pd.get_dummies(data_train['Sex'], prefix= 'Sex') dummies_Pclass = pd.get_dummies(data_train['Pclass'], prefix= 'Pclass') df = pd.concat([data_train, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1) df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True) df
Titanic - Machine Learning from Disaster
9,122,235
items.drop(['item_name'], axis=1, inplace=True)<define_variables>
scaler = preprocessing.StandardScaler() age_scale_param = scaler.fit(df['Age'].values.reshape(-1,1)) df['Age_scaled'] = scaler.fit_transform(df['Age'].values.reshape(-1,1), age_scale_param) fare_scale_param = scaler.fit(df['Fare'].values.reshape(-1,1)) df['Fare_scaled'] = scaler.fit_transform(df['Fare'].values.reshape(-1,1), fare_scale_param) df.head()
Titanic - Machine Learning from Disaster
9,122,235
ts = time.time() matrix = [] for i in range(34): sales = train[train.date_block_num==i] matrix.append(np.array(list(product([i], sales.shop_id.unique(), sales.item_id.unique())), dtype='int16')) cols = ['date_block_num','shop_id','item_id'] matrix = pd.DataFrame(np.vstack(matrix), columns=cols) matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8) matrix['shop_id'] = matrix['shop_id'].astype(np.int8) matrix['item_id'] = matrix['item_id'].astype(np.int16) matrix.sort_values(cols, inplace=True) time.time() - ts<feature_engineering>
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*') train_np = train_df.values y = train_np[:, 0] X = train_np[:, 1:] clf = linear_model.LogisticRegression(solver='liblinear',C=1.0, penalty='l1', tol=1e-6) clf.fit(X, y) clf
Titanic - Machine Learning from Disaster
9,122,235
train['revenue'] = train['item_price'] * train['item_cnt_day'] train.head()<data_type_conversions>
data_test = pd.read_csv("/kaggle/input/titanic/test.csv") data_test.loc[(data_test.Fare.isnull()), 'Fare' ] = 0 tmp_df = data_test[['Age','Fare', 'Parch', 'SibSp', 'Pclass']] null_age = tmp_df[data_test.Age.isnull() ].values X = null_age[:, 1:] predictedAges = rfr.predict(X) data_test.loc[(data_test.Age.isnull()), 'Age' ] = predictedAges data_test = set_Cabin_type(data_test) dummies_Cabin = pd.get_dummies(data_test['Cabin'], prefix= 'Cabin') dummies_Embarked = pd.get_dummies(data_test['Embarked'], prefix= 'Embarked') dummies_Sex = pd.get_dummies(data_test['Sex'], prefix= 'Sex') dummies_Pclass = pd.get_dummies(data_test['Pclass'], prefix= 'Pclass') df_test = pd.concat([data_test, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1) df_test.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True) df_test['Age_scaled'] = scaler.fit_transform(df_test['Age'].values.reshape(-1,1), age_scale_param) df_test['Fare_scaled'] = scaler.fit_transform(df_test['Fare'].values.reshape(-1,1), fare_scale_param) df_test.head()
Titanic - Machine Learning from Disaster
9,122,235
matrix = pd.merge(matrix, group, on=cols, how='left') matrix['item_cnt_month'] = (matrix['item_cnt_month'].fillna(0).clip(0,20).astype(np.float16)) matrix.shape<data_type_conversions>
test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*') predictions = clf.predict(test) result = pd.DataFrame({'PassengerId':data_test['PassengerId'].values, 'Survived':predictions.astype(np.int32)}) result.to_csv("/kaggle/working/logical.csv", index=False)
Titanic - Machine Learning from Disaster
9,122,235
test['date_block_num'] = 34 test['date_block_num'] = test['date_block_num'].astype(np.int8) test['shop_id'] = test['shop_id'].astype(np.int8) test['item_id'] = test['item_id'].astype(np.int16) test.head()<concatenate>
pd.DataFrame({"columns":list(train_df.columns)[1:], "coef":list(clf.coef_.T)} )
Titanic - Machine Learning from Disaster
9,122,235
ts = time.time() matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols) matrix.fillna(0, inplace=True) time.time() - ts matrix[matrix.date_block_num==34]<data_type_conversions>
clf = linear_model.LogisticRegression(solver='liblinear',C=1.0, penalty='l1', tol=1e-6) all_data = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*') X = all_data.values[:,1:] y = all_data.values[:,0] print(cross_val_score(clf, X, y, cv=5))
Titanic - Machine Learning from Disaster
9,122,235
ts = time.time() matrix = pd.merge(matrix, shops, on=['shop_id'], how='left') matrix = pd.merge(matrix, items, on=['item_id'], how='left') matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left') matrix['city_code'] = matrix['city_code'].astype(np.int8) matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8) matrix['type_code'] = matrix['type_code'].astype(np.int8) matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8) time.time() - ts matrix<merge>
split_train, split_cv = train_test_split(df, test_size=0.3, random_state=42) train_df = split_train.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*') clf = linear_model.LogisticRegression(solver='liblinear',C=1.0, penalty='l1', tol=1e-6) clf.fit(train_df.values[:,1:], train_df.values[:,0]) cv_df = split_cv.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*') predictions = clf.predict(cv_df.values[:,1:]) origin_data_train = pd.read_csv("/kaggle/input/titanic/train.csv") bad_cases = origin_data_train.loc[origin_data_train['PassengerId'].isin(split_cv[predictions != cv_df.values[:,0]]['PassengerId'].values)] bad_cases.head(10 )
Titanic - Machine Learning from Disaster
9,122,235
def lag_feature(df, lags, col): tmp = df[['date_block_num','shop_id','item_id',col]] for i in lags: shifted = tmp.copy() shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)] shifted['date_block_num'] += i df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left') return df<statistical_test>
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*|Mother|Child|Family|Title') train_np = train_df.values y = train_np[:, 0] X = train_np[:, 1:] clf = linear_model.LogisticRegression(C=1.0, penalty='l2', tol=1e-6) bagging_clf = BaggingClassifier(clf, n_estimators=20, max_samples=0.8, max_features=1.0, bootstrap=True, bootstrap_features=False, n_jobs=-1) bagging_clf.fit(X, y) test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*|Mother|Child|Family|Title') predictions = bagging_clf.predict(test) result = pd.DataFrame({'PassengerId':data_test['PassengerId'].values, 'Survived':predictions.astype(np.int32)}) result.to_csv("/kaggle/working/logical.csv", index=False)
Titanic - Machine Learning from Disaster
9,235,765
ts = time.time() matrix = lag_feature(matrix, [1,2,3,6,12], 'item_cnt_month') time.time() - ts<import_modules>
from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier from sklearn.metrics import mean_absolute_error, accuracy_score, f1_score from sklearn.tree import DecisionTreeClassifier from sklearn.linear_model import LinearRegression, Lasso from sklearn.model_selection import GridSearchCV, KFold, cross_validate from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import make_pipeline, Pipeline from sklearn.preprocessing import OneHotEncoder, MinMaxScaler, Normalizer, OrdinalEncoder, StandardScaler from sklearn.impute import SimpleImputer
Titanic - Machine Learning from Disaster
9,235,765
<prepare_x_and_y>
train = pd.read_csv('/kaggle/input/titanic/train.csv') test = pd.read_csv('/kaggle/input/titanic/test.csv') sub = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
Titanic - Machine Learning from Disaster
9,235,765
<load_pretrained>
y = train['Survived']
Titanic - Machine Learning from Disaster
9,235,765
data = pd.read_pickle('/kaggle/input/all-datta5/data2.pkl') print(data.shape) data.head(3)<count_unique_values>
traintest = pd.concat([train.drop('Survived', axis=1), test]).reset_index()
Titanic - Machine Learning from Disaster
9,235,765
a = [] for i in range(3,34): b = data[data.date_block_num==i] c = b.sum()['item_cnt_month'] d = len(b.shop_id.unique()) a.append(c/d) print(a)<create_dataframe>
traintest[traintest['Age'].isnull()]
Titanic - Machine Learning from Disaster
9,235,765
weekarr = [] t = 2 count = 0 for w in range(3): for i in [31,28,31,30,31,30,31,31,30,31,30,31]: a = [0,0,0,0,0,0,0,count] count+=1 for j in range(i): a[t]+=1 if t==6: t=-1 t+=1 weekarr.append(a) weekarr = pd.DataFrame(np.vstack(weekarr), columns=['week0','week1','week2','week3','week4','week5','week6','date_block_num']) data = pd.merge(data, weekarr, on=['date_block_num'], how='left') del weekarr gc.collect()<prepare_x_and_y>
traintest[traintest['Age']<14]
Titanic - Machine Learning from Disaster
9,235,765
X_zong = data.drop(['item_cnt_month'], axis=1) Y_train = data[data.date_block_num < 33]['item_cnt_month'] Y_valid = data[data.date_block_num == 33]['item_cnt_month'] del data gc.collect()<categorify>
traintest['Title'] = traintest.Name.str.extract('([A-Za-z]+)\.')
Titanic - Machine Learning from Disaster
9,235,765
minMax = MinMaxScaler() X_zong_std = minMax.fit_transform(X_zong.iloc[:,:-7]) X_zong.iloc[:,:-7] = pd.DataFrame(np.vstack(X_zong_std),columns=X_zong.columns[:-7]) X_zong.iloc[:,:-7] = downcast_dtypes(X_zong.iloc[:,:-7]) X_zong['week0'] = X_zong['week0'].astype(np.int8) X_zong['week1'] = X_zong['week1'].astype(np.int8) X_zong['week2'] = X_zong['week2'].astype(np.int8) X_zong['week3'] = X_zong['week3'].astype(np.int8) X_zong['week4'] = X_zong['week4'].astype(np.int8) X_zong['week5'] = X_zong['week5'].astype(np.int8) X_zong['week6'] = X_zong['week6'].astype(np.int8) del X_zong_std gc.collect()<filter>
traintest_drop3 = traintest.drop(['Name', 'Ticket', 'Cabin'], axis=1)
Titanic - Machine Learning from Disaster
9,235,765
X_train = X_zong[X_zong.date_block_num < 0.96679688] X_valid = X_zong[X_zong.date_block_num == 0.96679688] X_test = X_zong[X_zong.date_block_num == 1] del X_zong gc.collect()<train_model>
traintest_drop3[traintest_drop3['Title']=='Mrs']['Age'].sort_values().value_counts(sort=False, dropna=False)
Titanic - Machine Learning from Disaster
9,235,765
ts = time.time() model = XGBRegressor( max_depth=10, n_estimators=1000, min_child_weight=0.5, colsample_bytree=0.9, subsample=0.8, eta=0.1, seed=1) model.fit( X_train, Y_train, eval_metric="rmse", eval_set=[(X_train, Y_train),(X_valid, Y_valid)], verbose=True, early_stopping_rounds = 10) time.time() - ts<save_to_csv>
traintest_drop3[traintest_drop3['Title']=='Master']['Age'].value_counts(dropna=False)
Titanic - Machine Learning from Disaster
9,235,765
Y_test = model.predict(X_test).clip(0, 20) test = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv').set_index('ID') submission = pd.DataFrame({"ID": test.index, "item_cnt_month": Y_test}) submission.to_csv('xgb_submission1.csv', index=False)<set_options>
traintest_drop3[traintest_drop3['Title']=='Mr']['Age'].sort_values().value_counts(sort=False, dropna=False)
Titanic - Machine Learning from Disaster
9,235,765
env = riiideducation.make_env()<load_from_csv>
traintest_drop3[traintest_drop3['Title']=='Miss']['Age'].sort_values().value_counts(sort=False, dropna=False)
Titanic - Machine Learning from Disaster
9,235,765
train = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', usecols=[1, 2, 3, 4, 5, 7, 8, 9], dtype={'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'answered_correctly':'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'} ) train.info()<load_from_csv>
master_nan_median = np.median(traintest_drop3[traintest_drop3['Title']=='Master']['Age'].dropna().values) master_nan_median
Titanic - Machine Learning from Disaster
9,235,765
questions_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/questions.csv', usecols=[0, 3], dtype={'question_id': 'int16', 'part': 'int8'}) questions_df<sort_values>
traintest_drop3[traintest_drop3['Title']=='Master']['Age']
Titanic - Machine Learning from Disaster
9,235,765
train = train[train.content_type_id == False].sort_values('timestamp').reset_index(drop = True)<groupby>
traintest_drop3.loc[(traintest_drop3['Title']=='Master')&(traintest_drop3['Age'].isna()),'Age'] = master_nan_median
Titanic - Machine Learning from Disaster
9,235,765
group1 = train.loc[(train.content_type_id == False), ['task_container_id', 'user_id']].groupby(['task_container_id'] ).agg(['count']) group1.columns = ['avg_questions'] group2 = train.loc[(train.content_type_id == False), ['task_container_id', 'user_id']].groupby(['task_container_id'] ).agg(['nunique']) group2.columns = ['avg_questions'] group3 = group1 / group2 group3['avg_questions_seen'] = group3.avg_questions.cumsum() print('The amount of questions seen by the average user:') group3.iloc[0].avg_questions_seen<merge>
traintest_drop3.loc[traintest_drop3['Title']=='Master', 'Age']
Titanic - Machine Learning from Disaster
9,235,765
train = pd.merge(train, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left')<groupby>
mr_nan_median = np.median(traintest_drop3[traintest_drop3['Title']=='Mr']['Age'].dropna().values) mr_nan_median
Titanic - Machine Learning from Disaster
9,235,765
results_q2_final = train.loc[train.content_type_id == False, ['question_id','part']].groupby(['question_id']).agg(['count']) results_q2_final.columns = ['count']<merge>
traintest_drop3.loc[(traintest_drop3['Title']=='Mr')&(traintest_drop3['Age'].isna()),'Age'] = mr_nan_median
Titanic - Machine Learning from Disaster
9,235,765
question2 = pd.merge(questions_df, results_q_final, left_on = 'question_id', right_on = 'question_id', how = 'left') question2 = pd.merge(question2, results_q2_final, left_on = 'question_id', right_on = 'question_id', how = 'left') question2.quest_pct = round(question2.quest_pct,5) question2<groupby>
mrs_nan_median = np.median(traintest_drop3[traintest_drop3['Title']=='Mrs']['Age'].dropna().values) mrs_nan_median
Titanic - Machine Learning from Disaster
9,235,765
prior_mean_user = results_u2_final.explanation_mean_user.mean() prior_mean_user<drop_column>
traintest_drop3.loc[(traintest_drop3['Title']=='Mrs')&(traintest_drop3['Age'].isna()),'Age'] = mrs_nan_median
Titanic - Machine Learning from Disaster
9,235,765
train.drop(['timestamp', 'content_type_id', 'question_id', 'part'], axis=1, inplace=True)<groupby>
miss_nan_median = np.median(traintest_drop3[traintest_drop3['Title']=='Miss']['Age'].dropna().values) miss_nan_median
Titanic - Machine Learning from Disaster
9,235,765
print('The old length of the training set:') print(len(train)) validation = train.groupby('user_id').tail(10) train = train[~train.index.isin(validation.index)] print('The length of the training set plus the length of the validation set:') print(len(train) + len(validation))<groupby>
traintest_drop3.loc[(traintest_drop3['Title']=='Miss')&(traintest_drop3['Age'].isna()),'Age'] = miss_nan_median
Titanic - Machine Learning from Disaster
9,235,765
results_u_val = train[['user_id','answered_correctly']].groupby(['user_id']).agg(['mean']) results_u_val.columns = ['answered_correctly_user'] results_u2_val = train[['user_id','prior_question_had_explanation']].groupby(['user_id']).agg(['mean']) results_u2_val.columns = ['explanation_mean_user']<groupby>
traintest_drop3['Age'].isna().sum()
Titanic - Machine Learning from Disaster
9,235,765
X = train.groupby('user_id').tail(30) train = train[~train.index.isin(X.index)] print('The length of the training set plus the length of the validation set plus the length of the set to be discarded:') print(len(X) + len(validation) + len(train))<groupby>
still_nan = np.where(traintest_drop3['Age'].isna())[0].tolist() still_nan
Titanic - Machine Learning from Disaster
9,235,765
results_u_X = train[['user_id','answered_correctly']].groupby(['user_id']).agg(['mean']) results_u_X.columns = ['answered_correctly_user'] results_u2_X = train[['user_id','prior_question_had_explanation']].groupby(['user_id']).agg(['mean']) results_u2_X.columns = ['explanation_mean_user']<drop_column>
traintest_drop3.iloc[still_nan]
Titanic - Machine Learning from Disaster
9,235,765
del train<merge>
traintest_drop3.loc[(traintest_drop3['Title']=='Ms')&(traintest_drop3['Age'].isna()),'Age'] = mrs_nan_median
Titanic - Machine Learning from Disaster
9,235,765
X = pd.merge(X, group3, left_on=['task_container_id'], right_index=True, how="left") X = pd.merge(X, results_u_X, on=['user_id'], how="left") X = pd.merge(X, results_u2_X, on=['user_id'], how="left")<merge>
traintest_drop3.loc[(traintest_drop3['Title']=='Dr')&(traintest_drop3['Age'].isna()),'Age'] = mr_nan_median
Titanic - Machine Learning from Disaster
9,235,765
validation = pd.merge(validation, group3, left_on=['task_container_id'], right_index=True, how="left") validation = pd.merge(validation, results_u_val, on=['user_id'], how="left") validation = pd.merge(validation, results_u2_val, on=['user_id'], how="left")<categorify>
fare_nan = np.where(traintest_drop3['Fare'].isna())[0].tolist()
Titanic - Machine Learning from Disaster
9,235,765
lb_make = LabelEncoder() X.prior_question_had_explanation.fillna(False, inplace = True) validation.prior_question_had_explanation.fillna(False, inplace = True) validation["prior_question_had_explanation_enc"] = lb_make.fit_transform(validation["prior_question_had_explanation"]) X["prior_question_had_explanation_enc"] = lb_make.fit_transform(X["prior_question_had_explanation"])<categorify>
traintest_drop3.iloc[fare_nan]
Titanic - Machine Learning from Disaster
9,235,765
question2.quest_pct = question2.quest_pct.mask((question2['count'] < 3), .65) question2.quest_pct = question2.quest_pct.mask((question2.quest_pct < .2)&(question2['count'] < 21), .2) question2.quest_pct = question2.quest_pct.mask((question2.quest_pct > .95)&(question2['count'] < 21), .95)<merge>
pclass3_median = np.median(traintest_drop3[traintest_drop3['Pclass']==3]['Fare'].dropna().values) pclass3_median
Titanic - Machine Learning from Disaster
9,235,765
X = pd.merge(X, question2, left_on = 'content_id', right_on = 'question_id', how = 'left') validation = pd.merge(validation, question2, left_on = 'content_id', right_on = 'question_id', how = 'left')<prepare_x_and_y>
traintest_drop3.loc[(traintest_drop3['Fare'].isna()),'Fare'] = pclass3_median
Titanic - Machine Learning from Disaster
9,235,765
y = X['answered_correctly'] X = X.drop(['answered_correctly'], axis=1) X.head() y_val = validation['answered_correctly'] X_val = validation.drop(['answered_correctly'], axis=1)<drop_column>
traintest_drop3['Fare'].isna().sum()
Titanic - Machine Learning from Disaster
9,235,765
X = X[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'avg_questions_seen', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']] X_val = X_val[['answered_correctly_user', 'explanation_mean_user', 'quest_pct', 'avg_questions_seen', 'prior_question_elapsed_time','prior_question_had_explanation_enc', 'part']]<data_type_conversions>
emb_nan = np.where(traintest_drop3['Embarked'].isna())[0].tolist() traintest_drop3.iloc[emb_nan]
Titanic - Machine Learning from Disaster
9,235,765
X['answered_correctly_user'].fillna(0.65, inplace=True) X['explanation_mean_user'].fillna(prior_mean_user, inplace=True) X['quest_pct'].fillna(content_mean, inplace=True) X['part'].fillna(4, inplace = True) X['avg_questions_seen'].fillna(1, inplace = True) X['prior_question_elapsed_time'].fillna(elapsed_mean, inplace = True) X['prior_question_had_explanation_enc'].fillna(0, inplace = True)<data_type_conversions>
traintest_drop3[traintest_drop3['Fare']==80.0]
Titanic - Machine Learning from Disaster