kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
9,877,320
X_all = pd.get_dummies(all_data) X_train = X_all[:len(y_train)] X_test = X_all[len(y_train):] X_train<import_modules>
for dataset in train_test_data: dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0} ).astype(int )
Titanic - Machine Learning from Disaster
9,877,320
kf = KFold(n_splits=8, random_state=42, shuffle=True) def cv_rmse(model): return -cross_val_score(model, X_train, y_train, scoring='neg_root_mean_squared_error', cv=kf) models = ['Linear', 'SVR', 'Random_Forest', 'XGBR', 'Cat_Boost', 'Ridge', 'Elastic_Net', 'Lasso', 'Stack'] scores = [] lin = LinearRegression() score_lin = cv_rmse(lin) scores.append(score_lin.mean()) svr = SVR() score_svr = cv_rmse(svr) scores.append(score_svr.mean()) rfr = RandomForestRegressor() score_rfr = cv_rmse(rfr) scores.append(score_rfr.mean()) xgb = xg.XGBRegressor() score_xgb = cv_rmse(xgb) scores.append(score_xgb.mean()) catb = CatBoostRegressor(verbose=0, allow_writing_files=False) score_catb = cv_rmse(catb) scores.append(score_catb.mean()) rid = Ridge() score_rid = cv_rmse(rid) scores.append(score_rid.mean()) el = ElasticNet() score_el = cv_rmse(el) scores.append(score_el.mean()) las = Lasso() score_las = cv_rmse(las) scores.append(score_las.mean()) stack_gen = StackingRegressor(regressors=(CatBoostRegressor(verbose=0, allow_writing_files=False), Ridge() , xg.XGBRegressor() , RandomForestRegressor()), meta_regressor=CatBoostRegressor(verbose=0, allow_writing_files=False), use_features_in_secondary=True) score_stack_gen = cv_rmse(stack_gen) scores.append(score_stack_gen.mean()) cv_score = pd.DataFrame(models, columns=['Regressors']) cv_score['RMSE_mean'] = scores cv_score<train_on_grid>
train.Embarked.value_counts()
Titanic - Machine Learning from Disaster
9,877,320
predictions = {} def xgbr(X_train, y_train, X_test): xgbrM = xg.XGBRegressor() params = {'max_depth': [3, 4, 5, 6, 7, 8], 'min_child_weight': [0, 4, 5, 6, 7, 8], 'learning_rate': [0.01, 0.05, 0.1, 0.2, 0.25, 0.8, 1], 'n_estimators': [10, 30, 50, 100, 200, 400, 1000]} grid_search_xg = RandomizedSearchCV(estimator=xgbrM, scoring='neg_root_mean_squared_error', param_distributions=params, n_iter=200, cv=4, verbose=2, random_state=42, n_jobs=-1) grid_search_xg.fit(X_train, y_train) xgbrModel = grid_search_xg.best_estimator_ print('Best params(XGBR):',grid_search_xg.best_params_) print('RMSE(XGBR):', -grid_search_xg.best_score_) return xgbrModel xgbrModel = xg.XGBRegressor(n_estimators=400, min_child_weight=5, max_depth=7, learning_rate=0.05) xgbrModel.fit(X_train, y_train) predictions['XGBR'] = xgbrModel.predict(X_test )<train_on_grid>
for dataset in train_test_data: dataset['Embarked'] = dataset['Embarked'].fillna('S' )
Titanic - Machine Learning from Disaster
9,877,320
def ridge(X_train, y_train, X_test): alpha_ridge = {'alpha': [-3, -2, -1, 1e-15, 1e-10, 1e-8, 1e-5, 1e-4, 1e-3, 1e-2, 0.5, 1, 1.5, 2, 3, 4, 5, 10, 20, 30, 40]} rd = Ridge() grid_search_rd = GridSearchCV(estimator=rd, scoring='neg_root_mean_squared_error', param_grid=alpha_ridge, cv=4, n_jobs=-1, verbose=3) grid_search_rd.fit(X_train, y_train) ridgeModel = grid_search_rd.best_estimator_ print('Best params(Ridge):', grid_search_rd.best_params_) print('RMSE(Ridge):', -grid_search_rd.best_score_) return ridgeModel ridgeModel = Ridge(alpha=10) ridgeModel.fit(X_train, y_train) predictions['Ridge'] = ridgeModel.predict(X_test )<train_on_grid>
for dataset in train_test_data: dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int )
Titanic - Machine Learning from Disaster
9,877,320
def catBoost(X_train, y_train, X_test): catM = CatBoostRegressor(verbose=0, allow_writing_files=False) params = {'learning_rate': [0.01, 0.05, 0.005, 0.0005], 'depth': [4, 6, 10], 'l2_leaf_reg': [1, 2, 3, 5, 9]} grid_search_cat = RandomizedSearchCV(estimator=catM, scoring='neg_root_mean_squared_error', param_distributions=params, n_iter=10, cv=4, verbose=2, random_state=42, n_jobs=-1) grid_search_cat.fit(X_train, y_train) catModel = grid_search_cat.best_estimator_ print('Best params(CatBoost):',grid_search_cat.best_params_) print('RMSE(CatBoost):', -grid_search_cat.best_score_) return catModel catModel = CatBoostRegressor(verbose=0, allow_writing_files=False, learning_rate=0.05, l2_leaf_reg=2, depth=4) catModel.fit(X_train, y_train) predictions['CatBoost'] = catModel.predict(X_test )<define_variables>
for dataset in train_test_data: age_avg = dataset['Age'].mean() age_std = dataset['Age'].std() age_null_count = dataset['Age'].isnull().sum() age_null_random_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_null_count) dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list dataset['Age'] = dataset['Age'].astype(int) train['AgeBand'] = pd.cut(train['Age'], 5) print(train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
9,877,320
final_prediction = 0.25 * predictions['XGBR'] + 0.35 * predictions['CatBoost'] + 0.4 * predictions['Ridge']<save_to_csv>
for dataset in train_test_data: dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1 dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2 dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3 dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
Titanic - Machine Learning from Disaster
9,877,320
result = pd.DataFrame([len(y_train)+ 1 + i for i in range(len(X_test)) ], columns=['Id']) result[target_name] = np.expm1(final_prediction) result.to_csv('result.csv', index=False, header=True) result<set_options>
for dataset in train_test_data: dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median() )
Titanic - Machine Learning from Disaster
9,877,320
warnings.filterwarnings('ignore') %matplotlib inline<load_from_csv>
for dataset in train_test_data: dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0 dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1 dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2 dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3 dataset['Fare'] = dataset['Fare'].astype(int )
Titanic - Machine Learning from Disaster
9,877,320
df_train=pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv') df_train.shape<load_from_csv>
for dataset in train_test_data: dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1 print(train[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
9,877,320
df_test=pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv') df_test.shape<concatenate>
for dataset in train_test_data: dataset['IsAlone'] = 0 dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1 print(train[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean() )
Titanic - Machine Learning from Disaster
9,877,320
train_test=pd.concat([df_train,df_test],axis=0,sort=False) train_test.head()<define_variables>
features_drop = ['Name', 'SibSp', 'Parch', 'Ticket', 'Cabin', 'FamilySize'] train = train.drop(features_drop, axis=1) test = test.drop(features_drop, axis=1) train = train.drop(['PassengerId', 'AgeBand', 'FareBand'], axis=1 )
Titanic - Machine Learning from Disaster
9,877,320
sales= train_test['SalePrice'] Id= train_test['Id']<drop_column>
X_train = train.drop('Survived', axis=1) y_train = train['Survived'] X_test = test.drop("PassengerId", axis=1 ).copy() X_train.shape, y_train.shape, X_test.shape
Titanic - Machine Learning from Disaster
9,877,320
train_test= train_test.drop(columns=['SalePrice']) train_test= train_test.drop(columns=['Id']) train_test.head()<sort_values>
from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.linear_model import Perceptron from sklearn.linear_model import SGDClassifier
Titanic - Machine Learning from Disaster
9,877,320
total = train_test.isnull().sum().sort_values(ascending=False) percent =(train_test.isnull().sum() /train_test.isnull().count() ).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(20 )<filter>
clf = LogisticRegression() clf.fit(X_train, y_train) y_pred_log_reg = clf.predict(X_test) acc_log_reg = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_log_reg)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
df_numeric=train_test.select_dtypes(include=np.number) df_numeric<sort_values>
clf = SVC() clf.fit(X_train, y_train) y_pred_svc = clf.predict(X_test) acc_svc = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_svc)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
total = df_numeric.isnull().sum().sort_values(ascending=False) percent =(df_numeric.isnull().sum() /df_numeric.isnull().count() ).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(5 )<sort_values>
clf = LinearSVC() clf.fit(X_train, y_train) y_pred_linear_svc = clf.predict(X_test) acc_linear_svc = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_linear_svc)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
total = df_obj.isnull().sum().sort_values(ascending=False) percent =(df_obj.isnull().sum() /df_obj.isnull().count() ).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(5 )<drop_column>
clf = KNeighborsClassifier(n_neighbors = 3) clf.fit(X_train, y_train) y_pred_knn = clf.predict(X_test) acc_knn = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_knn)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
df_obj = df_obj.drop(columns=['PoolQC','MiscFeature','Alley','Fence','FireplaceQu']) df_obj<data_type_conversions>
clf = DecisionTreeClassifier() clf.fit(X_train, y_train) y_pred_decision_tree = clf.predict(X_test) acc_decision_tree = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_decision_tree)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
for column in df_obj.columns: df_obj[column] = df_obj[column].astype('category') df_obj[column] = df_obj[column].cat.codes<concatenate>
clf = RandomForestClassifier(n_estimators=100) clf.fit(X_train, y_train) y_pred_random_forest = clf.predict(X_test) acc_random_forest = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_random_forest)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
df_new = pd.concat([df_numeric, df_obj], axis=1) df_new<concatenate>
clf = GaussianNB() clf.fit(X_train, y_train) y_pred_gnb = clf.predict(X_test) acc_gnb = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_gnb)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
df1 = pd.concat([df_new, sales], axis=1) df2 = pd.concat([Id , df1], axis=1) df2<prepare_x_and_y>
clf = Perceptron(max_iter=5, tol=None) clf.fit(X_train, y_train) y_pred_perceptron = clf.predict(X_test) acc_perceptron = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_perceptron)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
X= train.drop('SalePrice', axis=1) y = train['SalePrice']<import_modules>
clf = SGDClassifier(max_iter=5, tol=None) clf.fit(X_train, y_train) y_pred_sgd = clf.predict(X_test) acc_sgd = round(clf.score(X_train, y_train)* 100, 2) print("Train Accuracy: " + str(acc_sgd)+ '%' )
Titanic - Machine Learning from Disaster
9,877,320
from sklearn.ensemble import GradientBoostingRegressor from sklearn.model_selection import cross_val_score<choose_model_class>
clf = RandomForestClassifier(n_estimators=100) clf.fit(X_train, y_train) y_pred_random_forest_training_set = clf.predict(X_train) acc_random_forest = round(clf.score(X_train, y_train)* 100, 2) print("Accuracy: %i %% "%acc_random_forest) class_names = ['Survived', 'Not Survived'] cnf_matrix = confusion_matrix(y_train, y_pred_random_forest_training_set) np.set_printoptions(precision=2) print('Confusion Matrix in Numbers') print(cnf_matrix) print('') cnf_matrix_percent = cnf_matrix.astype('float')/ cnf_matrix.sum(axis=1)[:, np.newaxis] print('Confusion Matrix in Percentage') print(cnf_matrix_percent) print('') true_class_names = ['True Survived', 'True Not Survived'] predicted_class_names = ['Predicted Survived', 'Predicted Not Survived'] df_cnf_matrix = pd.DataFrame(cnf_matrix, index = true_class_names, columns = predicted_class_names) df_cnf_matrix_percent = pd.DataFrame(cnf_matrix_percent, index = true_class_names, columns = predicted_class_names) plt.figure(figsize =(15,5)) plt.subplot(121) sns.heatmap(df_cnf_matrix, annot=True, fmt='d', cmap = "Blues") plt.subplot(122) sns.heatmap(df_cnf_matrix_percent, annot=True, cmap = "Blues" )
Titanic - Machine Learning from Disaster
9,877,320
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=4, max_features='sqrt', min_samples_leaf=15, min_samples_split=10, loss='huber', random_state =5 )<train_model>
models = pd.DataFrame({ 'Model': ['LR', 'SVM', 'L-SVC', 'KNN', 'DTree', 'RF', 'NB', 'Perceptron', 'SGD'], 'Score': [acc_log_reg, acc_svc, acc_linear_svc, acc_knn, acc_decision_tree, acc_random_forest, acc_gnb, acc_perceptron, acc_sgd] }) models = models.sort_values(by='Score', ascending=False) models
Titanic - Machine Learning from Disaster
9,877,320
<compute_train_metric><EOS>
submission = pd.DataFrame({ "PassengerId": test["PassengerId"], "Survived": y_pred_random_forest }) submission.to_csv('gender_submission.csv', index=False )
Titanic - Machine Learning from Disaster
9,501,907
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric>
%matplotlib inline warnings.filterwarnings('ignore') for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Titanic - Machine Learning from Disaster
9,501,907
print("train score: ", GBoost.score(X,y))<load_from_csv>
data_tittrain=pd.read_csv(".. /input/titanic/train.csv") data_tittest=pd.read_csv(".. /input/titanic/test.csv") data_sample=pd.read_csv(".. /input/titanic/gender_submission.csv" )
Titanic - Machine Learning from Disaster
9,501,907
submission = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/sample_submission.csv' )<predict_on_test>
data_sample.head()
Titanic - Machine Learning from Disaster
9,501,907
y_pred = GBoost.predict(test )<prepare_output>
merged = pd.concat([data_tittrain,data_tittest] )
Titanic - Machine Learning from Disaster
9,501,907
submission['SalePrice'] = y_pred submission.head(10 )<save_to_csv>
data_tittrain["Sex"].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
submission.to_csv('submission.csv', index=False )<save_to_csv>
data_tittest["Sex"].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
submission.to_csv('submission.csv', index=False )<import_modules>
data_tittrain["Pclass"].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
from tqdm import tqdm<load_from_csv>
data_tittest['Pclass'].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
train = pd.read_csv('.. /input/ames-housing-dataset/AmesHousing.csv') train.drop(['PID'], axis=1, inplace=True) origin = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv') train.columns = origin.columns test = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv') submission = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/sample_submission.csv') print('Train:{} Test:{}'.format(train.shape,test.shape))<drop_column>
data_tittrain = data_tittrain.drop(columns=['Name','Cabin']) data_tittrain['family_member'] = data_tittrain['SibSp'] + data_tittrain['Parch'] data_tittrain = data_tittrain.drop(columns=['SibSp', 'Parch'] )
Titanic - Machine Learning from Disaster
9,501,907
missing = test.isnull().sum() missing = missing[missing>0] train.drop(missing.index, axis=1, inplace=True) train.drop(['Electrical'], axis=1, inplace=True) test.dropna(axis=1, inplace=True) test.drop(['Electrical'], axis=1, inplace=True )<feature_engineering>
data_tittest = data_tittest.drop(columns=['Name','Cabin']) data_tittest['family_member'] = data_tittest['SibSp'] + data_tittest['Parch'] data_tittest = data_tittest.drop(columns=['SibSp', 'Parch'] )
Titanic - Machine Learning from Disaster
9,501,907
l_test = tqdm(range(0, len(test)) , desc='Matching') for i in l_test: for j in range(0, len(train)) : for k in range(1, len(test.columns)) : if test.iloc[i,k] == train.iloc[j,k]: continue else: break else: submission.iloc[i, 1] = train.iloc[j, -1] break l_test.close()<save_to_csv>
X = data_tittrain.drop(columns=['Survived']) Y = data_tittrain['Survived'] cate_features_index = np.where(X.dtypes != float)[0]
Titanic - Machine Learning from Disaster
9,501,907
submission.to_csv('result-with-best.csv', index=False )<install_modules>
from catboost import CatBoostClassifier, cv, Pool from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
9,501,907
!pip install.. /input/python-datatable/datatable-0.11.0-cp37-cp37m-manylinux2010_x86_64.whl > /dev/null 2>&1<set_options>
xtrain,xtest,ytrain,ytest = train_test_split(X,Y,train_size=0.82,random_state=42 )
Titanic - Machine Learning from Disaster
9,501,907
np.seterr(divide = 'ignore', invalid = 'ignore' )<define_variables>
clf =CatBoostClassifier(eval_metric='Accuracy',use_best_model=True,random_seed=42 )
Titanic - Machine Learning from Disaster
9,501,907
data_types_dict = { 'user_id': 'int32', 'content_id': 'int16', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'bool' } target = 'answered_correctly'<load_from_csv>
clf.fit(xtrain,ytrain,cat_features=cate_features_index,eval_set=(xtest,ytest), early_stopping_rounds=50 )
Titanic - Machine Learning from Disaster
9,501,907
train_df = dt.fread('.. /input/riiid-test-answer-prediction/train.csv', columns = set(data_types_dict.keys())).to_pandas()<train_model>
test_id = data_tittest.PassengerId data_tittest.isnull().sum()
Titanic - Machine Learning from Disaster
9,501,907
print('Training dataset detailed information') print('*' * 50) print('Columns:', train_df.columns) print('*' * 50) print('Shape:', train_df.shape) print('*' * 50) print('NA values in each column:', sum(train_df.isna().sum())) print('*' * 50 )<data_type_conversions>
prediction = clf.predict(data_tittest )
Titanic - Machine Learning from Disaster
9,501,907
train_df = train_df[train_df[target] != -1].reset_index(drop = True, inplace = False) train_df['prior_question_had_explanation'].fillna(False, inplace = True) train_df = train_df.astype(data_types_dict )<feature_engineering>
df_sub = pd.DataFrame() df_sub['PassengerId'] = test_id df_sub['Survived'] = prediction.astype(np.int) df_sub.to_csv('gender_submission.csv', index=False )
Titanic - Machine Learning from Disaster
9,501,907
train_df['lag'] = train_df.groupby('user_id')[target].shift() cum = train_df.groupby('user_id')['lag'].agg(['cumsum', 'cumcount']) train_df['user_correctness'] = cum['cumsum'] / cum['cumcount'] train_df.drop(columns = ['lag'], inplace = True )<groupby>
%matplotlib inline warnings.filterwarnings('ignore') for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Titanic - Machine Learning from Disaster
9,501,907
user_agg = train_df.groupby('user_id')[target].agg(['sum', 'count']) content_agg = train_df.groupby('content_id')[target].agg(['sum', 'count'] )<groupby>
data_tittrain=pd.read_csv(".. /input/titanic/train.csv") data_tittest=pd.read_csv(".. /input/titanic/test.csv") data_sample=pd.read_csv(".. /input/titanic/gender_submission.csv" )
Titanic - Machine Learning from Disaster
9,501,907
train_df = train_df.groupby('user_id' ).tail(24 ).reset_index(drop = True )<load_from_csv>
data_sample.head()
Titanic - Machine Learning from Disaster
9,501,907
questions_df = pd.read_csv( '.. /input/riiid-test-answer-prediction/questions.csv', usecols = [0, 3], dtype = {'question_id': 'int16', 'part': 'int8'} ) train_df = pd.merge(train_df, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left') train_df.drop(columns = ['question_id'], inplace = True )<data_type_conversions>
merged = pd.concat([data_tittrain,data_tittest] )
Titanic - Machine Learning from Disaster
9,501,907
train_df['content_count'] = train_df['content_id'].map(content_agg['count'] ).astype('int32') train_df['content_id'] = train_df['content_id'].map(content_agg['sum'] / content_agg['count'] )<drop_column>
data_tittrain["Sex"].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
valid_df = train_df.groupby('user_id' ).tail(6) train_df.drop(valid_df.index, inplace = True )<init_hyperparams>
data_tittest["Sex"].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
features = ['content_id', 'prior_question_elapsed_time', 'prior_question_had_explanation', 'user_correctness', 'part', 'content_count'] params = { 'loss_function': 'Logloss', 'eval_metric': 'AUC', 'task_type': 'GPU' if torch.cuda.is_available() else 'CPU', 'grow_policy': 'Lossguide', 'iterations': 2500, 'learning_rate': 4e-2, 'random_seed': 0, 'l2_leaf_reg': 1e-1, 'depth': 15, 'max_leaves': 10, 'border_count': 128, 'verbose': 50, }<define_variables>
data_tittrain["Pclass"].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
train_set = Pool(train_df[features], label = train_df[target]) val_set = Pool(valid_df[features], label = valid_df[target] )<train_model>
data_tittest['Pclass'].value_counts()
Titanic - Machine Learning from Disaster
9,501,907
model = CatBoostClassifier(**params) model.fit(train_set, eval_set = val_set, use_best_model = True )<data_type_conversions>
data_tittrain = data_tittrain.drop(columns=['Name','Cabin']) data_tittrain['family_member'] = data_tittrain['SibSp'] + data_tittrain['Parch'] data_tittrain = data_tittrain.drop(columns=['SibSp', 'Parch'] )
Titanic - Machine Learning from Disaster
9,501,907
user_sum_dict = user_agg['sum'].astype('int16' ).to_dict(defaultdict(int)) user_count_dict = user_agg['count'].astype('int16' ).to_dict(defaultdict(int)) content_sum_dict = content_agg['sum'].astype('int32' ).to_dict(defaultdict(int)) content_count_dict = content_agg['count'].astype('int32' ).to_dict(defaultdict(int))<split>
data_tittest = data_tittest.drop(columns=['Name','Cabin']) data_tittest['family_member'] = data_tittest['SibSp'] + data_tittest['Parch'] data_tittest = data_tittest.drop(columns=['SibSp', 'Parch'] )
Titanic - Machine Learning from Disaster
9,501,907
try: env = riiideducation.make_env() except: pass iter_test = env.iter_test() prior_test_df = None<prepare_output>
X = data_tittrain.drop(columns=['Survived']) Y = data_tittrain['Survived'] cate_features_index = np.where(X.dtypes != float)[0]
Titanic - Machine Learning from Disaster
9,501,907
%%time for(test_df, sample_prediction_df)in iter_test: if prior_test_df is not None: prior_test_df[target] = eval(test_df['prior_group_answers_correct'].iloc[0]) prior_test_df = prior_test_df[prior_test_df[target] != -1].reset_index(drop = True) user_ids = prior_test_df['user_id'].values content_ids = prior_test_df['content_id'].values targets = prior_test_df[target].values for user_id, content_id, answered_correctly in zip(user_ids, content_ids, targets): user_sum_dict[user_id] += answered_correctly user_count_dict[user_id] += 1 content_sum_dict[content_id] += answered_correctly content_count_dict[content_id] += 1 prior_test_df = test_df.copy() test_df = test_df[test_df['content_type_id'] == 0].reset_index(drop = True) test_df = pd.merge(test_df, questions_df, left_on = 'content_id', right_on = 'question_id', how = 'left') test_df['prior_question_had_explanation'] = test_df['prior_question_had_explanation'].fillna(False ).astype('bool') user_sum = np.zeros(len(test_df), dtype = np.int16) user_count = np.zeros(len(test_df), dtype = np.int16) content_sum = np.zeros(len(test_df), dtype = np.int32) content_count = np.zeros(len(test_df), dtype = np.int32) for i,(user_id, content_id)in enumerate(zip(test_df['user_id'].values, test_df['content_id'].values)) : user_sum[i] = user_sum_dict[user_id] user_count[i] = user_count_dict[user_id] content_sum[i] = content_sum_dict[content_id] content_count[i] = content_count_dict[content_id] test_df['user_correctness'] = user_sum / user_count test_df['content_count'] = content_count test_df['content_id'] = content_sum / content_count test_df[target] = model.predict_proba(test_df[features])[:,1] env.predict(test_df[['row_id', target]] )<import_modules>
from catboost import CatBoostClassifier, cv, Pool from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
9,501,907
import optuna from sklearn.feature_selection import RFECV from lightgbm import LGBMClassifier import pandas as pd import joblib import numpy as np import riiideducation from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split import optuna.integration.lightgbm as lgb from optuna.samplers import TPESampler import matplotlib.pyplot as plt<load_from_csv>
xtrain,xtest,ytrain,ytest = train_test_split(X,Y,train_size=0.82,random_state=42 )
Titanic - Machine Learning from Disaster
9,501,907
questions_df = pd.read_csv( '.. /input/riiid-test-answer-prediction/questions.csv', usecols=[0, 3], dtype={'question_id': 'int16', 'part': 'int8'} ) model = joblib.load('.. /input/lgb-model-2/Final_lgb_2.joblib') selector = joblib.load('.. /input/selector-2/Selector_2.joblib') user_answers_df = pd.read_csv('.. /input/preprocessingcontentuser/user.csv') content_answers_df = pd.read_csv('.. /input/preprocessingcontentuser/content.csv' )<define_variables>
clf =CatBoostClassifier(eval_metric='Accuracy',use_best_model=True,random_seed=42 )
Titanic - Machine Learning from Disaster
9,501,907
features = [ 'timestamp', 'user_id', 'content_id', 'prior_question_elapsed_time', 'prior_question_had_explanation', 'part', 'mean_user_accuracy', 'questions_skew', 'questions_std', 'questions_var', 'questions_sem', 'content_mean', 'content_skew', 'content_std', 'content_var', 'content_sem' ] features = [features[i] for i in range(len(selector.support_)) if selector.support_[i] == True]<split>
clf.fit(xtrain,ytrain,cat_features=cate_features_index,eval_set=(xtest,ytest), early_stopping_rounds=50 )
Titanic - Machine Learning from Disaster
9,501,907
env = riiideducation.make_env() iter_test = env.iter_test()<merge>
test_id = data_tittest.PassengerId data_tittest.isnull().sum()
Titanic - Machine Learning from Disaster
9,501,907
for(test_df, sample_prediction_df)in iter_test: test_df = pd.merge(test_df, questions_df, left_on='content_id', right_on='question_id', how='left') test_df.drop(columns=['question_id'], inplace=True) test_df['prior_question_had_explanation'].fillna(bool(True), inplace=True) test_df = test_df.replace([-np.inf, np.inf], np.nan) test_df = test_df.fillna(test_df.mean()) test_df = test_df[test_df['content_type_id'] != 1] test_df = test_df.merge(user_answers_df, how='left', on='user_id') test_df = test_df.merge(content_answers_df, how='left', on='content_id') test_df['prior_question_had_explanation'] = test_df['prior_question_had_explanation'].astype(bool) test_df = test_df.replace([-np.inf, np.inf], np.nan) test_df = test_df.fillna(test_df.mean()) test_df['answered_correctly'] = model.predict_proba(test_df[features])[:,1] env.predict(test_df.loc[test_df['content_type_id'] == 0, ['row_id', 'answered_correctly']]) <import_modules>
prediction = clf.predict(data_tittest )
Titanic - Machine Learning from Disaster
9,501,907
import numpy as np import pandas as pd <set_options>
df_sub = pd.DataFrame() df_sub['PassengerId'] = test_id df_sub['Survived'] = prediction.astype(np.int) df_sub.to_csv('gender_submission.csv', index=False )
Titanic - Machine Learning from Disaster
6,723,741
print(h2o.__version__) h2o.init(max_mem_size='16G' )<load_from_csv>
%matplotlib inline
Titanic - Machine Learning from Disaster
6,723,741
train = h2o.import_file(".. /input/melanoma-train-test-creator/train_meta_size_3.csv") test = h2o.import_file(".. /input/melanoma-train-test-creator/test_meta_size_3.csv" )<prepare_x_and_y>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
6,723,741
x = test.columns y = 'target'<feature_engineering>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv" )
Titanic - Machine Learning from Disaster
6,723,741
train[y] = train[y].asfactor()<train_model>
Titanic - Machine Learning from Disaster
6,723,741
aml = H2OAutoML(max_models=10000, seed=47, max_runtime_secs=3600) aml.train(x=x, y=y, training_frame=train, fold_column="fold" )<find_best_params>
def age_fill(columns): age = columns[0] pclass = columns[1] if pd.isnull(age): if pclass == 1: return int(train_data[train_data['Pclass']==1]['Age'].mean()) elif pclass == 2: return int(train_data[train_data['Pclass']==2]['Age'].mean()) else: return int(train_data[train_data['Pclass']==3]['Age'].mean()) else: return age
Titanic - Machine Learning from Disaster
6,723,741
aml.leader<predict_on_test>
train_data['Age'] = train_data[['Age','Pclass']].apply(age_fill, axis=1 )
Titanic - Machine Learning from Disaster
6,723,741
preds = aml.predict(test )<load_from_csv>
Titanic - Machine Learning from Disaster
6,723,741
sample_submission = pd.read_csv('.. /input/siim-isic-melanoma-classification/sample_submission.csv') sample_submission.head()<save_to_csv>
train_data.drop('Cabin', axis=1, inplace=True )
Titanic - Machine Learning from Disaster
6,723,741
sample_submission['target'] = preds['p1'].as_data_frame().values sample_submission.to_csv('submission.csv', index=False )<load_from_csv>
train_data.dropna(inplace=True )
Titanic - Machine Learning from Disaster
6,723,741
def ensemble() : stacked_0 = pd.read_csv('/kaggle/input/siim-isic-melanoma-classification/sample_submission.csv') stacked_1 = pd.read_csv('/kaggle/input/melanoma-submissions2/submissionImage.csv') stacked_2 = pd.read_csv('/kaggle/input/melanoma-submissions2/submissionTabular.csv') stacked_4 = pd.read_csv('/kaggle/input/melanoma-submissions2/submission_multiple_data_source.csv') stacked_3 = pd.read_csv('.. /input/csv001/datasets_766461_1344911_submission_rank-then-blend.csv') stacked_5 = pd.read_csv('.. /input/csv001/submission0.9504.csv') stacked_6 = pd.read_csv('.. /input/csv001/submission_efnet.csv') stacked_7 = pd.read_csv('.. /input/csv001/submission_models_blended.csv') sub = pd.DataFrame() sub['image_name'] = stacked_0['image_name'] sub['target'] = np.exp(np.mean( [ stacked_1['target'].apply(lambda x: np.log2(x)) , \ stacked_2['target'].apply(lambda x: np.log2(x)) , \ stacked_4['target'].apply(lambda x: np.log2(x)) , \ stacked_3['target'].apply(lambda x: np.log2(x)) , stacked_5['target'].apply(lambda x: np.log2(x)) , \ stacked_6['target'].apply(lambda x: np.log2(x)) , \ stacked_7['target'].apply(lambda x: np.log2(x)) , ], axis=0)) sub.to_csv('submission.csv', index=False, float_format='%.6f' )<load_pretrained>
sex = pd.get_dummies(train_data['Sex'],drop_first=True) embark = pd.get_dummies(train_data['Embarked'],drop_first=True )
Titanic - Machine Learning from Disaster
6,723,741
ensemble() <load_from_csv>
train_data.drop(['Sex','Embarked', 'Name', 'Ticket'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
6,723,741
stacked_n = pd.read_csv('submission.csv') stacked_n.head()<install_modules>
train_data = pd.concat([train_data, sex, embark], axis=1 )
Titanic - Machine Learning from Disaster
6,723,741
!pip install efficientnet-pytorch<import_modules>
test_data['Age'] = test_data[['Age','Pclass']].apply(age_fill, axis=1 )
Titanic - Machine Learning from Disaster
6,723,741
from pathlib import Path import pandas as pd from torch.utils.data import Dataset,DataLoader from PIL import Image from torchvision import transforms as T import torch.nn as nn import torch import torch.nn.functional as F from sklearn.model_selection import GroupKFold import numpy as np from fastprogress.fastprogress import master_bar, progress_bar from sklearn.metrics import accuracy_score, roc_auc_score from efficientnet_pytorch import EfficientNet from torchvision import models import pdb import albumentations as A from albumentations.pytorch.transforms import ToTensor import matplotlib.pyplot as plt import pickle<define_variables>
def fill_fare(col):
    """Impute a missing fare with the truncated mean fare of the row's class.

    `col` is a (Pclass, Fare) pair; an existing fare is returned unchanged.
    Means are computed from the global `train_data` frame.
    """
    passenger_class, fare = col[0], col[1]
    # Guard clause: nothing to impute when the fare is present.
    if not pd.isnull(fare):
        return fare
    # Classes 1 and 2 use their own class mean; anything else falls back to class 3.
    if passenger_class == 1:
        class_rows = train_data[train_data['Pclass'] == 1]
    elif passenger_class == 2:
        class_rows = train_data[train_data['Pclass'] == 2]
    else:
        class_rows = train_data[train_data['Pclass'] == 3]
    return int(class_rows['Fare'].mean())
Titanic - Machine Learning from Disaster
6,723,741
path = Path('.. /input/jpeg-melanoma-256x256/') df_path = Path('.. /input/melanoma-256x256/') im_sz = 256 bs = 16<load_from_csv>
# Impute missing test-set fares with the class-mean fare (fill_fare above).
test_data['Fare'] = test_data[['Pclass','Fare']].apply(fill_fare, axis=1 )
Titanic - Machine Learning from Disaster
6,723,741
train_fnames = list_files(path/'train') df = pd.read_csv(df_path/'train.csv') df.head()<count_values>
# Cabin is mostly missing; drop it from the test set as was done for training.
test_data.drop('Cabin', axis=1, inplace=True )
Titanic - Machine Learning from Disaster
6,723,741
df.target.value_counts() ,df.shape<split>
# One-hot encode the test set's Sex and Embarked, mirroring the training encoding.
sex = pd.get_dummies(test_data['Sex'],drop_first=True) embark = pd.get_dummies(test_data['Embarked'],drop_first=True )
Titanic - Machine Learning from Disaster
6,723,741
def get_train_val_split(df):
    """Split rows into train / validation folds by their tfrecord id.

    Rows with tfrecord == -1 are discarded. Records whose id falls in the
    first 12 fold indices (0..11, after filtering) become training data,
    everything else validation. Both frames come back with reset_index(),
    keeping the prior positional index as an 'index' column.
    """
    kept = df[df.tfrecord != -1].reset_index(drop=True)
    train_ids = list(range(len(kept.tfrecord.unique())))[:12]
    in_train = kept.tfrecord.apply(lambda rec: rec in train_ids)
    return kept[in_train].reset_index(), kept[~in_train].reset_index()
# Remove raw categorical / identifier columns from the test set, as for training.
test_data.drop(['Sex','Embarked', 'Name', 'Ticket'], axis=1, inplace=True )
Titanic - Machine Learning from Disaster
6,723,741
class MelanomaDataset(Dataset):
    """Dataset over a dataframe of image names, loading JPEGs from im_path.

    In test mode (`is_test=True`) items are transformed images only; in
    train mode each item is an (image, target-tensor) pair.
    """

    def __init__(self, df, im_path, transforms=None, is_test=False):
        self.df = df
        self.im_path = im_path
        self.transforms = transforms
        self.is_test = is_test

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        # Images are stored as <im_path>/<image_name>.jpg
        img = Image.open(f"{self.im_path}/{row['image_name']}.jpg")
        if self.transforms:
            # Albumentations-style call signature: dict in, dict out.
            img = self.transforms(**{"image": np.array(img)})["image"]
        if self.is_test:
            return img
        return img, torch.tensor([row['target']], dtype=torch.float32)

    def __len__(self):
        return self.df.shape[0]
# Append the dummy-encoded Sex and Embarked columns onto the test frame.
test_data = pd.concat([test_data, sex, embark], axis=1 )
Titanic - Machine Learning from Disaster
6,723,741
# Binary classifier head on a pretrained EfficientNet backbone: pooled
# backbone features -> single-logit Linear. pool_type is any adaptive
# pooling callable (default adaptive average pool to 1x1).
class MelanomaEfficientNet(nn.Module): def __init__(self,model_name='efficientnet-b0',pool_type=F.adaptive_avg_pool2d): super().__init__() self.pool_type = pool_type self.backbone = EfficientNet.from_pretrained(model_name) in_features = getattr(self.backbone,'_fc' ).in_features self.classifier = nn.Linear(in_features,1) def forward(self,x): features = self.pool_type(self.backbone.extract_features(x),1) features = features.view(x.size(0),-1) return self.classifier(features )<train_model>
# Train a random forest on the engineered features and write my_submission.csv.
y = train_data["Survived"] features = ['Pclass','Age','SibSp','Parch','Fare','male','Q','S'] X = train_data[features] X_test = test_data[features] model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X, y) predictions = model.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
6,723,741
# Training plumbing: device selection, model+optimizer construction (with an
# optional frozen backbone), one optimizer/scheduler step per training batch,
# one loss+sigmoid-output evaluation per validation batch, and DataLoader
# construction for train/valid frames (valid uses double batch size, no shuffle).
def get_device() : return torch.device("cuda")if torch.cuda.is_available() else torch.device("cpu") def get_model(model_name='efficientnet-b0',lr=1e-5,wd=0.01,freeze_backbone=False,opt_fn=torch.optim.AdamW,device=None): device = device if device else get_device() model = MelanomaEfficientNet(model_name=model_name) if freeze_backbone: for parameter in model.backbone.parameters() : parameter.requires_grad = False opt = opt_fn(model.parameters() ,lr=lr,weight_decay=wd) model = model.to(device) return model, opt def training_step(xb,yb,model,loss_fn,opt,device,scheduler): xb,yb = xb.to(device), yb.to(device) out = model(xb) opt.zero_grad() loss = loss_fn(out,yb) loss.backward() opt.step() scheduler.step() return loss.item() def validation_step(xb,yb,model,loss_fn,device): xb,yb = xb.to(device), yb.to(device) out = model(xb) loss = loss_fn(out,yb) out = torch.sigmoid(out) return loss.item() ,out def get_data(train_df,valid_df,train_tfms,test_tfms,bs): train_ds = MelanomaDataset(df=train_df,im_path=path/'train',transforms=train_tfms) valid_ds = MelanomaDataset(df=valid_df,im_path=path/'train',transforms=test_tfms) train_dl = DataLoader(dataset=train_ds,batch_size=bs,shuffle=True,num_workers=4) valid_dl = DataLoader(dataset=valid_ds,batch_size=bs*2,shuffle=False,num_workers=4) return train_dl,valid_dl<train_model>
# Fit a logistic-regression baseline on the same feature matrix.
LogModel = LogisticRegression() LogModel.fit(X,y )
Titanic - Machine Learning from Disaster
6,723,741
# Full training loop: cosine-annealed LR over epochs*len(train_dl) steps,
# per-epoch train pass then no-grad validation pass collecting sigmoid
# predictions, ROC-AUC logged per epoch via fastprogress. Returns the
# trained model and the list of per-epoch validation ROC-AUCs.
def fit(epochs,model,train_dl,valid_dl,opt,device=None,loss_fn=F.binary_cross_entropy_with_logits): device = device if device else get_device() scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(opt, len(train_dl)*epochs) val_rocs = [] mb = master_bar(range(epochs)) mb.write(['epoch','train_loss','valid_loss','val_roc'],table=True) for epoch in mb: trn_loss,val_loss = 0.0,0.0 val_preds = np.zeros(( len(valid_dl.dataset),1)) val_targs = np.zeros(( len(valid_dl.dataset),1)) model.train() for xb,yb in progress_bar(train_dl,parent=mb): trn_loss += training_step(xb,yb,model,loss_fn,opt,device,scheduler) trn_loss /= mb.child.total model.eval() with torch.no_grad() : for i,(xb,yb)in enumerate(progress_bar(valid_dl,parent=mb)) : loss,out = validation_step(xb,yb,model,loss_fn,device) val_loss += loss bs = xb.shape[0] val_preds[i*bs:i*bs+bs] = out.cpu().numpy() val_targs[i*bs:i*bs+bs] = yb.cpu().numpy() val_loss /= mb.child.total val_roc = roc_auc_score(val_targs.reshape(-1),val_preds.reshape(-1)) val_rocs.append(val_roc) mb.write([epoch,f'{trn_loss:.6f}',f'{val_loss:.6f}',f'{val_roc:.6f}'],table=True) return model,val_rocs<load_from_csv>
Logpred = LogModel.predict(X_test )
Titanic - Machine Learning from Disaster
6,723,741
<set_options><EOS>
# Overwrites my_submission.csv with the logistic-regression predictions.
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': Logpred}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
5,106,186
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<concatenate>
import numpy as np import pandas as pd import os from keras import models, layers, Sequential from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
5,106,186
# Albumentations pipeline used at inference time for TTA (rotations, flips,
# color jitter, noise) ending in tensor conversion with ImageNet stats.
# NOTE(review): `p` and `imagenet_stats` are not defined in this chunk —
# presumably set in an earlier cell; confirm before running standalone.
test_tfms = A.Compose([ A.RandomRotate90(p=p), A.Flip(p=p), A.OneOf([ A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, ), A.HueSaturationValue( hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50) ], p=p), A.OneOf([ A.IAAAdditiveGaussianNoise() , A.GaussNoise() , ], p=p), ToTensor(normalize=imagenet_stats) ] )<load_from_csv>
train_df = pd.read_csv('.. /input/train.csv') train_df.head()
Titanic - Machine Learning from Disaster
5,106,186
# Build the EfficientNet-B5 model, load pretrained weights from the dataset,
# and wrap the test dataframe in a Dataset/DataLoader (no shuffling).
test_df = pd.read_csv(path/'test.csv') model, opt = get_model(model_name='efficientnet-b5',lr=1e-4,wd=1e-4) model.load_state_dict(torch.load(f'.. /input/melanomaefficientnetb5/effb5.pth',map_location=device)) test_ds = MelanomaDataset(df=test_df,im_path=path/'test',transforms=test_tfms,is_test=True) test_dl = DataLoader(dataset=test_ds,batch_size=bs*2,shuffle=False,num_workers=4 )<predict_on_test>
test_df = pd.read_csv('.. /input/test.csv') test_df.head()
Titanic - Machine Learning from Disaster
5,106,186
def get_preds(model, device=None, tta=3):
    """Average sigmoid predictions over `tta` augmented passes of test_dl.

    Relies on the module-level `test_ds` / `test_dl`; returns a 1-D numpy
    array of mean probabilities, one per test image.
    """
    if device is None:
        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    accumulated = np.zeros(len(test_ds))
    for pass_idx in range(tta):
        batch_outputs = []
        # Random augmentations in test_tfms make each pass a distinct TTA sample.
        with torch.no_grad():
            for batch in test_dl:
                logits = model(batch.to(device))
                batch_outputs.extend(torch.sigmoid(logits).cpu().numpy())
        accumulated += np.array(batch_outputs).reshape(-1)
        print(f'TTA {pass_idx}')
    return accumulated / tta

preds = get_preds(model, tta=25)
merged_df = pd.concat(( train_df.drop(['Survived'], axis = 1), test_df)) merged_df.head()
Titanic - Machine Learning from Disaster
5,106,186
# Copy the sample submission's layout and overwrite targets with our predictions.
subm = pd.read_csv(path/'sample_submission.csv') subm.target = preds subm.to_csv('submission.csv',index=False )<import_modules>
import seaborn as sb from matplotlib import pyplot as plt
Titanic - Machine Learning from Disaster
5,106,186
import pandas as pd import numpy as np from glob import glob import shutil<load_from_csv>
train_df.groupby(['Pclass'])['Survived'].sum() / train_df.groupby(['Pclass'])['Survived'].count()
Titanic - Machine Learning from Disaster
5,106,186
# Load two 2-class test-prediction files (our run vs. a reference run) to compare.
raw_pred_2cls = pd.read_csv('.. /input/vinbigdata-2class-prediction/2-cls test pred.csv') other_pred_2cls = pd.read_csv('.. /input/temp-submission/2-cls test pred.csv' )<feature_engineering>
train_df.groupby(['Sex'])['Survived'].sum() / train_df.groupby(['Sex'])['Survived'].count()
Titanic - Machine Learning from Disaster
5,106,186
# BUG FIX: the original bound tmp_pred_2cls to the SAME DataFrame object as
# other_pred_2cls, so the in-place flip of 'target' also corrupted
# other_pred_2cls, which is merged and compared further below. Work on an
# explicit copy so the source frame keeps its original values.
tmp_pred_2cls = other_pred_2cls.copy()
tmp_pred_2cls['target'] = 1 - other_pred_2cls['target']
train_df.groupby(['SibSp'])['Survived'].sum() / train_df.groupby(['SibSp'])['Survived'].count()
Titanic - Machine Learning from Disaster
5,106,186
# Align the two prediction sets by image_id for side-by-side comparison.
compare = pd.merge(raw_pred_2cls, other_pred_2cls, on = 'image_id', how = 'left') compare.head()<feature_engineering>
train_df.groupby(['Parch'])['Survived'].sum() / train_df.groupby(['Parch'])['Survived'].count()
Titanic - Machine Learning from Disaster
5,106,186
# Per-image difference between the two runs' target scores (merge suffixes _x/_y).
compare['dif']=compare['target_x']-compare['target_y']<save_to_csv>
train_df['Family'] = train_df['SibSp'] + train_df['Parch']
Titanic - Machine Learning from Disaster
5,106,186
compare.to_csv('compare.csv',index = False )<load_from_csv>
train_df.groupby(['Family'])['Survived'].sum() / train_df.groupby(['Family'])['Survived'].count()
Titanic - Machine Learning from Disaster
5,106,186
# Load the 14-class detection submission and the 2-class filter predictions.
pred_14cls = pd.read_csv('.. /input/vinbigdata-14-class-submission-lb0154/submission.csv') pred_2cls = pd.read_csv('.. /input/vinbigdata-2class-prediction/2-cls test pred.csv') <save_to_csv>
train_df.drop(['Family'], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster
5,106,186
pred_2cls.to_csv('pred_2cls.csv',index = False )<merge>
ticket_num_records = train_df.groupby(['Ticket'] ).size().sort_values(ascending=False ).to_dict() train_df.groupby(['Ticket'] ).size().sort_values(ascending=False ).head()
Titanic - Machine Learning from Disaster
5,106,186
# Attach the 2-class probability to every 14-class row, keyed on image_id.
pred_raw = pd.merge(pred_14cls, pred_2cls, on = 'image_id', how = 'left') pred_raw.head()<merge>
train_df['Companion'] = train_df['Ticket'].apply(lambda x: ticket_num_records[x] )
Titanic - Machine Learning from Disaster
5,106,186
pred = pd.merge(pred_14cls, tmp_pred_2cls, on = 'image_id', how = 'left') pred.head()<count_values>
train_df.groupby(['Companion'])['Survived'].sum() / train_df.groupby(['Companion'])['Survived'].count()
Titanic - Machine Learning from Disaster
5,106,186
pred['PredictionString'].value_counts().iloc[[0]]<groupby>
train_df.drop(['Companion'], axis = 1, inplace = True )
Titanic - Machine Learning from Disaster