Columns: kernel_id (int64, 24.2k–23.3M), prompt (string, lengths 8–1.85M), completetion (string, lengths 1–182k), comp_name (string, lengths 5–57)
14,602,777
print(os.listdir()) model = Ensemble(text.vocab.vectors, padding_idx=text.vocab.stoi[text.pad_token], batch_size=batch_size).cuda() model.load_state_dict(torch.load('checkpoint.pt'))<init_hyperparams>
for dataset in all_data: dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int) train.head()
Titanic - Machine Learning from Disaster
14,602,777
print('Threshold:', search_result['threshold']) submission_list = list(torchtext.data.BucketIterator(dataset=submission_x, batch_size=batch_size, sort=False, train=False)) pred = [] with torch.no_grad(): for submission_batch in submission_list: model.eval() x = submission_batch.text.cuda() pred += torch.sigmoid(model(x).squeeze(1)).cpu().data.numpy().tolist() pred = np.array(pred) df_subm = pd.DataFrame() df_subm['qid'] = [qid.vocab.itos[j] for i in submission_list for j in i.qid.view(-1).numpy()] df_subm['prediction'] = (pred > search_result['threshold']).astype(int) print(df_subm.head()) df_subm.to_csv('submission.csv', index=False)<define_variables>
train = train.drop(['Fare'], axis=1) test = test.drop(['Fare'], axis=1) all_data = [train,test] train.head()
Titanic - Machine Learning from Disaster
14,602,777
list_train = [filepath for filepath in list_paths if "train/" in filepath] shuffle(list_train) list_test = [filepath for filepath in list_paths if "test/" in filepath] list_train = list_train list_test = list_test index = [os.path.basename(filepath) for filepath in list_test]<define_variables>
for dataset in all_data: dataset['Title'] = dataset.Name.str.extract(r'([A-Za-z]+)\.', expand=False) pd.crosstab(train['Title'], train['Sex'])
Titanic - Machine Learning from Disaster
14,602,777
list_classes = list(set([os.path.dirname(filepath).split(os.sep)[-1] for filepath in list_paths if "train" in filepath]))<define_variables>
for dataset in all_data: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
Titanic - Machine Learning from Disaster
14,602,777
list_classes = ['Sony-NEX-7', 'Motorola-X', 'HTC-1-M7', 'Samsung-Galaxy-Note3', 'Motorola-Droid-Maxx', 'iPhone-4s', 'iPhone-6', 'LG-Nexus-5x', 'Samsung-Galaxy-S4', 'Motorola-Nexus-6']<categorify>
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} for dataset in all_data: dataset['Title'] = dataset['Title'].map(title_mapping) dataset['Title'] = dataset['Title'].fillna(0) train.head()
Titanic - Machine Learning from Disaster
14,602,777
def get_class_from_path(filepath): return os.path.dirname(filepath).split(os.sep)[-1] def read_and_resize(filepath): im_array = np.array(Image.open(filepath), dtype="uint8") pil_im = Image.fromarray(im_array) new_array = np.array(pil_im.resize((256, 256))) return new_array/255 def label_transform(labels): labels = pd.get_dummies(pd.Series(labels)) label_index = labels.columns.values return labels, label_index<prepare_x_and_y>
train = train.drop(['Name', 'PassengerId'], axis=1) test = test.drop(['Name'], axis=1) all_data = [train, test]
Titanic - Machine Learning from Disaster
14,602,777
X_train = np.array([read_and_resize(filepath) for filepath in list_train]) X_test = np.array([read_and_resize(filepath) for filepath in list_test])<categorify>
X_train = train.drop("Survived", axis=1) Y_train = train["Survived"] X_test = test.drop("PassengerId", axis=1).copy()
Titanic - Machine Learning from Disaster
14,602,777
labels = [get_class_from_path(filepath) for filepath in list_train] y, label_index = label_transform(labels) y = np.array(y)<choose_model_class>
random_forest = RandomForestClassifier(n_estimators=100, max_features='auto', criterion='entropy', max_depth=10) random_forest.fit(X_train, Y_train) Y_prediction = random_forest.predict(X_test) random_forest.score(X_train, Y_train) acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2) print(round(acc_random_forest, 2), "%")
Titanic - Machine Learning from Disaster
14,602,777
input_shape = (256, 256, 3) nclass = len(label_index) def get_model(): nclass = len(label_index) inp = Input(shape=input_shape) norm_inp = BatchNormalization()(inp) img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu, padding="same")(norm_inp) img_1 = Convolution2D(16, kernel_size=3, activation=activations.relu, padding="same")(img_1) img_1 = MaxPooling2D(pool_size=(3, 3))(img_1) img_1 = Dropout(rate=0.2)(img_1) img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu, padding="same")(img_1) img_1 = Convolution2D(32, kernel_size=3, activation=activations.relu, padding="same")(img_1) img_1 = MaxPooling2D(pool_size=(3, 3))(img_1) img_1 = Dropout(rate=0.2)(img_1) img_1 = Convolution2D(64, kernel_size=2, activation=activations.relu, padding="same")(img_1) img_1 = Convolution2D(20, kernel_size=2, activation=activations.relu, padding="same")(img_1) img_1 = GlobalMaxPool2D()(img_1) img_1 = Dropout(rate=0.2)(img_1) dense_1 = Dense(20, activation=activations.relu)(img_1) dense_1 = Dense(nclass, activation=activations.softmax)(dense_1) model = models.Model(inputs=inp, outputs=dense_1) opt = optimizers.Adam() model.compile(optimizer=opt, loss=losses.categorical_crossentropy, metrics=['acc']) model.summary() return model<train_model>
clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(X_train, Y_train) y_prediction = clf.predict(X_test) clf.score(X_train, Y_train) acc_clf = round(clf.score(X_train, Y_train) * 100, 2) print(round(acc_clf, 2), "%")
Titanic - Machine Learning from Disaster
14,602,777
model = get_model() file_path="weights.best.hdf5" checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max') early = EarlyStopping(monitor="val_acc", mode="max", patience=1) callbacks_list = [checkpoint, early] history = model.fit(X_train, y, validation_split=0.1, epochs=3, shuffle=True, verbose=2, callbacks=callbacks_list) model.load_weights(file_path) <predict_on_test>
model = LGBMClassifier().fit(X_train, Y_train) y_predict = model.predict(X_test) model.score(X_train, Y_train) acc_model = round(model.score(X_train, Y_train) * 100, 2) print(round(acc_model, 2), "%")
Titanic - Machine Learning from Disaster
14,602,777
predicts = model.predict(X_test) predicts = np.argmax(predicts, axis=1) predicts = [label_index[p] for p in predicts] <save_to_csv>
logreg = LogisticRegression() logreg.fit(X_train, Y_train) Y_pred = logreg.predict(X_test) acc_log = round(logreg.score(X_train, Y_train) * 100, 2) print(round(acc_log, 2), "%")
Titanic - Machine Learning from Disaster
14,602,777
df = pd.DataFrame(columns=['fname', 'camera']) df['fname'] = index df['camera'] = predicts df.to_csv("sub.csv", index=False)<set_options>
decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, Y_train) Y_pred = decision_tree.predict(X_test) acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2) print(round(acc_decision_tree, 2), "%")
Titanic - Machine Learning from Disaster
14,602,777
warnings.filterwarnings("ignore") pd.set_option("display.max_columns", 500) pd.set_option("display.max_rows", 500) register_matplotlib_converters() sns.set()<set_options>
params_xgb = {'colsample_bylevel': 0.7, 'learning_rate': 0.03, 'max_depth': 3, 'n_estimators': 400, 'reg_lambda': 15, 'subsample': 0.5} xgb = XGBClassifier(**params_xgb) y_preds = xgb.fit(X_train, Y_train).predict(X_test) acc_xgb = round(xgb.score(X_train, Y_train) * 100, 2) print(round(acc_xgb, 2), "%")
Titanic - Machine Learning from Disaster
14,602,777
def on_kaggle(): return "KAGGLE_KERNEL_RUN_TYPE" in os.environ<install_modules>
results = pd.DataFrame({ 'Model': ['LGBMClassifier', 'Logistic Regression', 'Random Forest', 'Boosting', 'Decision Tree','xgb'], 'Score': [ acc_model,acc_log, acc_random_forest, acc_clf, acc_decision_tree,acc_xgb]}) result_df = results.sort_values(by='Score', ascending=False) result_df = result_df.set_index('Score') result_df.head(7)
Titanic - Machine Learning from Disaster
14,602,777
if on_kaggle(): os.system("pip install --quiet mlflow_extend")<categorify>
rf = RandomForestClassifier(n_estimators=100) scores = cross_val_score(rf, X_train, Y_train, cv=10, scoring = "accuracy") print("Scores:", scores) print("Mean:", scores.mean()) print("Standard Deviation:", scores.std())
Titanic - Machine Learning from Disaster
14,602,777
def reduce_mem_usage(df, verbose=False): start_mem = df.memory_usage().sum() / 1024 ** 2 int_columns = df.select_dtypes(include=["int"]).columns float_columns = df.select_dtypes(include=["float"]).columns for col in int_columns: df[col] = pd.to_numeric(df[col], downcast="integer") for col in float_columns: df[col] = pd.to_numeric(df[col], downcast="float") end_mem = df.memory_usage().sum() / 1024 ** 2 if verbose: print("Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)".format(end_mem, 100 * (start_mem - end_mem) / start_mem)) return df<load_from_csv>
params_xgb = {'colsample_bylevel': 0.7, 'learning_rate': 0.03, 'max_depth': 3, 'n_estimators': 400, 'reg_lambda': 15, 'subsample': 0.5} xgb = XGBClassifier(**params_xgb) y_preds = xgb.fit(X_train, Y_train).predict(X_test) print("Score:", round(xgb.score(X_train, Y_train) * 100, 4), "%")
Titanic - Machine Learning from Disaster
14,602,777
def read_data(): INPUT_DIR = "/kaggle/input" if on_kaggle() else "input" INPUT_DIR = f"{INPUT_DIR}/m5-forecasting-accuracy" print("Reading files...") calendar = pd.read_csv(f"{INPUT_DIR}/calendar.csv").pipe(reduce_mem_usage) prices = pd.read_csv(f"{INPUT_DIR}/sell_prices.csv").pipe(reduce_mem_usage) sales = pd.read_csv(f"{INPUT_DIR}/sales_train_validation.csv").pipe(reduce_mem_usage) submission = pd.read_csv(f"{INPUT_DIR}/sample_submission.csv").pipe(reduce_mem_usage) print("sales shape:", sales.shape) print("prices shape:", prices.shape) print("calendar shape:", calendar.shape) print("submission shape:", submission.shape) return sales, prices, calendar, submission<define_variables>
submission = pd.DataFrame({ "PassengerId": test['PassengerId'], "Survived": y_preds }) submission.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
8,950,656
sales, prices, calendar, submission = read_data() NUM_ITEMS = sales.shape[0] DAYS_PRED = submission.shape[1] - 1<categorify>
plt.style.use('seaborn-whitegrid')
Titanic - Machine Learning from Disaster
8,950,656
def encode_categorical(df, cols): for col in cols: le = LabelEncoder() not_null = df[col][df[col].notnull()] df[col] = pd.Series(le.fit_transform(not_null), index=not_null.index) return df calendar = encode_categorical(calendar, ["event_name_1", "event_type_1", "event_name_2", "event_type_2"]).pipe(reduce_mem_usage) sales = encode_categorical(sales, ["item_id", "dept_id", "cat_id", "store_id", "state_id"]).pipe(reduce_mem_usage) prices = encode_categorical(prices, ["item_id", "store_id"]).pipe(reduce_mem_usage)<categorify>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.info()
Titanic - Machine Learning from Disaster
8,950,656
def extract_num(ser): return ser.str.extract(r"(\d+)").astype(np.int16) def reshape_sales(sales, submission, d_thresh=0, verbose=True): id_columns = ["id", "item_id", "dept_id", "cat_id", "store_id", "state_id"] product = sales[id_columns] sales = sales.melt(id_vars=id_columns, var_name="d", value_name="demand") sales = reduce_mem_usage(sales) vals = submission[submission["id"].str.endswith("validation")] evals = submission[submission["id"].str.endswith("evaluation")] vals.columns = ["id"] + [f"d_{d}" for d in range(1914, 1914 + DAYS_PRED)] evals.columns = ["id"] + [f"d_{d}" for d in range(1942, 1942 + DAYS_PRED)] evals["id"] = evals["id"].str.replace("_evaluation", "_validation") vals = vals.merge(product, how="left", on="id") evals = evals.merge(product, how="left", on="id") evals["id"] = evals["id"].str.replace("_validation", "_evaluation") if verbose: print("validation") display(vals) print("evaluation") display(evals) vals = vals.melt(id_vars=id_columns, var_name="d", value_name="demand") evals = evals.melt(id_vars=id_columns, var_name="d", value_name="demand") sales["part"] = "train" vals["part"] = "validation" evals["part"] = "evaluation" data = pd.concat([sales, vals, evals], axis=0) del sales, vals, evals data["d"] = extract_num(data["d"]) data = data[data["d"] >= d_thresh] data = data[data["part"] != "evaluation"] gc.collect() if verbose: print("data") display(data) return data def merge_calendar(data, calendar): calendar = calendar.drop(["weekday", "wday", "month", "year"], axis=1) return data.merge(calendar, how="left", on="d") def merge_prices(data, prices): return data.merge(prices, how="left", on=["store_id", "item_id", "wm_yr_wk"])<categorify>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.info()
Titanic - Machine Learning from Disaster
8,950,656
def add_demand_features(df): for diff in [0, 1, 2]: shift = DAYS_PRED + diff df[f"shift_t{shift}"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(shift)) for window in [7, 30, 60, 90, 180]: df[f"rolling_std_t{window}"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(DAYS_PRED).rolling(window).std()) for window in [7, 30, 60, 90, 180]: df[f"rolling_mean_t{window}"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(DAYS_PRED).rolling(window).mean()) for window in [7, 30, 60]: df[f"rolling_min_t{window}"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(DAYS_PRED).rolling(window).min()) for window in [7, 30, 60]: df[f"rolling_max_t{window}"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(DAYS_PRED).rolling(window).max()) df["rolling_skew_t30"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(DAYS_PRED).rolling(30).skew()) df["rolling_kurt_t30"] = df.groupby(["id"])["demand"].transform(lambda x: x.shift(DAYS_PRED).rolling(30).kurt()) return df def add_price_features(df): df["shift_price_t1"] = df.groupby(["id"])["sell_price"].transform(lambda x: x.shift(1)) df["price_change_t1"] = (df["shift_price_t1"] - df["sell_price"]) / (df["shift_price_t1"]) df["rolling_price_max_t365"] = df.groupby(["id"])["sell_price"].transform(lambda x: x.shift(1).rolling(365).max()) df["price_change_t365"] = (df["rolling_price_max_t365"] - df["sell_price"]) / (df["rolling_price_max_t365"]) df["rolling_price_std_t7"] = df.groupby(["id"])["sell_price"].transform(lambda x: x.rolling(7).std()) df["rolling_price_std_t30"] = df.groupby(["id"])["sell_price"].transform(lambda x: x.rolling(30).std()) return df.drop(["rolling_price_max_t365", "shift_price_t1"], axis=1) def add_time_features(df, dt_col): df[dt_col] = pd.to_datetime(df[dt_col]) attrs = ["year", "quarter", "month", "week", "day", "dayofweek"] for attr in attrs: dtype = np.int16 if attr == "year" else np.int8 df[attr] = getattr(df[dt_col].dt, attr).astype(dtype) df["is_weekend"] = df["dayofweek"].isin([5, 6]).astype(np.int8) return df<split>
train_data[train_data['Fare'] > 500]
Titanic - Machine Learning from Disaster
8,950,656
class CustomTimeSeriesSplitter: def __init__(self, n_splits=5, train_days=80, test_days=20, day_col="d"): self.n_splits = n_splits self.train_days = train_days self.test_days = test_days self.day_col = day_col def split(self, X, y=None, groups=None): SEC_IN_DAY = 3600 * 24 sec = (X[self.day_col] - X[self.day_col].iloc[0]) * SEC_IN_DAY duration = sec.max() train_sec = self.train_days * SEC_IN_DAY test_sec = self.test_days * SEC_IN_DAY total_sec = test_sec + train_sec if self.n_splits == 1: train_start = duration - total_sec train_end = train_start + train_sec train_mask = (sec >= train_start) & (sec < train_end) test_mask = sec >= train_end yield sec[train_mask].index.values, sec[test_mask].index.values else: step = DAYS_PRED * SEC_IN_DAY for idx in range(self.n_splits): shift = (self.n_splits - (idx + 1)) * step train_start = duration - total_sec - shift train_end = train_start + train_sec test_end = train_end + test_sec train_mask = (sec > train_start) & (sec <= train_end) if idx == self.n_splits - 1: test_mask = sec > train_end else: test_mask = (sec > train_end) & (sec <= test_end) yield sec[train_mask].index.values, sec[test_mask].index.values def get_n_splits(self): return self.n_splits<define_variables>
test_data[test_data['Fare'] > 500]
Titanic - Machine Learning from Disaster
8,950,656
def show_cv_days(cv, X, dt_col, day_col): for ii, (tr, tt) in enumerate(cv.split(X)): print(f"----- Fold: ({ii + 1} / {cv.n_splits}) -----") tr_start = X.iloc[tr][dt_col].min() tr_end = X.iloc[tr][dt_col].max() tr_days = X.iloc[tr][day_col].max() - X.iloc[tr][day_col].min() + 1 tt_start = X.iloc[tt][dt_col].min() tt_end = X.iloc[tt][dt_col].max() tt_days = X.iloc[tt][day_col].max() - X.iloc[tt][day_col].min() + 1 df = pd.DataFrame({"start": [tr_start, tt_start], "end": [tr_end, tt_end], "days": [tr_days, tt_days]}, index=["train", "test"]) display(df) def plot_cv_indices(cv, X, dt_col, lw=10): n_splits = cv.get_n_splits() _, ax = plt.subplots(figsize=(20, n_splits)) for ii, (tr, tt) in enumerate(cv.split(X)): indices = np.array([np.nan] * len(X)) indices[tt] = 1 indices[tr] = 0 ax.scatter(X[dt_col], [ii + 0.5] * len(indices), c=indices, marker="_", lw=lw, cmap=plt.cm.coolwarm, vmin=-0.2, vmax=1.2) MIDDLE = 15 LARGE = 20 ax.set_xlabel("Datetime", fontsize=LARGE) ax.set_xlim([X[dt_col].min(), X[dt_col].max()]) ax.set_ylabel("CV iteration", fontsize=LARGE) ax.set_yticks(np.arange(n_splits) + 0.5) ax.set_yticklabels(list(range(n_splits))) ax.invert_yaxis() ax.tick_params(axis="both", which="major", labelsize=MIDDLE) ax.set_title("{}".format(type(cv).__name__), fontsize=LARGE) return ax<define_variables>
train_data["Fare"].replace({ 512.3292 : 7.25}, inplace=True) test_data["Fare"].replace({ 512.3292 : 7.25}, inplace=True )
Titanic - Machine Learning from Disaster
8,950,656
features = [ "item_id", "dept_id", "cat_id", "store_id", "state_id", "event_name_1", "event_type_1", "event_name_2", "event_type_2", "snap_CA", "snap_TX", "snap_WI", "sell_price", "shift_t28", "shift_t29", "shift_t30", "rolling_std_t7", "rolling_std_t30", "rolling_std_t60", "rolling_std_t90", "rolling_std_t180", "rolling_mean_t7", "rolling_mean_t30", "rolling_mean_t60", "rolling_mean_t90", "rolling_mean_t180", "rolling_min_t7", "rolling_min_t30", "rolling_min_t60", "rolling_max_t7", "rolling_max_t30", "rolling_max_t60", "rolling_skew_t30", "rolling_kurt_t30", "price_change_t1", "price_change_t365", "rolling_price_std_t7", "rolling_price_std_t30", "year", "quarter", "month", "week", "day", "dayofweek", "is_weekend", ] is_train = data["d"] < 1914 X_train = data[is_train][[day_col] + features].reset_index(drop=True) y_train = data[is_train]["demand"].reset_index(drop=True) X_test = data[~is_train][features].reset_index(drop=True) id_date = data[~is_train][["id", "date"]].reset_index(drop=True) del data gc.collect() print("X_train shape:", X_train.shape) print("X_test shape:", X_test.shape )<train_model>
train_data = train_data[(train_data.PassengerId != 259) & (train_data.PassengerId != 680) & (train_data.PassengerId != 738)]
Titanic - Machine Learning from Disaster
8,950,656
def train_lgb(bst_params, fit_params, X, y, cv, drop_when_train=None): models = [] if drop_when_train is None: drop_when_train = [] for idx_fold, (idx_trn, idx_val) in enumerate(cv.split(X, y)): print(f" ----- Fold: ({idx_fold + 1} / {cv.get_n_splits()}) ----- ") X_trn, X_val = X.iloc[idx_trn], X.iloc[idx_val] y_trn, y_val = y.iloc[idx_trn], y.iloc[idx_val] train_set = lgb.Dataset(X_trn.drop(drop_when_train, axis=1), label=y_trn, categorical_feature=["item_id"]) val_set = lgb.Dataset(X_val.drop(drop_when_train, axis=1), label=y_val, categorical_feature=["item_id"]) model = lgb.train(bst_params, train_set, valid_sets=[train_set, val_set], valid_names=["train", "valid"], **fit_params) models.append(model) del idx_trn, idx_val, X_trn, X_val, y_trn, y_val gc.collect() return models<train_model>
train_data.isna().sum()
Titanic - Machine Learning from Disaster
8,950,656
bst_params = {"boosting_type": "gbdt", "metric": "rmse", "objective": "regression", "n_jobs": -1, "seed": 42, "learning_rate": 0.1, "bagging_fraction": 0.75, "bagging_freq": 10, "colsample_bytree": 0.75} fit_params = {"num_boost_round": 100_000, "early_stopping_rounds": 50, "verbose_eval": 100} models = train_lgb(bst_params, fit_params, X_train, y_train, cv, drop_when_train=[day_col]) del X_train, y_train gc.collect()<compute_test_metric>
test_data.isna().sum()
Titanic - Machine Learning from Disaster
8,950,656
def rmse(y_true, y_pred): return np.sqrt(mean_squared_error(y_true, y_pred))<features_selection>
train_data['Age'] = train_data['Age'].replace(np.nan, 29) test_data['Age'] = test_data['Age'].replace(np.nan, 30)
Titanic - Machine Learning from Disaster
8,950,656
imp_type = "gain" importances = np.zeros(X_test.shape[1]) preds = np.zeros(X_test.shape[0]) for model in models: preds += model.predict(X_test) importances += model.feature_importance(imp_type) preds = preds / cv.get_n_splits() importances = importances / cv.get_n_splits()<save_to_csv>
train_data = train_data.drop(['Cabin'], axis=1) test_data = test_data.drop(['Cabin'], axis=1)
Titanic - Machine Learning from Disaster
8,950,656
def make_submission(test, submission): preds = test[["id", "date", "demand"]] preds = preds.pivot(index="id", columns="date", values="demand").reset_index() preds.columns = ["id"] + ["F" + str(d + 1) for d in range(DAYS_PRED)] vals = submission[["id"]].merge(preds, how="inner", on="id") evals = submission[submission["id"].str.endswith("evaluation")] final = pd.concat([vals, evals]) assert final.drop("id", axis=1).isnull().sum().sum() == 0 assert final["id"].equals(submission["id"]) final.to_csv("submission.csv", index=False)<prepare_output>
train_data['Embarked'] = train_data['Embarked'].replace(np.nan, 'Q') test_data['Fare'] = test_data['Fare'].replace(np.nan, 36.0)
Titanic - Machine Learning from Disaster
8,950,656
make_submission(id_date.assign(demand=preds), submission)<import_modules>
train_data.isna().sum()
Titanic - Machine Learning from Disaster
8,950,656
import os import sys import time import numpy as np import pandas as pd import seaborn as sns from math import sqrt from sklearn import metrics import matplotlib.pyplot as plt from sklearn.metrics import mean_squared_error<load_from_csv>
test_data.isna().sum()
Titanic - Machine Learning from Disaster
8,950,656
df1 = pd.read_csv("../input/elo-blending/3.695.csv") df2 = pd.read_csv("../input/elo-blending/3.696.csv") df3 = pd.read_csv("../input/submit/submit.csv") df4 = pd.read_csv("../input/combined/combining_submission.csv")<load_from_csv>
train_data['Title'] = train_data.Name.apply(lambda name: name.split(',')[1].split('.')[0].strip()) train_data.Title.value_counts()
Titanic - Machine Learning from Disaster
8,950,656
df_base0 = pd.read_csv('../input/elo-blending/3.695.csv', names=["card_id","target0"], skiprows=[0], header=None) df_base1 = pd.read_csv('../input/elo-blending/3.696.csv', names=["card_id","target1"], skiprows=[0], header=None) df_base2 = pd.read_csv('../input/elo-blending/3.6999.csv', names=["card_id","target2"], skiprows=[0], header=None) df_base3 = pd.read_csv('../input/elo-blending/3.69991.csv', names=["card_id","target3"], skiprows=[0], header=None) df_base4 = pd.read_csv('../input/elo-blending/3.699992.csv', names=["card_id","target4"], skiprows=[0], header=None) df_base5 = pd.read_csv('../input/elo-blending/3.70.csv', names=["card_id","target5"], skiprows=[0], header=None) df_base6 = pd.read_csv('../input/elo-blending/3.701.csv', names=["card_id","target6"], skiprows=[0], header=None) df_base7 = pd.read_csv('../input/elo-blending/3.702.csv', names=["card_id","target7"], skiprows=[0], header=None) df_base8 = pd.read_csv('../input/elo-blending/3.703.csv', names=["card_id","target8"], skiprows=[0], header=None) df_base9 = pd.read_csv('../input/elo-blending/3.704.csv', names=["card_id","target9"], skiprows=[0], header=None) df_base10 = pd.read_csv('../input/elo-blending/Blending.csv', names=["card_id","target10"], skiprows=[0], header=None) df_base11 = pd.read_csv('../input/elo-blending/BlendingRLS.csv', names=["card_id","target11"], skiprows=[0], header=None)<merge>
test_data['Title'] = test_data.Name.apply(lambda name: name.split(',')[1].split('.')[0].strip()) test_data.Title.value_counts()
Titanic - Machine Learning from Disaster
8,950,656
df_base = pd.merge(df_base0,df_base1,how='inner',on='card_id') df_base = pd.merge(df_base,df_base2,how='inner',on='card_id') df_base = pd.merge(df_base,df_base3,how='inner',on='card_id') df_base = pd.merge(df_base,df_base4,how='inner',on='card_id') df_base = pd.merge(df_base,df_base5,how='inner',on='card_id') df_base = pd.merge(df_base,df_base6,how='inner',on='card_id') df_base = pd.merge(df_base,df_base7,how='inner',on='card_id') df_base = pd.merge(df_base,df_base8,how='inner',on='card_id') df_base = pd.merge(df_base,df_base9,how='inner',on='card_id') df_base = pd.merge(df_base,df_base10,how='inner',on='card_id') df_base = pd.merge(df_base,df_base11,how='inner',on='card_id')<compute_test_metric>
normalized_titles = { "Capt": "Officer", "Col": "Officer", "Major": "Officer", "Jonkheer": "Royal", "Don": "Royal", "Sir": "Royal", "Dr": "Officer", "Rev": "Officer", "the Countess": "Royal", "Dona": "Royal", "Mme": "Mrs", "Mlle": "Miss", "Ms": "Mrs", "Mr": "Mr", "Mrs": "Mrs", "Miss": "Miss", "Master": "Master", "Lady": "Royal" } train_data.Title = train_data.Title.map(normalized_titles) test_data.Title = test_data.Title.map(normalized_titles)
Titanic - Machine Learning from Disaster
8,950,656
M = np.zeros([df_base.iloc[:,1:].shape[1],df_base.iloc[:,1:].shape[1]]) for i in np.arange(M.shape[1]): for j in np.arange(M.shape[1]): M[i,j] = sqrt(metrics.mean_squared_error(df_base.iloc[:,i+1], df_base.iloc[:,j+1]))<load_from_csv>
test_data.Title.value_counts()
Titanic - Machine Learning from Disaster
8,950,656
df_base0 = pd.read_csv('../input/elo-blending/3.695.csv', names=["card_id","target0"], skiprows=[0], header=None) df_base1 = pd.read_csv('../input/elo-blending/3.696.csv', names=["card_id","target1"], skiprows=[0], header=None) df_base10 = pd.read_csv('../input/elo-blending/Blending.csv', names=["card_id","target10"], skiprows=[0], header=None) df_base11 = pd.read_csv('../input/elo-blending/BlendingRLS.csv', names=["card_id","target11"], skiprows=[0], header=None) df_base = pd.merge(df_base0, df_base1, how='inner', on='card_id') df_base = pd.merge(df_base, df_base10, how='inner', on='card_id') df_base = pd.merge(df_base, df_base11, how='inner', on='card_id') plt.figure(figsize=(12,8)) sns.heatmap(df_base.iloc[:,1:].corr(), annot=True, fmt=".2f")<compute_test_metric>
train_data.Title.value_counts()
Titanic - Machine Learning from Disaster
8,950,656
M = np.zeros([df_base.iloc[:,1:].shape[1],df_base.iloc[:,1:].shape[1]]) for i in np.arange(M.shape[1]): for j in np.arange(M.shape[1]): M[i,j] = sqrt(metrics.mean_squared_error(df_base.iloc[:,i+1], df_base.iloc[:,j+1]))<feature_engineering>
le = LabelEncoder() train_data["Sex"] = le.fit_transform(train_data["Sex"].values) test_data["Sex"] = le.fit_transform(test_data["Sex"].values)
Titanic - Machine Learning from Disaster
8,950,656
df_base['target'] = df_base_median df_base['target4'] = df4['target']<count_values>
train_data = train_data.drop(['Ticket'], axis=1) test_data = test_data.drop(['Ticket'], axis=1)
Titanic - Machine Learning from Disaster
8,950,656
df_final = np.zeros(len(df_base)) a = -10*np.log2(10) thresh = -14 for i in range(len(df3)-1): if df3['target'][i] < thresh: df_final[i] = a else: df_final[i] = df_base['target'][i] pd.Series(df_final).value_counts().head(1)<feature_engineering>
train_data["Embarked"].value_counts()
Titanic - Machine Learning from Disaster
8,950,656
for i in range(len(df_final)-1): if df_final[i] > magic: df_final[i] = df_final[i] - abs(np.median(df_final)) / 8 + 0.001<save_to_csv>
train_data["Embarked"] = le.fit_transform(train_data["Embarked"].values) test_data["Embarked"] = le.fit_transform(test_data["Embarked"].values )
Titanic - Machine Learning from Disaster
8,950,656
df_finall = pd.DataFrame(df_base['card_id']) df_finall['target'] = df_final df_finall[['card_id','target']].to_csv("good_output.csv", index=False)<import_modules>
train_data["Name"] = le.fit_transform(train_data["Name"].values) test_data["Name"] = le.fit_transform(test_data["Name"].values )
Titanic - Machine Learning from Disaster
8,950,656
warnings.simplefilter(action='ignore', category=FutureWarning) plt.style.use('seaborn') sns.set(font_scale=1)<load_from_csv>
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from xgboost import XGBClassifier import lightgbm as gbm from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold, cross_val_score from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC, LinearSVC from sklearn.pipeline import Pipeline from sklearn.metrics import accuracy_score from sklearn.metrics import r2_score
Titanic - Machine Learning from Disaster
8,950,656
df_train = pd.read_csv('../input/vsb-power-line-fault-detection/metadata_train.csv') df_train = df_train.set_index(['id_measurement', 'phase']) X = np.load("../input/folk-base-neural-network-using-lstm/X.npy") y = np.load("../input/folk-base-neural-network-using-lstm/y.npy") features = np.load("../input/folk-base-neural-network-using-lstm/features.npy")<categorify>
features = ["Pclass","Sex","Age","SibSp","Fare"] X = train_data[features] y = train_data['Survived'] X_train,X_test, y_train,y_test = train_test_split(X,y,test_size=0.15) sc_X = MinMaxScaler() X_train = sc_X.fit_transform(X_train) X_test = sc_X.transform(X_test )
Titanic - Machine Learning from Disaster
8,950,656
def augment(x,y,t=2): xs,xn = [],[] for i in range(t): mask = y>0 x1 = x[mask].copy() ids = np.arange(x1.shape[0]) for c in range(x1.shape[1]): np.random.shuffle(ids) x1[:,c] = x1[ids][:,c] xs.append(x1) for i in range(t//2): mask = y==0 x1 = x[mask].copy() ids = np.arange(x1.shape[0]) for c in range(x1.shape[1]): np.random.shuffle(ids) x1[:,c] = x1[ids][:,c] xn.append(x1) xs = np.vstack(xs) xn = np.vstack(xn) ys = np.ones(xs.shape[0]) yn = np.zeros(xn.shape[0]) x = np.vstack([x,xs,xn]) y = np.concatenate([y,ys,yn]) return x,y<train_model>
pipelineone = Pipeline([('RandomForest', RandomForestClassifier())]) param_grid = {'RandomForest__max_features': ['auto'], 'RandomForest__min_samples_leaf': [1, 2, 4], 'RandomForest__min_samples_split': [2, 3, 4], 'RandomForest__n_estimators': [100, 200, 300]} model = GridSearchCV(pipelineone, param_grid, cv=None) model.fit(X_train, y_train) y_pred_sub = model.predict(test_data[features]) y_pred_random = model.predict(X_test) forest = accuracy_score(y_test, y_pred_random) print(forest)
Titanic - Machine Learning from Disaster
8,950,656
%%time X, y = augment(X,y) print(X.shape, y.shape)<load_pretrained>
pipelinetwo = Pipeline([('logisticregression', LogisticRegression(max_iter=100))]) param_grid = {'logisticregression__penalty': ['l2'], 'logisticregression__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000], 'logisticregression__solver': ['liblinear']} model = GridSearchCV(pipelinetwo, param_grid, cv=None) model.fit(X_train, y_train) model = LogisticRegression() model.fit(X_train, y_train) y_pred = model.predict(X_test) logistic = accuracy_score(y_test, y_pred) print(logistic)
Titanic - Machine Learning from Disaster
8,950,656
features = np.load('../input/vsb-aug-features/aug_features.npy') features.shape<set_options>
xg = XGBClassifier() xg.fit(X_train, y_train) y_pred = xg.predict(X_test) XGB = accuracy_score(y_test, y_pred) print(XGB)
Titanic - Machine Learning from Disaster
8,950,656
class Attention(Layer): def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias self.step_dim = step_dim self.features_dim = 0 super(Attention, self).__init__(**kwargs) def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight((input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight((input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): return None def call(self, x, mask=None): features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) if mask is not None: a *= K.cast(mask, K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], self.features_dim<compute_test_metric>
KN = KNeighborsClassifier(n_neighbors=3) KN.fit(X_train, y_train) y_pred = KN.predict(X_test) knn = accuracy_score(y_test, y_pred) print(knn)
Titanic - Machine Learning from Disaster
8,950,656
def matthews_correlation(y_true, y_pred): y_pred_pos = K.round(K.clip(y_pred, 0, 1)) y_pred_neg = 1 - y_pred_pos y_pos = K.round(K.clip(y_true, 0, 1)) y_neg = 1 - y_pos tp = K.sum(y_pos * y_pred_pos) tn = K.sum(y_neg * y_pred_neg) fp = K.sum(y_neg * y_pred_pos) fn = K.sum(y_pos * y_pred_neg) numerator = (tp * tn - fp * fn) denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) return numerator / (denominator + K.epsilon())<compute_test_metric>
svc = SVC() svc.fit(X_train, y_train) y_pred = svc.predict(X_test) SVC = accuracy_score(y_test, y_pred) print(SVC)
Titanic - Machine Learning from Disaster
8,950,656
def threshold_search(y_true, y_proba): best_threshold = 0 best_score = 0 for threshold in tqdm([i * 0.01 for i in range(100)]): score = K.eval(matthews_correlation(y_true.astype(np.float64), (y_proba > threshold).astype(np.float64))) if score > best_score: best_threshold = threshold best_score = score search_result = {'threshold': best_threshold, 'matthews_correlation': best_score} return search_result<choose_model_class>
lsvc = LinearSVC(random_state=0) lsvc.fit(X_train, y_train) y_pred = lsvc.predict(X_test) LSVC = accuracy_score(y_test, y_pred) print(LSVC)
Titanic - Machine Learning from Disaster
8,950,656
def model_lstm(input_shape, feat_shape): inp = Input(shape=(input_shape[1], input_shape[2],)) feat = Input(shape=(feat_shape[1],)) bi_lstm_1 = Bidirectional(CuDNNLSTM(128, return_sequences=True), merge_mode='concat')(inp) bi_lstm_2 = Bidirectional(CuDNNGRU(64, return_sequences=True), merge_mode='concat')(bi_lstm_1) attention = Attention(input_shape[1])(bi_lstm_2) x = concatenate([attention, feat], axis=1) x = Dense(64, activation="relu")(x) x = Dense(1, activation="sigmoid")(x) model = Model(inputs=[inp, feat], outputs=x) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=[matthews_correlation]) return model<split>
finalleaderboard = { "Random Forest": forest, "logistic regression": logistic, "XGB": XGB, "KNeighborsClassifier": knn, "SVC": SVC, "Linear SVC": LSVC } finalleaderboard = pd.DataFrame.from_dict(finalleaderboard, orient='index', columns=['Accuracy']) print(finalleaderboard)
Titanic - Machine Learning from Disaster
8,950,656
N_SPLITS = 5 splits = list(StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=2019).split(X, y)) preds_val = [] y_val = [] for idx, (train_idx, val_idx) in enumerate(splits): K.clear_session() print("Beginning fold {}".format(idx+1)) train_X, train_feat, train_y, val_X, val_feat, val_y = X[train_idx], features[train_idx], y[train_idx], X[val_idx], features[val_idx], y[val_idx] model = model_lstm(train_X.shape, features.shape) ckpt = ModelCheckpoint('weights_{}.h5'.format(idx), save_best_only=True, save_weights_only=True, verbose=1, monitor='val_matthews_correlation', mode='max') model.fit([train_X, train_feat], train_y, batch_size=128, epochs=50, validation_data=([val_X, val_feat], val_y), callbacks=[ckpt]) model.load_weights('weights_{}.h5'.format(idx)) preds_val.append(model.predict([val_X, val_feat], batch_size=512)) y_val.append(val_y) preds_val = np.concatenate(preds_val)[..., 0] y_val = np.concatenate(y_val) preds_val.shape, y_val.shape<compute_test_metric>
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': y_pred_sub}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
8,950,656
def matthews_correlation(y_true, y_pred): y_pred = tf.convert_to_tensor(y_pred, np.float64) y_true = tf.convert_to_tensor(y_true, np.float64) y_pred_pos = K.round(K.clip(y_pred, 0, 1)) y_pred_neg = 1 - y_pred_pos y_pos = K.round(K.clip(y_true, 0, 1)) y_neg = 1 - y_pos tp = K.sum(y_pos * y_pred_pos) tn = K.sum(y_neg * y_pred_neg) fp = K.sum(y_neg * y_pred_pos) fn = K.sum(y_pos * y_pred_neg) numerator = (tp * tn - fp * fn) denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) return numerator / (denominator + K.epsilon())<compute_test_metric>
print(np.count_nonzero(y_pred))
Titanic - Machine Learning from Disaster
14,635,724
optimal_values = threshold_search(y_val, preds_val) best_threshold = optimal_values['threshold'] best_score = optimal_values['matthews_correlation']<load_from_csv>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv") train_data.head()
Titanic - Machine Learning from Disaster
14,635,724
meta_test = pd.read_csv('../input/vsb-power-line-fault-detection/metadata_test.csv') X_test_input = np.load("../input/folk-base-neural-network-using-lstm/X_test.npy") features_test = np.load("../input/folk-base-neural-network-using-lstm/features_test.npy") submission = pd.read_csv('../input/vsb-power-line-fault-detection/sample_submission.csv')<predict_on_test>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv") test_data.head()
Titanic - Machine Learning from Disaster
14,635,724
preds_test = [] for i in range(N_SPLITS): model.load_weights('weights_{}.h5'.format(i)) pred = model.predict([X_test_input, features_test], batch_size=300, verbose=1) pred_3 = [] for pred_scalar in pred: for _ in range(3): pred_3.append(pred_scalar) preds_test.append(pred_3)<compute_test_metric>
women = train_data.loc[train_data.Sex == 'female']["Survived"] rate_women = sum(women)/len(women) print("% of women who survived:", rate_women)
Titanic - Machine Learning from Disaster
14,635,724
optimal_values = threshold_search(y_val, preds_val) best_threshold = optimal_values['threshold'] best_score = optimal_values['matthews_correlation'] <data_type_conversions>
men = train_data.loc[train_data.Sex == 'male']["Survived"] rate_men = sum(men)/len(men) print("% of men who survived:", rate_men)
Titanic - Machine Learning from Disaster
14,635,724
preds_test = (np.squeeze(np.mean(preds_test, axis=0)) > best_threshold).astype(int) preds_test.shape<save_to_csv>
train_data["Age"] = train_data["Age"].fillna(train_data["Age"].median()) train_data["Embarked"] = train_data["Embarked"].fillna('S') train_data.loc[train_data["Embarked"] == "S", "Embarked"] = 0 train_data.loc[train_data["Embarked"] == "C", "Embarked"] = 1 train_data.loc[train_data["Embarked"] == "Q", "Embarked"] = 2 train_data.loc[train_data["Sex"] == "male", "Sex"] = 0 train_data.loc[train_data["Sex"] == "female", "Sex"] = 1
Titanic - Machine Learning from Disaster
14,635,724
submission['target'] = preds_test submission.to_csv('submission.csv', index=False) submission.head()<load_from_csv>
test_data["Age"] = test_data["Age"].fillna(test_data["Age"].median()) test_data["Fare"] = test_data["Fare"].fillna(test_data["Fare"].median()) test_data.loc[test_data["Embarked"] == "S", "Embarked"] = 0 test_data.loc[test_data["Embarked"] == "C", "Embarked"] = 1 test_data.loc[test_data["Embarked"] == "Q", "Embarked"] = 2 test_data.loc[test_data["Sex"] == "male", "Sex"] = 0 test_data.loc[test_data["Sex"] == "female", "Sex"] = 1
Titanic - Machine Learning from Disaster
14,635,724
v_raw_train = pq.read_pandas('../input/vsb-power-line-fault-detection/train.parquet').to_pandas().values meta_train = np.loadtxt('../input/vsb-power-line-fault-detection/metadata_train.csv', skiprows=1, delimiter=',') y_train = meta_train[:, 3].astype(bool) print(v_raw_train.shape)<define_search_space>
y = train_data["Survived"] features = ["Pclass", "Sex", 'Age',"SibSp", "Parch",'Embarked'] X = pd.get_dummies(train_data[features]) X_test = pd.get_dummies(test_data[features]) RF1 = RandomForestClassifier(n_estimators=700, max_depth=7, max_features=5 , n_jobs=-1) adaboost = AdaBoostClassifier(RF1,n_estimators=10) adaboost.fit(X, y )
Titanic - Machine Learning from Disaster
14,635,724
def compute_spectra(v_raw, *, m=1000): percentile = (100, 99, 95, 0, 1, 5) n = v_raw.shape[1] length = v_raw.shape[0] // m n_spectra = len(percentile) mean_signal = np.zeros((n, length), dtype='float32') percentile_spectra = np.zeros((n, length, n_spectra), dtype='float32') print('computing spectra...', flush=True) for i in tqdm(range(n)): v = v_raw[:, i].astype('float32').reshape(-1, m) / 128.0 mean = np.mean(v, axis=1) s = np.abs(np.percentile(v, percentile, axis=1) - mean) h = np.percentile(s, 5.0) s = np.maximum(0.0, s - h) mean_signal[i, :] = mean percentile_spectra[i, :, :] = s.T d = {} d['mean'] = mean_signal d['percentile'] = percentile_spectra return d spec_train = compute_spectra(v_raw_train) print('done.')<init_hyperparams>
scores = model_selection.cross_val_score(adaboost, X, y, cv=10) print('The scores mean is:', scores.mean()) print('The Best Score:', scores.max()) predictions = adaboost.predict(X_test)
Titanic - Machine Learning from Disaster
14,635,724
<normalization><EOS>
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
14,525,173
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_from_csv>
pd.options.mode.chained_assignment = None
Titanic - Machine Learning from Disaster
14,525,173
knowledge_data = pd.read_csv('../input/vsb-knowledge-0744/sub_ens_v16.csv')<load_from_csv>
test_data = pd.read_csv('../input/titanic/test.csv') train_data = pd.read_csv('../input/titanic/train.csv') comb = [train_data, test_data]
Titanic - Machine Learning from Disaster
14,525,173
if 'v_raw_train' in globals(): del v_raw_train id_test = np.loadtxt('../input/vsb-power-line-fault-detection/metadata_test.csv', skiprows=1, delimiter=',')[:, 0].astype(int) n_test = len(id_test) X_tests = [] n_subset = 4 nread = 0 for i_subset in range(n_subset): ibegin = 8712 + 3*int(n_test // 3 * (i_subset/n_subset)) iend = 8712 + 3*int(n_test // 3 * ((i_subset + 1)/n_subset)) print('Loading %d/%d; signal_id %d - %d...' % (i_subset, n_subset, ibegin, iend)) v_raw_test = pq.read_pandas('../input/vsb-power-line-fault-detection/test.parquet', columns=[str(i) for i in range(ibegin, iend)]).to_pandas().values nread += v_raw_test.shape[1] X = compute_features(v_raw_test) X_tests.append(X) print('%d/%d test data processed.' % (nread, n_test)) del v_raw_test X_test = np.concatenate(X_tests, axis=0) assert(X_test.shape[0] == id_test.shape[0] // 3) del X_tests print('X_test computation done. shape', X_test.shape)<concatenate>
for ds in comb: age_avg = ds['Age'].mean() age_std = ds['Age'].std() age_nan = ds['Age'].isnull().sum() age_rand_list = np.random.randint(age_avg - age_std, age_avg + age_std, size=age_nan) ds['Age'][np.isnan(ds['Age'])] = age_rand_list ds['Age'] = ds['Age'].astype(int)
Titanic - Machine Learning from Disaster
14,525,173
len_train=len(X_all3) y_list=knowledge_data['target'].values y_test=[] for j in range(0,len(y_list),3): y_test.append(y_list[j]) y_test=np.asarray(y_test) del knowledge_data print(X_all3.shape,y_all3.shape) print(X_test.shape,y_test.shape) X_all3=np.concatenate([X_all3,X_test]) y_all3=np.concatenate([y_all3,y_test]) <train_model>
emb_mode = comb[0]['Embarked'].mode().at[0] comb[0]['Embarked'] = comb[0]['Embarked'].fillna(emb_mode)
Titanic - Machine Learning from Disaster
14,525,173
n_splits = 5 models = [] scores = np.zeros(n_splits) print('Training...') print('MCC training & cv') seeds = [0, 42, 1204, 2019] for seed in seeds: splits = list(sklearn.model_selection.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed).split(X_all3, y_all3)) for i, (idx_train, idx_cv) in enumerate(splits): X_train = X_all3[idx_train, :] y_train = y_all3[idx_train] X_cv = X_all3[idx_cv, :] y_cv = y_all3[idx_cv] learning_rate = 0.006 model = CatBoostClassifier(learning_rate=learning_rate, od_type='IncToDec', loss_function='Logloss', use_best_model=True, eval_metric='MCC') model.fit(X_train, y_train.astype(float), silent=True, eval_set=(X_cv, y_cv.astype(float))) y_predict_train = model.predict(X_train) y_predict_cv = model.predict(X_cv) score_cv = sklearn.metrics.matthews_corrcoef(y_cv, y_predict_cv) models.append(model) scores[i] = score_cv print('CV scores %.3f ± %.3f' % (np.mean(scores), np.std(scores)))<load_from_csv>
fare = float(comb[1]['Fare'].mode()) comb[1]['Fare'] = comb[1]['Fare'].fillna(fare)
Titanic - Machine Learning from Disaster
14,525,173
<predict_on_test>
for ds in comb: ds['Cabin_Null'] = ds['Cabin'].isnull().astype(int) pd.crosstab(comb[0]['Cabin_Null'], comb[0]['Survived'])
Titanic - Machine Learning from Disaster
14,525,173
y_test_probas = np.empty((X_test.shape[0], n_splits*len(seeds))) for i, model in enumerate(models): y_test_probas[:, i] = model.predict_proba(X_test)[:, 1] y_test_proba = np.mean(y_test_probas, axis=1) y_submit = np.repeat(y_test_proba > 0.25, 3) print('Positive fraction %d/%d = %.3f' % (np.sum(y_submit), len(y_submit), np.sum(y_submit)/len(y_submit)))<save_to_csv>
comb[0][['Cabin_Null', 'Survived']].groupby(['Cabin_Null'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
assert(len(id_test) == len(y_submit)) n = len(id_test) with open('submission.csv', 'w') as f: f.write('signal_id,target\n') for i in range(n): f.write('%d,%d\n' % (id_test[i], int(y_submit[i]))) print('submission.csv written')<import_modules>
for daset in comb: daset['TickType'] = 0 tic_let = daset['Ticket'].str.extract(r'(^\S+)') daset['TickType'] = tic_let daset['TickType'] = daset['TickType'].fillna('Other') comb[0]['TickType'].unique()
Titanic - Machine Learning from Disaster
14,525,173
import os import pandas as pd import numpy as np import pyarrow.parquet as pq from keras.layers import * from keras.callbacks import * from keras.initializers import * from keras import optimizers from keras import backend as K from keras.models import Model import tensorflow as tf from sklearn.model_selection import StratifiedKFold from tqdm import tqdm<define_variables>
for dataset in comb: for letter in "ACFPSW": let_regex = r"^(" + letter + ".*)" l_tic = dataset.TickType.str.extract(let_regex).dropna(axis=0).reset_index(drop=True) tic_l_list = list(l_tic[0].unique()) dataset['TickType'].replace(tic_l_list, letter, inplace=True) pd.crosstab(comb[0].TickType, comb[0].Survived)
Titanic - Machine Learning from Disaster
14,525,173
sample_size = 800000<load_from_csv>
for ds in comb: ds['TickType'] = ds['TickType'].replace('F', 'Other') comb[0][['TickType', 'Survived']].groupby(['TickType'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
df_train = pd.read_csv('../input/metadata_train.csv') df_train = df_train.set_index(['id_measurement', 'phase']) df_train.head()<normalization>
for ds in comb: ds['RelatAboard'] = ds['SibSp'] + ds['Parch'] comb[0][['RelatAboard', 'Survived']].groupby(['RelatAboard'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
max_num = 127 min_num = -128 def min_max_transf(ts, min_data, max_data, range_needed=(-1,1)): if min_data < 0: ts_std = (ts + abs(min_data)) / (max_data + abs(min_data)) else: ts_std = (ts - min_data) / (max_data - min_data) if range_needed[0] < 0: return ts_std * (range_needed[1] + abs(range_needed[0])) + range_needed[0] else: return ts_std * (range_needed[1] - range_needed[0]) + range_needed[0]<categorify>
for ds in comb: ds['OTTmembers'] = 2 ds.loc[ds['RelatAboard'] == 0, 'OTTmembers'] = 0 ds.loc[(ds['RelatAboard'] == 1) | (ds['RelatAboard'] == 2), 'OTTmembers'] = 1 ds.loc[ds['RelatAboard'] == 3, 'OTTmembers'] = 3 comb[0][['OTTmembers', 'Survived']].groupby(['OTTmembers'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
def transform_ts(ts, n_dim=160, min_max=(-1,1)): ts_std = min_max_transf(ts, min_data=min_num, max_data=max_num) bucket_size = int(sample_size / n_dim) new_ts = [] for i in range(0, sample_size, bucket_size): ts_range = ts_std[i:i + bucket_size] mean = ts_range.mean() std = ts_range.std() std_top = mean + std std_bot = mean - std percentil_calc = np.percentile(ts_range, [0, 1, 25, 50, 75, 99, 100]) max_range = percentil_calc[-1] - percentil_calc[0] relative_percentile = percentil_calc - mean new_ts.append(np.concatenate([np.asarray([mean, std, std_top, std_bot, max_range]), percentil_calc, relative_percentile])) return np.asarray(new_ts)<prepare_x_and_y>
for ds in comb: ds['Rank'] = 0 rank = ds['Name'].str.extract(r'(\w+\.) ') ds['Rank'] = rank ranks = set(comb[0]['Rank'].values) print(ranks)
Titanic - Machine Learning from Disaster
14,525,173
def prep_data(start, end): praq_train = pq.read_pandas('../input/train.parquet', columns=[str(i) for i in range(start, end)]).to_pandas() X = [] y = [] for id_measurement in tqdm(df_train.index.levels[0].unique()[int(start/3):int(end/3)]): X_signal = [] for phase in [0, 1, 2]: signal_id, target = df_train.loc[id_measurement].loc[phase] if phase == 0: y.append(target) X_signal.append(transform_ts(praq_train[str(signal_id)])) X_signal = np.concatenate(X_signal, axis=1) X.append(X_signal) X = np.asarray(X) y = np.asarray(y) return X, y<concatenate>
ot_list = ['Capt.', 'Col.', 'Countess.', 'Don.', 'Dr.', 'Jonkheer.', 'Rev.', 'Sir.', 'Major.'] miss_list = ['Lady.', 'Mlle.', 'Mme.', 'Ms.'] for dataset in comb: dataset['Rank'].replace(ot_list, 'Other', inplace=True) dataset['Rank'].replace(miss_list, 'Miss.', inplace=True) comb[0][['Rank', 'Survived']].groupby(['Rank'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
X = [] y = [] def load_all(): total_size = len(df_train) for ini, end in [(0, int(total_size/2)), (int(total_size/2), total_size)]: X_temp, y_temp = prep_data(ini, end) X.append(X_temp) y.append(y_temp) load_all() X = np.concatenate(X) y = np.concatenate(y)<compute_test_metric>
comb[0]['AgeGroup'] = pd.cut(comb[0]['Age'], 5) comb[0][['AgeGroup', 'Survived']].groupby(['AgeGroup'], as_index=False).mean().sort_values(by='AgeGroup', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
def matthews_correlation(y_true, y_pred): y_pred = tf.convert_to_tensor(y_pred, np.float32) y_true = tf.convert_to_tensor(y_true, np.float32) y_pred_pos = K.round(K.clip(y_pred, 0, 1)) y_pred_neg = 1 - y_pred_pos y_pos = K.round(K.clip(y_true, 0, 1)) y_neg = 1 - y_pos tp = K.sum(y_pos * y_pred_pos) tn = K.sum(y_neg * y_pred_neg) fp = K.sum(y_neg * y_pred_pos) fn = K.sum(y_pos * y_pred_neg) numerator = (tp * tn - fp * fn) denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) return numerator / (denominator + K.epsilon())<set_options>
for ds in comb: ds.loc[ds['Age'] < 16, 'Age'] = 0 ds.loc[(ds['Age'] >= 16) & (ds['Age'] < 48), 'Age'] = 1 ds.loc[(ds['Age'] >= 48) & (ds['Age'] < 64), 'Age'] = 2 ds.loc[ds['Age'] >= 64, 'Age'] = 3 ds['Age'] = ds['Age'].astype(int) comb[0][['Age', 'Survived']].groupby(['Age'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
class Attention(Layer): def __init__(self, step_dim, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs): self.supports_masking = True self.init = initializers.get('glorot_uniform') self.W_regularizer = regularizers.get(W_regularizer) self.b_regularizer = regularizers.get(b_regularizer) self.W_constraint = constraints.get(W_constraint) self.b_constraint = constraints.get(b_constraint) self.bias = bias self.step_dim = step_dim self.features_dim = 0 super(Attention, self).__init__(**kwargs) def build(self, input_shape): assert len(input_shape) == 3 self.W = self.add_weight((input_shape[-1],), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint) self.features_dim = input_shape[-1] if self.bias: self.b = self.add_weight((input_shape[1],), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint) else: self.b = None self.built = True def compute_mask(self, input, input_mask=None): return None def call(self, x, mask=None): features_dim = self.features_dim step_dim = self.step_dim eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)), K.reshape(self.W, (features_dim, 1))), (-1, step_dim)) if self.bias: eij += self.b eij = K.tanh(eij) a = K.exp(eij) if mask is not None: a *= K.cast(mask, K.floatx()) a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx()) a = K.expand_dims(a) weighted_input = x * a return K.sum(weighted_input, axis=1) def compute_output_shape(self, input_shape): return input_shape[0], self.features_dim<normalization>
for ds in comb: ds.loc[ds['Fare'] <= 10.5, 'Fare'] = 0 ds.loc[(ds['Fare'] > 10.5) & (ds['Fare'] <= 39.688), 'Fare'] = 1 ds.loc[ds['Fare'] > 39.688, 'Fare'] = 2 ds['Fare'] = ds['Fare'].astype(int) comb[0][['Fare', 'Survived']].groupby(['Fare'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
def squash(x, axis=-1): s_squared_norm = K.sum(K.square(x), axis, keepdims=True) scale = K.sqrt(s_squared_norm + K.epsilon()) return x / scale class Capsule(Layer): def __init__(self, num_capsule, dim_capsule, routings=3, kernel_size=(9, 1), share_weights=True, activation='default', **kwargs): super(Capsule, self).__init__(**kwargs) self.num_capsule = num_capsule self.dim_capsule = dim_capsule self.routings = routings self.kernel_size = kernel_size self.share_weights = share_weights if activation == 'default': self.activation = squash else: self.activation = Activation(activation) def build(self, input_shape): super(Capsule, self).build(input_shape) input_dim_capsule = input_shape[-1] if self.share_weights: self.W = self.add_weight(name='capsule_kernel', shape=(1, input_dim_capsule, self.num_capsule * self.dim_capsule), initializer='glorot_uniform', trainable=True) else: input_num_capsule = input_shape[-2] self.W = self.add_weight(name='capsule_kernel', shape=(input_num_capsule, input_dim_capsule, self.num_capsule * self.dim_capsule), initializer='glorot_uniform', trainable=True) def call(self, u_vecs): if self.share_weights: u_hat_vecs = K.conv1d(u_vecs, self.W) else: u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1]) batch_size = K.shape(u_vecs)[0] input_num_capsule = K.shape(u_vecs)[1] u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule, self.num_capsule, self.dim_capsule)) u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3)) b = K.zeros_like(u_hat_vecs[:, :, :, 0]) for i in range(self.routings): b = K.permute_dimensions(b, (0, 2, 1)) c = K.softmax(b) c = K.permute_dimensions(c, (0, 2, 1)) b = K.permute_dimensions(b, (0, 2, 1)) outputs = self.activation(tf.keras.backend.batch_dot(c, u_hat_vecs, [2, 2])) if i < self.routings - 1: b = tf.keras.backend.batch_dot(outputs, u_hat_vecs, [2, 3]) return outputs def compute_output_shape(self, input_shape): return (None, self.num_capsule, self.dim_capsule)<choose_model_class>
s_dict = {'male': 0, 'female': 1}
for ds in comb:
    ds['Sex'] = ds['Sex'].map(s_dict)
    ds['Embarked'] = ds['Embarked'].astype('category').cat.codes
    ds['TickType'] = ds['TickType'].astype('category').cat.codes
    ds['Rank'] = ds['Rank'].astype('category').cat.codes
Titanic - Machine Learning from Disaster
14,525,173
def model_lstm(input_shape):
    inp = Input(shape=(input_shape[1], input_shape[2],))

    # shared two-layer bidirectional LSTM encoder
    x = Bidirectional(CuDNNLSTM(128, return_sequences=True,
                                kernel_initializer=glorot_normal(seed=1029),
                                recurrent_initializer=orthogonal(gain=1.0, seed=1029)))(inp)
    x = Bidirectional(CuDNNLSTM(128, return_sequences=True,
                                kernel_initializer=glorot_normal(seed=1029),
                                recurrent_initializer=orthogonal(gain=1.0, seed=1029)))(x)

    # branch 1: attention pooling over the LSTM states
    x_1 = Attention(input_shape[1])(x)
    x_1 = Dropout(0.5)(x_1)

    # branch 2: capsule pooling over the LSTM states
    x_2 = Capsule(num_capsule=8, dim_capsule=8, routings=4, share_weights=True)(x)
    x_2 = Flatten()(x_2)
    x_2 = Dropout(0.5)(x_2)

    # branches 3 and 4: 1x1 convolution on the raw input, pooled both ways
    x_rcnn = Conv1D(filters=128, kernel_size=1, kernel_initializer='he_uniform')(inp)
    x_rcnn = Activation('relu')(x_rcnn)
    x_rcnn_atten = Attention(input_shape[1])(x_rcnn)
    x_rcnn_capsule = Capsule(num_capsule=8, dim_capsule=8, routings=4, share_weights=True)(x_rcnn)
    x_rcnn_capsule = Flatten()(x_rcnn_capsule)

    conc = concatenate([x_1, x_2, x_rcnn_atten, x_rcnn_capsule])
    conc = Dense(512, activation="relu")(conc)
    conc = Dropout(0.5)(conc)
    outp = Dense(1, activation="sigmoid")(conc)

    model = Model(inputs=inp, outputs=outp)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=[matthews_correlation])
    return model<train_model>
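A quick instantiation sketch (X here is assumed to be the 3-D (samples, timesteps, features) training array prepared earlier in this kernel; only its shape matters to model_lstm):

# Hypothetical invocation.
model = model_lstm(X.shape)
model.summary()  # prints the four-branch attention/capsule architecture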
drop_col_test = ['Cabin', 'Name', 'Parch', 'SibSp', 'RelatAboard', 'Ticket']
drop_col_train = drop_col_test.copy()
drop_col_train.extend(['FareGroup', 'AgeGroup', 'PassengerId'])
comb[0] = comb[0].drop(columns=drop_col_train)
comb[1] = comb[1].drop(columns=drop_col_test)
Titanic - Machine Learning from Disaster
14,525,173
N_SPLITS = 5
splits = list(StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=1029).split(X, y))
preds_val = []
y_val = []
best_scores = []
for idx, (train_idx, val_idx) in enumerate(splits):
    K.clear_session()
    print("Beginning fold {}".format(idx + 1))
    train_X, train_y, val_X, val_y = X[train_idx], y[train_idx], X[val_idx], y[val_idx]
    model = model_lstm(train_X.shape)
    ckpt = ModelCheckpoint('weights_{}.h5'.format(idx), save_best_only=True,
                           save_weights_only=True, verbose=1,
                           monitor='val_matthews_correlation', mode='max')
    history = model.fit(train_X, train_y, batch_size=128, epochs=50,
                        validation_data=(val_X, val_y), callbacks=[ckpt])
    best_scores.append(np.max(history.history['val_matthews_correlation']))
    # reload the best checkpoint before collecting out-of-fold predictions
    model.load_weights('weights_{}.h5'.format(idx))
    preds_val.append(model.predict(val_X, batch_size=512))
    y_val.append(val_y)

print("Best validation MCC per fold: " + str(best_scores))
print("Mean: " + str(np.mean(best_scores)))<concatenate>
train = comb[0] test = comb[1]
Titanic - Machine Learning from Disaster
14,525,173
preds_val = np.concatenate(preds_val)[..., 0]
y_val = np.concatenate(y_val)
print(preds_val.shape, y_val.shape)<compute_test_metric>
train[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
def threshold_search(y_true, y_proba):
    # brute-force scan of 100 candidate thresholds for the best MCC
    best_threshold = 0
    best_score = 0
    for threshold in tqdm([i * 0.01 for i in range(100)]):
        score = K.eval(matthews_correlation(y_true.astype(np.float64),
                                            (y_proba > threshold).astype(np.float64)))
        if score > best_score:
            best_threshold = threshold
            best_score = score
    search_result = {'threshold': best_threshold, 'matthews_correlation': best_score}
    return search_result<compute_test_metric>
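An equivalent pure-numpy search, useful as a sanity check without the Keras session overhead (a sketch; sklearn's matthews_corrcoef stands in for the Keras metric above):

import numpy as np
from sklearn.metrics import matthews_corrcoef

def threshold_search_np(y_true, y_proba):
    # same 100-point threshold grid as above, scored with sklearn
    thresholds = np.arange(0, 1, 0.01)
    scores = [matthews_corrcoef(y_true, (y_proba > t).astype(int)) for t in thresholds]
    best = int(np.argmax(scores))
    return {'threshold': float(thresholds[best]), 'matthews_correlation': float(scores[best])}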
train[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
best_threshold = threshold_search(y_val, preds_val)['threshold']
print(best_threshold)<load_from_csv>
train[['Age', 'Survived']].groupby(['Age'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
meta_test = pd.read_csv('../input/metadata_test.csv')
meta_test = meta_test.set_index(['signal_id'])
meta_test.head()<define_variables>
train[['Fare', 'Survived']].groupby(['Fare'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
%%time
# read the test signals in chunks of columns to stay within memory
first_sig = meta_test.index[0]
n_parts = 10
max_line = len(meta_test)
part_size = int(max_line / n_parts)
last_part = max_line % n_parts

# [start, end) column ranges per chunk, with the remainder folded into the last one
start_end = [[x, x + part_size] for x in range(first_sig, max_line + first_sig, part_size)]
start_end = start_end[:-1] + [[start_end[-1][0], start_end[-1][0] + last_part]]

X_test = []
for start, end in start_end:
    subset_test = pq.read_pandas('../input/test.parquet',
                                 columns=[str(i) for i in range(start, end)]).to_pandas()
    for i in tqdm(subset_test.columns):
        id_measurement, phase = meta_test.loc[int(i)]
        subset_test_col = subset_test[i]
        subset_trans = transform_ts(subset_test_col)
        X_test.append([i, id_measurement, phase, subset_trans])<load_from_csv>
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
submission = pd.read_csv('../input/sample_submission.csv')<concatenate>
train[['Cabin_Null', 'Survived']].groupby(['Cabin_Null'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
X_test_input = np.asarray([np.concatenate([X_test[i][3], X_test[i + 1][3], X_test[i + 2][3]], axis=1)
                           for i in range(0, len(X_test), 3)])
X_test_input.shape<predict_on_test>
train[['TickType', 'Survived']].groupby(['TickType'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
preds_test = []
for fold in range(N_SPLITS):  # renamed from i to avoid shadowing by the inner loop
    model.load_weights('weights_{}.h5'.format(fold))
    pred = model.predict(X_test_input, batch_size=300, verbose=1)
    # each prediction covers one id_measurement, i.e. three signal phases,
    # so repeat it three times to get back to per-signal granularity
    pred_3 = []
    for pred_scalar in pred:
        for _ in range(3):
            pred_3.append(pred_scalar)
    preds_test.append(pred_3)<data_type_conversions>
train[['OTTmembers', 'Survived']].groupby(['OTTmembers'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
preds_test = (np.squeeze(np.mean(preds_test, axis=0)) > best_threshold).astype(int)  # np.int is deprecated; plain int is equivalent here
preds_test.shape<save_to_csv>
train[['Rank', 'Survived']].groupby(['Rank'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
14,525,173
submission['target'] = preds_test
submission.to_csv('submission.csv', index=False)
submission.head(10)<count_values>
y_list = list(train['Survived'])
y = pd.Series(y_list, name='Survived')
X = train.drop(columns='Survived')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.2)
Titanic - Machine Learning from Disaster
14,525,173
submission.target.value_counts().sort_index()<import_modules>
random_forest = RandomForestClassifier(bootstrap=True, n_estimators=700, criterion='entropy') random_forest.fit(X_train, y_train) print("Accuracy on train data: ", random_forest.score(X_train, y_train)) print("Accuracy on test data: ", random_forest.score(X_test, y_test))
Titanic - Machine Learning from Disaster
14,525,173
import pandas as pd
import pyarrow.parquet as pq
import os
import numpy as np
from keras.layers import *
from keras.models import *
from tqdm import tqdm
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from keras import backend as K
from keras import optimizers
from keras.callbacks import *
from keras import activations
from keras import regularizers
from keras import initializers
from keras import constraints
from sklearn.preprocessing import MinMaxScaler
from numba import jit
from math import log, floor
from sklearn.neighbors import KDTree
from scipy.signal import periodogram, welch
from keras.engine import Layer
from keras.engine import InputSpec
from keras.objectives import categorical_crossentropy
from keras.objectives import sparse_categorical_crossentropy<define_variables>
params = {'n_neighbors': range(1, 20), 'leaf_size': range(1, 50)}
knn_grid = GridSearchCV(KNeighborsClassifier(), params, scoring='roc_auc')
knn_grid.fit(X_train, y_train)
# with scoring='roc_auc', .score() reports ROC AUC rather than accuracy
print("ROC AUC on train data: ", knn_grid.score(X_train, y_train))
print("ROC AUC on test data: ", knn_grid.score(X_test, y_test))
Titanic - Machine Learning from Disaster
14,525,173
N_SPLITS = 5 sample_size = 800000<compute_test_metric>
params2 = {'alpha': [0.00001, 0.0001, 0.001, 0.01, 1, 10, 100, 1000],
           'normalize': [True, False],  # removed in scikit-learn 1.2; drop on newer versions
           'random_state': [0, 50, 100, 150, 200]}
r_grid = GridSearchCV(RidgeClassifier(), params2, scoring='roc_auc')
r_grid.fit(X_train, y_train)
# with scoring='roc_auc', .score() reports ROC AUC rather than accuracy
print("ROC AUC on train data: ", r_grid.score(X_train, y_train))
print("ROC AUC on test data: ", r_grid.score(X_test, y_test))
Titanic - Machine Learning from Disaster
14,525,173
def matthews_correlation(y_true, y_pred):
    # Matthews correlation from confusion-matrix counts after rounding the
    # predictions to 0/1; used here as a Keras metric, not a loss.
    # K.epsilon() guards against division by zero.
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    tp = K.sum(y_pos * y_pred_pos)
    tn = K.sum(y_neg * y_pred_neg)
    fp = K.sum(y_neg * y_pred_pos)
    fn = K.sum(y_pos * y_pred_neg)
    numerator = (tp * tn - fp * fn)
    denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return numerator / (denominator + K.epsilon())<choose_model_class>
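A quick cross-check of the metric against scikit-learn on a toy example (values chosen for illustration; the two results should agree up to K.epsilon()):

import numpy as np
from sklearn.metrics import matthews_corrcoef

y_t = np.array([1, 0, 1, 1, 0, 0], dtype=np.float64)
y_p = np.array([1, 0, 0, 1, 0, 1], dtype=np.float64)
print(K.eval(matthews_correlation(K.constant(y_t), K.constant(y_p))))  # ~0.3333
print(matthews_corrcoef(y_t, y_p))                                     # 0.3333...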
svm_classifier = svm.SVC(kernel="poly", C=3, degree=6) svm_classifier.fit(X_train, y_train) print("Accuracy on train data: ", svm_classifier.score(X_train, y_train)) print("Accuracy on test data: ", svm_classifier.score(X_test, y_test))
Titanic - Machine Learning from Disaster
14,525,173
class Attention(Layer):
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                              K.reshape(self.W, (features_dim, 1))),
                        (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim<load_from_csv>
gbm = xgb.XGBClassifier(
    learning_rate=0.02,
    use_label_encoder=False,
    n_estimators=2000,
    max_depth=4,
    min_child_weight=2,
    gamma=0.9,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='binary:logistic',
    n_jobs=-1,  # nthread is a deprecated alias for n_jobs
    eval_metric='error',
    scale_pos_weight=1).fit(X_train, y_train)
print("Accuracy on train data: ", gbm.score(X_train, y_train))
print("Accuracy on test data: ", gbm.score(X_test, y_test))
Titanic - Machine Learning from Disaster
14,525,173
df_train = pd.read_csv('../input/metadata_train.csv')
df_train = df_train.set_index(['id_measurement', 'phase'])
df_train.head()<define_variables>
Y_train_full = train['Survived'] X_train_full = train.drop(columns='Survived') X_test_full = test.drop(columns='PassengerId') X_train_full.shape, X_test_full.shape, Y_train_full.shape
Titanic - Machine Learning from Disaster
14,525,173
# the raw signals are stored as int8, hence this fixed value range
max_num = 127
min_num = -128<categorify>
model = svm.SVC(kernel="poly", C=2) model.fit(X_train_full, Y_train_full) print("Accuracy on train data: ", model.score(X_train_full, Y_train_full))
Titanic - Machine Learning from Disaster
14,525,173
def min_max_transf(ts, min_data, max_data, range_needed=(-1, 1)):
    if min_data < 0:
        ts_std = (ts + abs(min_data)) / (max_data + abs(min_data))
    else:
        ts_std = (ts - min_data) / (max_data - min_data)
    if range_needed[0] < 0:
        return ts_std * (range_needed[1] + abs(range_needed[0])) + range_needed[0]
    else:
        return ts_std * (range_needed[1] - range_needed[0]) + range_needed[0]<categorify>
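A worked example of the scaling for the int8 signal range (numbers chosen for illustration):

import numpy as np

ts = np.array([-128, 0, 127], dtype=np.float64)
print(min_max_transf(ts, min_data=-128, max_data=127))
# -> [-1., ~0.0039, 1.]  since (0 + 128) / 255 * 2 - 1 ≈ 0.0039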
y_test_predicted = model.predict(X_test_full) y_test_predicted
Titanic - Machine Learning from Disaster
14,525,173
def transform_ts(ts, n_dim=160, min_max=(-1, 1)):
    # scale to the requested range (the original ignored min_max; pass it through)
    ts_std = min_max_transf(ts, min_data=min_num, max_data=max_num, range_needed=min_max)
    bucket_size = int(sample_size / n_dim)
    new_ts = []
    for i in range(0, sample_size, bucket_size):
        ts_range = ts_std[i:i + bucket_size]
        # per-bucket statistics: mean, std, one-sigma band, percentiles
        mean = ts_range.mean()
        std = ts_range.std()
        std_top = mean + std
        std_bot = mean - std
        percentil_calc = np.percentile(ts_range, [0, 1, 25, 50, 75, 99, 100])
        max_range = percentil_calc[-1] - percentil_calc[0]
        relative_percentile = percentil_calc - mean
        new_ts.append(np.concatenate([np.asarray([mean, std, std_top, std_bot, max_range]),
                                      percentil_calc, relative_percentile]))
    return np.asarray(new_ts)<prepare_x_and_y>
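A shape sanity check for the transform (a sketch; the random signal is a stand-in for a real 800,000-sample measurement):

import numpy as np

dummy_signal = np.random.randint(-128, 128, size=sample_size).astype(np.float64)
# 160 buckets x (5 statistics + 7 percentiles + 7 relative percentiles) = (160, 19)
print(transform_ts(dummy_signal).shape)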
sub = pd.DataFrame({
    "PassengerId": comb[1]['PassengerId'],
    "Survived": y_test_predicted
})
sub.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster