column        dtype   range
kernel_id     int64   24.2k – 23.3M
prompt        string  lengths 8 – 1.85M
completetion  string  lengths 1 – 182k
comp_name     string  lengths 5 – 57
13,816,070
test_users = pd.read_csv("../input/ds2019uec-task2/test_user_ids.csv")["user_id"]<categorify>
train_data["fsize"] = train_data["SibSp"] + train_data["Parch"] + 1
test_data["fsize"] = test_data["SibSp"] + test_data["Parch"] + 1
Titanic - Machine Learning from Disaster
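Each prompt cell above ends with an angle-bracket task tag (here <categorify>) naming the operation its paired completetion cell performs, and rows repeat as kernel_id / prompt / completetion / comp_name. A minimal sketch of peeling that tag off a prompt string; the regex and helper name are assumptions for illustration, not part of the dataset:

import re

def split_prompt(prompt: str):
    """Separate the trailing <task_tag> from the code body of a prompt cell."""
    m = re.match(r"(?s)(.*)<([a-z_]+)>\s*$", prompt)
    if m is None:
        return prompt, None
    return m.group(1).rstrip(), m.group(2)

code, tag = split_prompt('train_data["fsize"] = train_data["SibSp"] + 1<categorify>')
print(tag)  # -> categorify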
13,816,070
test_user_encoder = LabelEncoder()
test_user_encoder.fit(test_users)
print(test_user_encoder.classes_.shape)<categorify>
train_data[['fsize', 'Survived']].groupby(['fsize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
13,816,070
X_test = np.zeros((3000, 4155))
user_vec_test = test_user_encoder.transform(test_users_game1_df["user_id"])
game_vec = game1_encoder.transform(test_users_game1_df["game_title"])
for i, j in zip(user_vec_test, game_vec):
    X_test[i, j] = 1<define_variables>
print(train_data.Ticket.nunique())
print(train_data.Ticket.tail())
Titanic - Machine Learning from Disaster
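Building the 3000 × 4155 indicator matrix densely, as the cell above does, is fine at this size; the same matrix can also be assembled directly in sparse form, which the hstack cells later in this dump already rely on. A sketch reusing the names from that cell (note that duplicate (user, game) pairs would sum instead of staying at 1):

from scipy.sparse import csr_matrix

ones = np.ones(len(user_vec_test))
X_test_sparse = csr_matrix((ones, (user_vec_test, game_vec)), shape=(3000, 4155))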
13,816,070
pred_test_mat = np.zeros((3000, 1000))<feature_engineering>
train_data["ticket_prefix"] = pd.Series([len(i.split()) > 1 for i in train_data.Ticket], index=train_data.index)
Titanic - Machine Learning from Disaster
13,816,070
X_test[:, X.sum(axis=0) < 100] = 0
X[:, X.sum(axis=0) < 100] = 0<train_model>
train_data[['ticket_prefix', 'Survived']].groupby(['ticket_prefix'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
13,816,070
for r in tqdm(range(1000)):
    if Y_mat[:, r].sum() > 100:
        lr = LogisticRegression(solver='lbfgs')
        lr.fit(X, Y_mat[:, r])
        pred_test_mat[:, r] = lr.predict_proba(X_test)[:, 1]<load_from_csv>
train_data.drop("ticket_prefix", axis=1, inplace=True)
train_data.drop("Ticket", axis=1, inplace=True)
test_data.drop("Ticket", axis=1, inplace=True)
Titanic - Machine Learning from Disaster
13,816,070
res_list = [""] * 3000
for i in range(3000):
    res_list[i] = " ".join([str(j) for j in np.argsort(-pred_test_mat[i, :])[:10]])
sample_submission_df = pd.read_csv("../input/ds2019uec-task2/sample_submission.csv")
sub_df = sample_submission_df.copy()
sub_df["purchased_games"] = res_list<save_to_csv>
train_data.drop("Cabin", axis=1, inplace=True)
test_data.drop("Cabin", axis=1, inplace=True)
Titanic - Machine Learning from Disaster
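The np.argsort(-pred_test_mat[i, :])[:10] idiom above fully sorts all 1000 scores per user just to keep ten. np.argpartition finds the same ten indices without a complete sort; a small sketch, not from the kernel:

def top_k_indices(row, k=10):
    idx = np.argpartition(-row, k)[:k]  # the k best indices, in no particular order
    return idx[np.argsort(-row[idx])]   # order those k by descending score

At 1000 columns either version is quick; the partition trick matters for much wider score matrices.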
13,816,070
sub_df.to_csv("logistic_regression_1000.csv", index=False)<compute_test_metric>
train_data["Embarked"] = train_data["Embarked"].fillna("S")
train_data[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
Titanic - Machine Learning from Disaster
13,816,070
def hamming_accuracy(prediction, true_values):
    return np.mean(np.sum(np.equal(prediction, true_values)) / float(true_values.size))

def get_score(prediction, true_values):
    print("\tHamming accuracy: {:.3f}".format(hamming_accuracy(prediction, true_values)))
    print("\tAccuracy, exact matches: {:.3f}".format(accuracy_score(prediction, true_values)))
    print("\tMacro F1 Score: {:.3f}".format(f1_score(y_true=true_values, y_pred=prediction, average="macro")))
    print("\tMicro F1 Score: {:.3f}".format(f1_score(y_true=true_values, y_pred=prediction, average="micro")))

def build_dataframe(input_data: pd.DataFrame, col_name: str, preserve_int_col_name=False) -> pd.DataFrame:
    vertices_dict = []
    for i, row_i in input_data.iterrows():
        features = [int(float(x)) for x in row_i[f"{col_name}s"].split(";")]
        new_v = {"id": i}
        for j, f in enumerate(features):
            new_v[j if preserve_int_col_name else f"{col_name}_{j}"] = f
        vertices_dict += [new_v]
    res_df = pd.DataFrame(vertices_dict)
    return res_df.set_index("id")

def bool_to_int(labels: list) -> list:
    return [i for i, x in enumerate(labels) if x == 1]<load_from_csv>
train_data["Embarked"] = train_data["Embarked"].fillna("S")
print(train_data.Embarked.isna().sum())
Titanic - Machine Learning from Disaster
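The helpers defined in the prompt above are easiest to read from a tiny example; the arrays here are made up purely for illustration:

y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 0]])
print(hamming_accuracy(y_pred, y_true))  # 5 of 6 labels match -> ~0.833
print(bool_to_int([0, 1, 1, 0]))         # positions of the 1s -> [1, 2]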
13,816,070
graph_name = "ppi"
embeddings_path = "../input/embeddings.csv"
embeddings_df = pd.read_csv(embeddings_path, header=None, index_col=0)
embeddings_df.columns = ["e_" + str(col) for col in embeddings_df.columns]
embeddings_df.head()<load_from_csv>
train_data = train_data.join(pd.get_dummies(train_data['Embarked'], prefix="Embarked_"))
test_data = test_data.join(pd.get_dummies(test_data['Embarked'], prefix="Embarked_"))
Titanic - Machine Learning from Disaster
13,816,070
vertices_path = f"../input/{graph_name}_train.csv"
vertices_train = pd.read_csv(vertices_path, sep=",", index_col="id")
vertices_train["dataset"] = "train"
vertices_train.head()<load_from_csv>
train_data.drop("Embarked", axis=1, inplace=True)
test_data.drop("Embarked", axis=1, inplace=True)
Titanic - Machine Learning from Disaster
13,816,070
vertices_path = f"../input/{graph_name}_test.csv"
vertices_test = pd.read_csv(vertices_path, sep=",", index_col="id")
vertices_test["dataset"] = "test"
vertices_test.head()<prepare_output>
ss = StandardScaler()
train_y = train_data["Survived"]
train_data.drop("Survived", axis=1, inplace=True)
scoring_method = "f1"
train_scaled = ss.fit_transform(train_data)
test_scaled = ss.transform(test_data)
Titanic - Machine Learning from Disaster
13,816,070
X_train_df = build_dataframe(vertices_train, "feature")
X_train_df.head()<prepare_output>
print(train_data.isna().sum())
print(test_data.isna().sum())
Titanic - Machine Learning from Disaster
13,816,070
X_test_df = build_dataframe(vertices_test, "feature")
X_test_df.head()<prepare_output>
model = LogisticRegression(random_state=10, max_iter=1000)
logit_params = {"C": [1, 3, 10, 20, 30, 40], "solver": ["lbfgs", "liblinear"]}
logit_gs = GridSearchCV(model, logit_params, scoring="f1", cv=5, n_jobs=4)
Titanic - Machine Learning from Disaster
13,816,070
y_train_df = build_dataframe(vertices_train, "label", preserve_int_col_name=True)
y_train_df.head()<merge>
logit_gs.fit(train_data, train_y)
Titanic - Machine Learning from Disaster
13,816,070
X_train_df = pd.merge(X_train_df, embeddings_df, left_index=True, right_index=True, how="left")
X_test_df = pd.merge(X_test_df, embeddings_df, left_index=True, right_index=True, how="left")
X_train_df.head()<choose_model_class>
print(logit_gs.best_params_)
print(logit_gs.best_score_)
Titanic - Machine Learning from Disaster
13,816,070
kfolds = KFold(n_splits=10)
seed = random.randint(0, 2**32)
sgd = SGDClassifier(loss="log", max_iter=100, tol=1e-3)
model = OneVsRestClassifier(sgd, n_jobs=1)<compute_train_metric>
rf_model = RandomForestClassifier()
rf_params = {'bootstrap': [True, False], 'max_depth': [10, None], 'max_features': ['auto', 'sqrt'], 'min_samples_leaf': [1, 2, 4], 'min_samples_split': [2, 5, 10], 'n_estimators': [5, 10, 15, 20, 25, 30]}
rf_gs = GridSearchCV(rf_model, rf_params, scoring=scoring_method, cv=8, n_jobs=4)
Titanic - Machine Learning from Disaster
13,816,070
scores = cross_val_score(model, X_train_df, y_train_df, cv=kfolds, n_jobs=32, verbose=2, scoring="f1_micro")
print(f"Mean cross-validation Micro-F1: {np.mean(scores):.3f}")<predict_on_test>
rf_gs.fit(train_data, train_y)
Titanic - Machine Learning from Disaster
13,816,070
model.fit(X_train_df, y_train_df)
print("Train accuracy")
y_train_pred = model.predict(X_train_df)
get_score(y_train_pred, y_train_df.values)
y_test_pred = model.predict(X_test_df)<save_to_csv>
print(rf_gs.best_params_)
print(rf_gs.best_score_)
Titanic - Machine Learning from Disaster
13,816,070
y_pred = [" ".join([str(y) for y in bool_to_int(x)]) for x in y_test_pred]
y_pred_df = pd.DataFrame(y_pred, columns=["labels"], index=X_test_df.index)
y_pred_df.to_csv(f"prediction_{datetime.datetime.now().strftime('%y_%m_%d_%H_%M_%S')}.csv")<set_options>
svc_model = SVC()
test_parameters = {"C": [1, 3, 10, 30, 100], "kernel": ["linear", "poly", "rbf", "sigmoid"]}
svc_gs = GridSearchCV(svc_model, test_parameters, scoring="f1", cv=5, n_jobs=4)
Titanic - Machine Learning from Disaster
13,816,070
plt.rcParams['figure.figsize'] = [20, 10]
%matplotlib inline<load_from_csv>
svc_gs.fit(train_scaled, train_y)
Titanic - Machine Learning from Disaster
13,816,070
dataset = pd.read_csv(FOLDER + 'train.csv')<choose_model_class>
print(svc_gs.best_params_)
print(svc_gs.best_score_)
Titanic - Machine Learning from Disaster
13,816,070
class MeineLinearRegression(linear_model.LinearRegression):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def R(self, y, X):
        if X.shape[1] == 0:
            return 0
        reg = linear_model.LinearRegression()
        reg.fit(X, y)
        y_pred = reg.predict(X)
        return r2_score(y_pred, y)

    def fit(self, X, y, sample_weight=None):
        self = super().fit(X, y, sample_weight)
        y_pred = self.predict(X)
        Ryh = r2_score(y, y_pred)
        self.r2 = Ryh
        X = np.array(X)
        y = np.array(y)
        self.rse = np.sqrt(np.sum((self.predict(X) - y) ** 2, axis=0) / float(X.shape[0] - X.shape[1] - 1))
        self.se = np.array([])
        for i, x in enumerate(X.T):
            n = x - x.mean()
            Rxg = self.R(x, np.delete(X, i, 1))
            Sbk = np.sqrt((1 - Ryh) / (1 - Rxg) / (X.shape[0] - X.shape[1] - 1)) * np.std(y) / np.std(x)
            self.se = np.append(self.se, Sbk)
        self.t = self.coef_ / self.se
        self.p = [2 * (1 - stats.t.cdf(np.abs(t_stu), df=y.shape[0] - X.shape[1])) for t_stu in self.t]<prepare_x_and_y>
lgb_model = LGBMClassifier()
test_parameters = {"n_estimators": [int(x) for x in np.linspace(5, 30, 6)], "reg_alpha": [0, 0.75, 1, 1.25], "learning_rate": [0.5, 0.4, 0.35, 0.3, 0.25, 0.2], "subsample": [0.5, 0.75, 1]}
lgb_gs = GridSearchCV(lgb_model, test_parameters, scoring=scoring_method, cv=8, n_jobs=4)
Titanic - Machine Learning from Disaster
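In standard notation, the Sbk line inside MeineLinearRegression.fit is the textbook OLS standard error built from two auxiliary R² values, with n rows and p features:

$$SE(\hat{\beta}_k) = \sqrt{\frac{1 - R_y^2}{(1 - R_{x_k}^2)\,(n - p - 1)}} \cdot \frac{s_y}{s_{x_k}}, \qquad t_k = \frac{\hat{\beta}_k}{SE(\hat{\beta}_k)}$$

where $R_y^2$ is the fit's own R² (Ryh) and $R_{x_k}^2$ (Rxg) comes from regressing feature $x_k$ on the remaining columns; the p-values then apply a two-sided t test with $n - p$ degrees of freedom, matching the final list comprehension.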
13,816,070
baseline = dataset.copy()
target = 'median_house_value'
result = []
np.seterr(all='ignore')
for label in baseline.columns[1:-1]:
    X = baseline[label]
    y = baseline[target]
    reg = MeineLinearRegression()
    reg.fit(X.values.reshape(-1, 1), y)
    F, p = f_regression(X.values.reshape(-1, 1), y)
    result.append({'Label': label, 'Beta': '{0:.2f}'.format(reg.coef_[0]), 't': '{0:.2f}'.format(reg.t[0]), 'p-value': '{0:.6f}'.format(p[0]), 'R²': "{0:.2f}%".format(reg.r2 * 100), 'F-value': '{0:.2f}'.format(F[0])})
pd.DataFrame(result, columns=['Label', 'Beta', 'R²', 't', 'F-value', 'p-value'])<train_on_grid>
lgb_gs.fit(train_data, train_y)
Titanic - Machine Learning from Disaster
13,816,070
X = baseline[baseline.columns[1:-1]]
y = baseline[target]
reg = MeineLinearRegression()
reg.fit(X, y)
result_2 = []
for n, label in enumerate(X.columns):
    result_2.append({'Label': label, 'Beta': '{0:.2f}'.format(reg.coef_[n]), 't': '{0:.2f}'.format(reg.t[n]), 'p-value': '{0:.6f}'.format(reg.p[n])})
print("R²: {0:.2f}%".format(reg.r2 * 100))
pd.DataFrame(result_2)<train_on_grid>
print(lgb_gs.best_params_)
print(lgb_gs.best_score_)
Titanic - Machine Learning from Disaster
13,816,070
test_1 = dataset[dataset['median_house_value'] < 500000]
target = 'median_house_value'
X = test_1[test_1.columns[1:-1]]
y = test_1[target]
evaluation = linear_model.LinearRegression()
score = cross_val_score(evaluation, X, y, cv=10, scoring='r2')
print('R² = {0:.3f} ± {1:.3f}'.format(np.mean(score), 2 * np.std(score)))
reg = MeineLinearRegression()
reg.fit(X, y)
result_2 = []
for n, label in enumerate(X.columns):
    result_2.append({'Label': label, 'Beta': '{0:.2f}'.format(reg.coef_[n]), 't': '{0:.2f}'.format(reg.t[n]), 'p-value': '{0:.6f}'.format(reg.p[n])})
pd.DataFrame(result_2)<choose_model_class>
ensemble_model = VotingClassifier(estimators=[
    ("logit", logit_gs.best_estimator_),
    ("rf", rf_gs.best_estimator_),
    ("svc", svc_gs.best_estimator_),
    ("lgb", lgb_gs.best_estimator_),
], voting="hard")
Titanic - Machine Learning from Disaster
13,816,070
non_lin = dataset.copy()
stat = []
target = 'median_house_value'
for label_1 in non_lin.columns[3:-1]:
    for label_2 in non_lin.columns[3:-1]:
        if label_2 != label_1:
            X = non_lin[label_1] / non_lin[label_2]
            reg = linear_model.LinearRegression()
            reg.fit(X.values.reshape(-1, 1), non_lin[target])
            F, p = f_regression(X.values.reshape(-1, 1), non_lin[target])
            y = reg.predict(X.values.reshape(-1, 1))
            r2 = r2_score(non_lin[target], y)
            stat.append({'Label': label_1 + '/' + label_2, 'Beta': '{0:.2f}'.format(reg.coef_[0]), 'F': '{0:.0f}'.format(F[0]), 'p-value': '{0:.6f}'.format(p[0]), 'R²': "{0:.2f}%".format(r2 * 100), 'corr': '{0:.5f}'.format(np.corrcoef(X, non_lin[target])[0][1]), 'raw_R²': r2})
output = pd.DataFrame(stat, columns=['Label', 'Beta', 'R²', 'p-value', 'F', 'corr', 'raw_R²'])
output = output[output['raw_R²'] > 0.01]
output.sort_values(by='raw_R²', ascending=False)<feature_engineering>
ensemble_model.fit(train_data, train_y)
Titanic - Machine Learning from Disaster
13,816,070
def add_features(dset):
    mean_houses = pd.Series(dset['households'] / dset['population'], name='mean_households')
    rooms_ratio = pd.Series(dset['total_rooms'] / dset['total_bedrooms'], name='ratio')
    return pd.concat([mean_houses, rooms_ratio, dset], axis=1)<train_on_grid>
ensemble_model.score(train_data, train_y)
Titanic - Machine Learning from Disaster
13,816,070
stat = []
target = 'median_house_value'
for n, label_1 in enumerate(non_lin.columns[3:-1]):
    for label_2 in non_lin.columns[3 + n:-1]:
        X = non_lin[label_1] * non_lin[label_2]
        reg = linear_model.LinearRegression()
        reg.fit(X.values.reshape(-1, 1), non_lin[target])
        F, p = f_regression(X.values.reshape(-1, 1), non_lin[target])
        y = reg.predict(X.values.reshape(-1, 1))
        r2 = r2_score(non_lin[target], y)
        stat.append({'Label': label_1 + '*' + label_2, 'Beta': '{0:.5f}'.format(reg.coef_[0]), 't': '{0:.2f}'.format(np.sqrt(F[0])), 'p-value': '{0:.6f}'.format(p[0]), 'R²': "{0:.2f}%".format(r2 * 100), 'corr': '{0:.5f}'.format(np.corrcoef(X, non_lin[target])[0][1]), 'raw_R²': r2})
output = pd.DataFrame(stat, columns=['Label', 'Beta', 'R²', 'p-value', 't', 'corr', 'raw_R²'])
output = output[output['raw_R²'] > 0.01]
output.sort_values(by='raw_R²', ascending=False)<load_from_csv>
preds = ensemble_model.predict(test_data)
Titanic - Machine Learning from Disaster
13,816,070
test = pd.read_csv(FOLDER + 'test.csv')
test.head()<compute_train_metric>
output = pd.DataFrame({'PassengerId': test_data.index, 'Survived': preds})
output.to_csv('submission.csv', index=False)
Titanic - Machine Learning from Disaster
14,119,657
def submit_pred(dset, dtest, model=linear_model.LinearRegression(), name='submit'):
    target = 'median_house_value'
    labels = dset.columns.drop('Id').drop(target)
    X = dset[labels]
    y = dset[target]
    score = cross_val_score(model, X, y, cv=10, scoring='r2')
    print('R² = {0:.3f} ± {1:.3f}'.format(np.mean(score), 2 * np.std(score)))
    model.fit(X, y)
    y_test_raw = model.predict(dtest[labels])
    y_test = [np.max([entry, 0]) for entry in y_test_raw]
    output = pd.concat([test['Id'], pd.Series(y_test, name=target)], axis=1)
    output.to_csv('./{0}.csv'.format(name), index=False)<predict_on_test>
warnings.filterwarnings("ignore")
Titanic - Machine Learning from Disaster
14,119,657
submit_pred(dataset, test, name='raw_linear_model')<prepare_output>
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
y_test = pd.read_csv('../input/titanic/gender_submission.csv')
Titanic - Machine Learning from Disaster
14,119,657
submit_pred(add_features(dataset), add_features(test), name='improv_linear_model')<drop_column>
y_test.head()
PassengerId = y_test['PassengerId']
y_test = y_test.drop(['PassengerId'], axis=1)
Titanic - Machine Learning from Disaster
14,119,657
d_dset = add_dist(add_features(dataset))
d_test = add_dist(add_features(test))<compute_test_metric>
train_m = (max(train['Age']) + min(train['Age'])) / 2
values = {'Cabin': 'nocabin', 'Age': train_m, 'Embarked': 'notknown'}
train = train.fillna(value=values)
test_m = (max(test['Age']) + min(test['Age'])) / 2
print(test_m)
values = {'Cabin': 'nocabin', 'Age': test_m, 'Embarked': 'notknown', "Fare": max(test['Fare'])}
test = test.fillna(value=values)
Titanic - Machine Learning from Disaster
14,119,657
submit_pred(d_dset, d_test, name='linear_model_dists_gps')<predict_on_test>
y = train["Survived"]
train = train.drop(["Survived"], axis=1)
Titanic - Machine Learning from Disaster
14,119,657
submit_pred(d_dset.drop('latitude', axis=1).drop('longitude', axis=1),
            d_test.drop('latitude', axis=1).drop('longitude', axis=1),
            name='linear_model_dists')<choose_model_class>
X_train, X_cv, y_train, y_cv = train_test_split(train, y, stratify=y, test_size=0.2)
Titanic - Machine Learning from Disaster
14,119,657
submit_pred(d_dset.drop('latitude', axis=1), d_test.drop('latitude', axis=1), model=linear_model.LassoLars(alpha=.1), name='linear_model_lasso')<choose_model_class>
vectorizer = CountVectorizer()
X_tr_emb = vectorizer.fit_transform(X_train['Embarked'])
X_cv_emb = vectorizer.transform(X_cv['Embarked'])
X_te_emb = vectorizer.transform(test['Embarked'])
enc = OneHotEncoder(handle_unknown='ignore')
X_tr_age = enc.fit_transform(np.array(X_train['Age']).reshape(-1, 1))
X_cv_age = enc.transform(np.array(X_cv['Age']).reshape(-1, 1))
X_te_age = enc.transform(np.array(test['Age']).reshape(-1, 1))
X_tr_fare = enc.fit_transform(np.array(X_train['Fare']).reshape(-1, 1))
X_cv_fare = enc.transform(np.array(X_cv['Fare']).reshape(-1, 1))
X_te_fare = enc.transform(np.array(test['Fare']).reshape(-1, 1))
X_tr_Sbp = enc.fit_transform(np.array(X_train['SibSp']).reshape(-1, 1))
X_cv_Sbp = enc.transform(np.array(X_cv['SibSp']).reshape(-1, 1))
X_te_Sbp = enc.transform(np.array(test['SibSp']).reshape(-1, 1))
X_tr_par = enc.fit_transform(np.array(X_train['Parch']).reshape(-1, 1))
X_cv_par = enc.transform(np.array(X_cv['Parch']).reshape(-1, 1))
X_te_par = enc.transform(np.array(test['Parch']).reshape(-1, 1))
X_tr_pclass = enc.fit_transform(np.array(X_train['Pclass']).reshape(-1, 1))
X_cv_pclass = enc.transform(np.array(X_cv['Pclass']).reshape(-1, 1))
X_te_pclass = enc.transform(np.array(test['Pclass']).reshape(-1, 1))
X_tr_sex = vectorizer.fit_transform(X_train['Sex'])
X_cv_sex = vectorizer.transform(X_cv['Sex'])
X_te_sex = vectorizer.transform(test['Sex'])
X_tr_cabin = vectorizer.fit_transform(X_train['Cabin'])
X_cv_cabin = vectorizer.transform(X_cv['Cabin'])
X_te_cabin = vectorizer.transform(test['Cabin'])
X_tr_tkt = vectorizer.fit_transform(X_train['Ticket'])
X_cv_tkt = vectorizer.transform(X_cv['Ticket'])
X_te_tkt = vectorizer.transform(test['Ticket'])
X_tr = hstack((X_tr_age, X_tr_fare, X_tr_sex, X_tr_pclass, X_tr_emb)).tocsr()
X_cv = hstack((X_cv_age, X_cv_fare, X_cv_sex, X_cv_pclass, X_cv_emb)).tocsr()
X_te = hstack((X_te_age, X_te_fare, X_te_sex, X_te_pclass, X_te_emb)).tocsr()
print(X_tr.shape)
print(X_te.shape)
print(X_cv.shape)
Titanic - Machine Learning from Disaster
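The thirty-odd encoder lines above reduce to a single ColumnTransformer. This is a hedged sketch rather than the kernel's code: it one-hot encodes all five columns that actually reach the hstack, instead of mixing CountVectorizer and OneHotEncoder, so the resulting matrix is equivalent in spirit rather than byte-for-byte:

from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

# The five feature groups that actually end up stacked above.
used_cols = ['Age', 'Fare', 'Sex', 'Pclass', 'Embarked']
ct = ColumnTransformer([("onehot", OneHotEncoder(handle_unknown='ignore'), used_cols)])
X_tr2 = ct.fit_transform(X_train)  # sparse output, like hstack(...).tocsr()
X_cv2 = ct.transform(X_cv)
X_te2 = ct.transform(test)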
14,119,657
submit_pred(d_dset.drop('latitude', axis=1).drop('longitude', axis=1),
            d_test.drop('latitude', axis=1).drop('longitude', axis=1),
            model=linear_model.LassoLars(alpha=.1),
            name='linear_model_lasso')<prepare_output>
max_depth = [1, 5, 10, 15]
alpha = [1, 3, 7, 15, 24, 31, 60]
cv_log_error_array = []
min_i = 0
val = 0
v = 99999
for i in alpha:
    clf = KNeighborsClassifier(n_neighbors=i, weights='distance')
    clf.fit(X_tr, y_train)
    sig_clf = CalibratedClassifierCV(clf, method="sigmoid")
    sig_clf.fit(X_tr, y_train)
    predict_y = sig_clf.predict_proba(X_cv)
    val = log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15)
    if val < v:
        min_i = i
        v = val
    cv_log_error_array.append(val)
    print('For values of alpha = ', i, "The log loss is:", val)
clf = KNeighborsClassifier(n_neighbors=min_i, weights='distance')  # refit with the best k found during the sweep
clf.fit(X_tr, y_train)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid")
sig_clf.fit(X_tr, y_train)
predict_y = sig_clf.predict_proba(X_tr)
print('For values of best alpha = ', min_i, "The train log loss is:", log_loss(y_train, predict_y, labels=clf.classes_, eps=1e-15))
predict_y = sig_clf.predict_proba(X_cv)
print('For values of best alpha = ', min_i, "The cross validation log loss is:", log_loss(y_cv, predict_y, labels=clf.classes_, eps=1e-15))
predict_y = sig_clf.predict_proba(X_te)
print('For values of best alpha = ', min_i, "The test log loss is:", log_loss(y_test, predict_y, labels=clf.classes_, eps=1e-15))
knn = min_i
Titanic - Machine Learning from Disaster
14,119,657
mean = []
stds = []
K = 1
M = 60
for i in range(K, M, 3):
    knn = neighbors.KNeighborsRegressor(n_neighbors=i)
    score = cross_val_score(knn, d_dset.drop(target, axis=1), d_dset[target], scoring='r2', cv=10)
    mean.append(np.mean(score))
    stds.append(np.std(score))
table = pd.DataFrame()
table['Mean'] = mean
table['Std'] = stds
table<compute_test_metric>
pred = sig_clf.predict(X_te)
df = pd.DataFrame(zip(PassengerId, pred), columns=['PassengerId', "Survived"])
df
df.to_csv('/kaggle/working/knn_output.csv', index=False)
Titanic - Machine Learning from Disaster
14,119,657
submit_pred(d_dset, d_test, model=linear_model.BayesianRidge(), name='linear_model_bayes_gps')<choose_model_class>
d = pd.read_csv('/kaggle/working/knn_output.csv')
d
Titanic - Machine Learning from Disaster
14,089,630
submit_pred(d_dset.drop('latitude', axis=1), d_test.drop('latitude', axis=1), model=linear_model.BayesianRidge(), name='linear_model_bayes')<predict_on_test>
%matplotlib inline
Titanic - Machine Learning from Disaster
14,089,630
submit_pred(d_dset, d_test, model=ensemble.RandomForestRegressor(n_estimators=100), name='random_forest_gps')<compute_train_metric>
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
gen_sub = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
Titanic - Machine Learning from Disaster
14,089,630
submit_pred(d_dset.drop('latitude', axis=1), d_test.drop('latitude', axis=1), model=ensemble.RandomForestRegressor(n_estimators=100), name='random_forest')<predict_on_test>
train.columns = [x.lower() for x in train.columns]
test.columns = [x.lower() for x in test.columns]
Titanic - Machine Learning from Disaster
14,089,630
submit_pred(d_dset, d_test, model=ensemble.ExtraTreesRegressor(n_estimators=100), name='extra_trees_gps')<predict_on_test>
train.drop(['passengerid', 'name', 'ticket'], axis=1, inplace=True)
train.head(2)
Titanic - Machine Learning from Disaster
14,089,630
submit_pred(d_dset.drop('latitude', axis=1), d_test.drop('latitude', axis=1), model=ensemble.ExtraTreesRegressor(n_estimators=100), name='extra_trees')<choose_model_class>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
submit_pred(d_dset, d_test, model=ensemble.AdaBoostRegressor(base_estimator=ensemble.RandomForestRegressor(max_depth=20, n_estimators=10), n_estimators=50), name='ada_boost_gps')<load_from_csv>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
train_data = pd.read_csv("../input/atividade-3-pmr3508/train.csv")
test_data = pd.read_csv("../input/atividade-3-pmr3508/test.csv")
train_Id = train_data.loc[:, 'Id']
test_Id = test_data.loc[:, 'Id']
test_data = test_data.drop('Id', axis='columns')
train_data = train_data.drop('Id', axis='columns')
train_data<feature_engineering>
train.drop('cabin', axis=1, inplace=True)
train.head()
Titanic - Machine Learning from Disaster
14,089,630
def adding_new_features(df):
    df.loc[:, 'mean_rooms'] = df.loc[:, 'total_rooms'] / df.loc[:, 'households']
    df.loc[:, 'rooms_per_person'] = df.loc[:, 'total_rooms'] / df.loc[:, 'population']
    df.loc[:, 'mean_bedrooms'] = df.loc[:, 'total_bedrooms'] / df.loc[:, 'households']
    df.loc[:, 'bedrooms_per_person'] = df.loc[:, 'total_bedrooms'] / df.loc[:, 'population']
    df.loc[:, 'persons_per_household'] = df.loc[:, 'population'] / df.loc[:, 'households']
    df.loc[:, 'median_income_per_person'] = df.loc[:, 'median_income'] / df.loc[:, 'persons_per_household']

adding_new_features(train_data)
adding_new_features(test_data)
train_data<load_from_csv>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
dist2coast = pd.read_csv("../input/distance-to-coast/dist2coast.txt", delim_whitespace=True)<create_dataframe>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
dist2coast = pd.DataFrame(dist2coast)
dist2coast = dist2coast.rename({'-179.98': 'longitude', '89.98': 'latitude', '712.935': 'dist2coast'}, axis='columns')
dist2coast = dist2coast.query('-114.00 > longitude > -126.00')
dist2coast = dist2coast.query('28.00 < latitude < 42.00')
dist2coast<save_to_csv>
age_mean = train['age'].mean()
train['age'] = train['age'].fillna(age_mean)
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
def saving(name, y_predict):
    df = pd.DataFrame()
    df['Id'] = test_Id
    df.set_index('Id', inplace=True)
    df['median_house_value'] = y_predict
    print(df)
    return df.to_csv(name)<find_best_model_class>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
x_train_data = train_data.drop('median_house_value', axis='columns')
y_train_data = train_data.loc[:, 'median_house_value']
reg1 = tree.DecisionTreeRegressor(max_depth=1)
reg1 = reg1.fit(x_train_data, y_train_data)
DTR_y = reg1.predict(test_data)
saving("DecisionTreeRegression_1.csv", DTR_y)<create_dataframe>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
Scores1 = []
for i in range(1, 10):
    reg1 = tree.DecisionTreeRegressor(max_depth=i)
    scores1 = cross_val_score(reg1, x_train_data, y_train_data, cv=10)
    Scores1.append(scores1.mean())
Scores1_df = pd.DataFrame()
Scores1_df['Regression Tree Scoring'] = Scores1
Scores1_df<find_best_model_class>
train['embarked'].value_counts()
Titanic - Machine Learning from Disaster
14,089,630
reg2 = KNeighborsRegressor(n_neighbors=50)
reg2 = reg2.fit(x_train_data, y_train_data)
knnR_y = reg2.predict(test_data)
saving("50nn Regressor.csv", knnR_y)<train_on_grid>
train[train['embarked'].isnull()]
Titanic - Machine Learning from Disaster
14,089,630
reg3 = linear_model.LassoLars(alpha=.1, positive=True)
reg3.fit(x_train_data, y_train_data)
print(reg3.coef_)
LASSO_y = reg3.predict(test_data)
saving("LASSO LARS.csv", LASSO_y)<train_model>
Titanic - Machine Learning from Disaster
14,089,630
reg4 = MLPRegressor()
reg4.fit(x_train_data, y_train_data)
MLP_y = reg4.predict(test_data)
saving("MultiLayerPerceptrons Regressor.csv", MLP_y)<find_best_model_class>
train['embarked'] = train['embarked'].fillna('S')
Titanic - Machine Learning from Disaster
14,089,630
reg5 = linear_model.BayesianRidge()
reg5.fit(x_train_data, y_train_data)
BRR_y = reg5.predict(test_data)
scores5 = cross_val_score(reg5, x_train_data, y_train_data, cv=10)
saving("Bayesian Ridge Regressor.csv", BRR_y)<find_best_model_class>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
reg6 = RandomForestRegressor()
scores6 = cross_val_score(reg6, x_train_data, y_train_data, cv=10)
scores6 = scores6.mean()
print(scores6)
reg6.fit(x_train_data, y_train_data)
RF_y = reg6.predict(test_data)
saving("RandomForestsRegressor20.csv", RF_y)
print(scores6)<choose_model_class>
train.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
reg7 = ExtraTreesRegressor()
reg7.fit(x_train_data, y_train_data)
ET_y = reg7.predict(test_data)
saving("ExtraTreesRegressor.csv", ET_y)<train_model>
train['survived'].value_counts()
Titanic - Machine Learning from Disaster
14,089,630
reg8 = AdaBoostRegressor(base_estimator=RandomForestRegressor(max_depth=20), n_estimators=50)
reg8.fit(x_train_data, y_train_data)
ADA_y = reg8.predict(test_data)
saving("ADA Boost Regression.csv", ADA_y)<find_best_model_class>
train[['pclass', 'survived']].groupby(['pclass'], as_index=False).mean().sort_values(by='survived', ascending=False)
Titanic - Machine Learning from Disaster
14,089,630
dist2coast_reg = KNeighborsRegressor(n_neighbors=1)
scores_dist = cross_val_score(dist2coast_reg, dist2coast.loc[:, :'latitude'], dist2coast.loc[:, 'dist2coast':], cv=10)
scores_dist = scores_dist.mean()
dist2coast_reg.fit(dist2coast.loc[:, :'latitude'], dist2coast.loc[:, 'dist2coast':])
train_pred_dist = dist2coast_reg.predict(train_data.loc[:, :'latitude'])
test_pred_dist = dist2coast_reg.predict(test_data.loc[:, :'latitude'])
train_data['dist2coast'] = train_pred_dist
test_data['dist2coast'] = test_pred_dist
print(scores_dist)
x_train_data = train_data.drop('median_house_value', axis='columns')
y_train_data = train_data.loc[:, 'median_house_value']
train_data<train_model>
train[['sex', 'survived']].groupby(['sex'], as_index=False).mean().sort_values(by='survived', ascending=False)
Titanic - Machine Learning from Disaster
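The n_neighbors=1 "regression" above amounts to snapping each house to the nearest grid point of the dist2coast table and copying its coast distance. A KD-tree sketch of the same lookup, assuming the longitude/latitude column names from the cells above:

from scipy.spatial import cKDTree

tree = cKDTree(dist2coast[['longitude', 'latitude']].values)
_, nearest = tree.query(train_data[['longitude', 'latitude']].values, k=1)
train_data['dist2coast'] = dist2coast['dist2coast'].values[nearest]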
14,089,630
reg_F = AdaBoostRegressor(base_estimator=RandomForestRegressor(max_depth=20), n_estimators=50)
reg_F.fit(x_train_data, y_train_data)
final = reg_F.predict(test_data)
saving("Final ADA Boost Regression.csv", final)<import_modules>
train.sibsp.value_counts()
Titanic - Machine Learning from Disaster
14,089,630
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, fbeta_score
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor<load_from_csv>
train[["sibsp", "survived"]].groupby(['sibsp'], as_index=False).mean().sort_values(by='survived', ascending=False)
Titanic - Machine Learning from Disaster
14,089,630
trainDb = pd.read_csv("../input/mycalifdb/train.csv")
testDb = pd.read_csv("../input/mycalifdb/test.csv")<compute_test_metric>
train.parch.value_counts()
Titanic - Machine Learning from Disaster
14,089,630
def RMSLE(Y, Y_hat):
    sumError = 0.0
    assert len(Y) == len(Y_hat)
    for i in range(len(Y)):
        if Y[i] < 0:
            Y[i] = 0
        sumError += (np.log(Y[i] + 1) - np.log(Y_hat[i] + 1)) ** 2
    return np.sqrt(sumError / len(Y))<prepare_x_and_y>
train[["parch", "survived"]].groupby(['parch'], as_index=False).mean().sort_values(by='survived', ascending=False)
Titanic - Machine Learning from Disaster
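A vectorized reading of the RMSLE helper above, sketched for comparison; np.log1p(x) equals np.log(x + 1), and np.clip replaces the Y[i] < 0 guard in one step:

def rmsle_vec(y, y_hat):
    y = np.clip(np.asarray(y, dtype=float), 0, None)  # same effect as the Y[i] < 0 branch
    y_hat = np.asarray(y_hat, dtype=float)
    return np.sqrt(np.mean((np.log1p(y) - np.log1p(y_hat)) ** 2))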
14,089,630
Xtrain = trainDb.drop("Id", axis=1).drop("median_house_value", axis=1)
Xtest = testDb.drop("Id", axis=1)
Ytrain = trainDb["median_house_value"]<compute_train_metric>
sex = pd.get_dummies(train['sex'], drop_first=True)
sex.head()
Titanic - Machine Learning from Disaster
14,089,630
kNNMethod = KNeighborsRegressor(n_neighbors=10)
kNNMethod.fit(Xtrain, Ytrain)
scores = cross_val_score(kNNMethod, Xtrain, Ytrain, cv=3)
Y = kNNMethod.predict(Xtrain)
print('Mean kNN score: ' + str(scores.mean()))
print('RMSLE kNN: ' + str(RMSLE(Y, Ytrain)))<compute_train_metric>
pclass = pd.get_dummies(train['pclass'], drop_first=True)
pclass.head()
Titanic - Machine Learning from Disaster
14,089,630
lassoMethod = linear_model.Lasso(alpha=0.1)
lassoMethod.fit(Xtrain, Ytrain)
scores = cross_val_score(lassoMethod, Xtrain, Ytrain, cv=3)
Y = lassoMethod.predict(Xtrain)
print('Mean Lasso score: ' + str(scores.mean()))
print('RMSLE Lasso: ' + str(RMSLE(Y, Ytrain)))<find_best_model_class>
embarked = pd.get_dummies(train['embarked'], drop_first=True)
embarked.head()
Titanic - Machine Learning from Disaster
14,089,630
forestMethod = RandomForestRegressor(max_depth=25, random_state=0, n_estimators=100)
forestMethod.fit(Xtrain, Ytrain)
scores = cross_val_score(forestMethod, Xtrain, Ytrain, cv=3)
Y = forestMethod.predict(Xtrain)
print('Mean Forest score: ' + str(scores.mean()))
print('RMSLE Forest: ' + str(RMSLE(Y, Ytrain)))<save_to_csv>
final_train = pd.concat([train, sex, pclass, embarked], axis=1)
final_train.head()
Titanic - Machine Learning from Disaster
14,089,630
forestMethod = RandomForestRegressor(max_depth=25, random_state=0, n_estimators=100)
forestMethod.fit(Xtrain, Ytrain)
Ytest = forestMethod.predict(Xtest)
predictionTest = pd.DataFrame({"Id": testDb.Id, "median_house_value": Ytest})
predictionTest.to_csv("predictTable.csv", index=False)
predictionTest<load_from_csv>
final_train = final_train.drop(['sex', 'embarked', 'pclass'], axis=1)
Titanic - Machine Learning from Disaster
14,089,630
data = pd.read_csv('/kaggle/input/girlsgoit-competition-2020/train.csv')
data.head()<load_from_csv>
final_train.columns = ['survived', 'age', 'sibsp', 'parch', 'fare', 'sex_male', 'pclass_2', 'pclass_3', 'embarked_q', 'embarked_s']
final_train.head()
Titanic - Machine Learning from Disaster
14,089,630
test = pd.read_csv('/kaggle/input/girlsgoit-competition-2020/test.csv')
test.head()<load_from_csv>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
submit = pd.read_csv('/kaggle/input/girlsgoit-competition-2020/sampleSubmission.csv')
submit.info()<groupby>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
data[["year", "popularity"]].groupby(["year",'popularity'] ).count().max(level=0 )<groupby>
test.drop(['passengerid','name','ticket'],axis=1,inplace=True) test.drop('cabin',axis=1,inplace=True) age_mean = test['age'].mean() test['age'] = test['age'].fillna(age_mean) test.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
max_per_year = data.groupby(['year'])['popularity'].apply(lambda x: x.value_counts().index[0]).reset_index()
max_per_year<data_type_conversions>
test['fare'] = test['fare'].fillna(test['fare'].mean())
Titanic - Machine Learning from Disaster
14,089,630
toPopularity = {i: j for [i, j] in max_per_year.to_numpy()}
toPopularity[2010]<categorify>
test.isnull().sum()
Titanic - Machine Learning from Disaster
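The same per-year lookup can be sketched with pandas built-ins; Series.map replaces the dict comprehension above plus the apply-based helper that follows in one call:

year_to_popularity = max_per_year.set_index('year')['popularity']
predicted = data['year'].map(year_to_popularity)  # same result as pred_for_year(data)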
14,089,630
def pred_for_year(x):
    return x['year'].apply(lambda year: toPopularity[year])

pred_for_year(data)<define_variables>
test.isnull().sum()
Titanic - Machine Learning from Disaster
14,089,630
columns = ['acousticness', 'danceability', 'duration_ms', 'energy', 'explicit', 'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'speechiness', 'tempo', 'valence', 'year']
columns = ['year']
X = data[columns]
X.head()<prepare_x_and_y>
sex = pd.get_dummies(test['sex'], drop_first=True)
pclass = pd.get_dummies(test['pclass'], drop_first=True)
embarked = pd.get_dummies(test['embarked'], drop_first=True)
Titanic - Machine Learning from Disaster
14,089,630
Y = data[['popularity']]
Y.head()<split>
final_test = pd.concat([test, sex, pclass, embarked], axis=1)
final_test.head()
Titanic - Machine Learning from Disaster
14,089,630
dataX, dataY = X, Y
train_ratio = 0.70
validation_ratio = 0.15
test_ratio = 0.15
x_train, x_test, y_train, y_test = train_test_split(dataX, dataY, test_size=1 - train_ratio)
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, test_size=test_ratio / (test_ratio + validation_ratio))
print(x_train.shape, x_val.shape, x_test.shape)<import_modules>
final_test = final_test.drop(['sex', 'embarked', 'pclass'], axis=1)
final_test.head()
Titanic - Machine Learning from Disaster
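For the two-stage split above, the arithmetic works out as follows: the first train_test_split holds out 1 - 0.70 = 30% of the rows, and the second receives test_size = 0.15 / (0.15 + 0.15) = 0.5, cutting that holdout in half, so the final proportions are 70% train, 15% validation, 15% test.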
14,089,630
from sklearn.tree import DecisionTreeClassifier <import_modules>
final_test.columns = ['age', 'sibsp', 'parch', 'fare', 'sex_male', 'pclass_2', 'pclass_3', 'embarked_q', 'embarked_s']
final_test.head()
Titanic - Machine Learning from Disaster
14,089,630
plt.figure(figsize=(20, 20)) <predict_on_test>
X_train = final_train.drop('survived', axis=1)
y_train = final_train['survived']
Titanic - Machine Learning from Disaster
14,089,630
Y_predict = pred_for_year(x_val)<compute_test_metric>
X_test = final_test
Titanic - Machine Learning from Disaster
14,089,630
print("Accuracy score:", accuracy_score(y_val, Y_predict)) print("F1 score:", f1_score(y_val, Y_predict, average='micro')) print("Recall score:", recall_score(y_val, Y_predict, average='micro'))<compute_test_metric>
from sklearn.preprocessing import StandardScaler
Titanic - Machine Learning from Disaster
14,089,630
cm = confusion_matrix(y_val, Y_predict)
cm<split>
scaler = StandardScaler()
Titanic - Machine Learning from Disaster
14,089,630
columns = ['year', 'acousticness', 'danceability', 'energy', 'duration_ms', 'explicit', 'instrumentalness', 'key', 'liveness', 'loudness', 'mode', 'speechiness', 'tempo', 'valence']
info = []
for c in range(1, 4):
    break
for leaf in range(3, 30, 3):
    for depth in range(3, 30, 3):
        cs = columns[:c]
        X = data[cs]
        x_train, x_test, y_train, y_test = train_test_split(X, dataY, test_size=1 - train_ratio)
        x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, test_size=test_ratio / (test_ratio + validation_ratio))
        clf = DecisionTreeClassifier(random_state=1, max_leaf_nodes=leaf, max_depth=depth)
        clf.fit(x_train, y_train)
        Y_predict = clf.predict(x_val)
        info.append([','.join(cs), leaf, depth, accuracy_score(y_val, Y_predict), clf])<sort_values>
scaled_X_train = scaler.fit_transform(X_train)
scaled_X_test = scaler.transform(X_test)
Titanic - Machine Learning from Disaster
14,089,630
so = sorted(info, key=lambda x: x[-2])[-5:]<save_to_csv>
y_test = gen_sub['Survived']
Titanic - Machine Learning from Disaster
14,089,630
test_data = pd.read_csv('https://girlsgoitpublic.z6.web.core.windows.net/test.csv')
test_data['popularity'] = pred_for_year(test_data[columns])
test_data[['ID', 'popularity']].to_csv('submission.csv', index=False)<import_modules>
from sklearn.linear_model import LogisticRegression
Titanic - Machine Learning from Disaster
14,089,630
warnings.filterwarnings('ignore')
%matplotlib inline<load_from_csv>
from sklearn.model_selection import GridSearchCV
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino = pd.read_csv("../input/dataset_treino.csv", index_col="Order")
dataset_teste = pd.read_csv("../input/dataset_teste.csv", index_col="OrderId")<drop_column>
from sklearn.model_selection import GridSearchCV
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino = dataset_treino.drop(["Property Id", "Property Name", "Parent Property Id", "Parent Property Name", "NYC Borough, Block and Lot(BBL)self-reported", "NYC Building Identification Number(BIN)", "Postal Code", "Street Number", "Street Name", "Borough", "Address 1(self-reported)", 'Address 2'], axis=1)<drop_column>
log_model = LogisticRegression(solver='saga', multi_class="ovr", max_iter=5000)
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino = dataset_treino.drop(["2nd Largest Property Use Type", "2nd Largest Property Use - Gross Floor Area(ft²)", "3rd Largest Property Use Type", "3rd Largest Property Use Type - Gross Floor Area(ft²)", "Fuel Oil #2 Use(kBtu)", "Fuel Oil #4 Use(kBtu)", "Diesel #2 Use(kBtu)"], axis=1)<drop_column>
penalty = ['l1', 'l2']
C = np.logspace(0, 4, 10)
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino = dataset_treino.drop(["Largest Property Use Type", "List of All Property Use Types at Property"], axis=1)<data_type_conversions>
grid_model = GridSearchCV(log_model, param_grid={'C': C, 'penalty': penalty})
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino['BBL - 10 digits'] = dataset_treino['BBL - 10 digits'].astype('category')
dataset_treino['BBL - 10 digits'] = dataset_treino['BBL - 10 digits'].apply(lambda x: x[0]).astype('category')
dataset_teste['BBL - 10 digits'] = dataset_teste['BBL - 10 digits'].astype('category')
dataset_teste['BBL - 10 digits'] = dataset_teste['BBL - 10 digits'].apply(lambda x: x[0]).astype('category')<categorify>
grid_model.fit(scaled_X_train, y_train)
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino = dataset_treino.where((pd.notnull(dataset_treino)), 0.001)
d = {'Not Available': 0.001}
df = dataset_treino.replace(d)
dataset_teste = dataset_teste.where((pd.notnull(dataset_teste)), 0.001)
df_teste = dataset_teste.replace(d)<define_variables>
grid_model.best_params_
Titanic - Machine Learning from Disaster
14,089,630
colunas = ["Year Built", "Site EUI(kBtu/ft²)", "Weather Normalized Site EUI(kBtu/ft²)", "Weather Normalized Site Electricity Intensity(kWh/ft²)", "Weather Normalized Site Natural Gas Intensity(therms/ft²)", "Natural Gas Use(kBtu)", "Weather Normalized Site Electricity(kWh)", "Total GHG Emissions(Metric Tons CO2e)", "Direct GHG Emissions(Metric Tons CO2e)", "Indirect GHG Emissions(Metric Tons CO2e)", "Water Use(All Water Sources)(kgal)", "Water Intensity(All Water Sources)(gal/ft²)", "Source EUI(kBtu/ft²)", "Weather Normalized Site Natural Gas Use(therms)", "Electricity Use - Grid Purchase(kBtu)", "Natural Gas Use(kBtu)", "Weather Normalized Site Natural Gas Use(therms)", "Weather Normalized Source EUI(kBtu/ft²)", "DOF Gross Floor Area", "Number of Buildings - Self-reported", "Census Tract", "Largest Property Use Type - Gross Floor Area(ft²)", "Property GFA - Self-Reported(ft²)", "Occupancy", "Council District", "Community Board"]
array = df[colunas].values
array_teste = df_teste[colunas].values<data_type_conversions>
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report,plot_confusion_matrix
Titanic - Machine Learning from Disaster
14,089,630
array = array.astype(float)
array_teste = array_teste.astype(float)<categorify>
y_lr_pred = grid_model.predict(scaled_X_test)
Titanic - Machine Learning from Disaster
14,089,630
imp = SimpleImputer(missing_values=0.001, strategy='median').fit(array)
dataset_treino[colunas] = imp.transform(array)
dataset_teste[colunas] = imp.transform(array_teste)<drop_column>
accuracy_score(y_test, y_lr_pred)
Titanic - Machine Learning from Disaster
14,089,630
dataset_treino = dataset_treino.drop(["DOF Gross Floor Area", "Largest Property Use Type - Gross Floor Area(ft²)", "Weather Normalized Site EUI(kBtu/ft²)", "Number of Buildings - Self-reported", "Occupancy", "Electricity Use - Grid Purchase(kBtu)", "Weather Normalized Site Electricity(kWh)", "Indirect GHG Emissions(Metric Tons CO2e)", 'Property GFA - Self-Reported(ft²)', "Water Use(All Water Sources)(kgal)", "Water Intensity(All Water Sources)(gal/ft²)", "Latitude", "Longitude", "Community Board", "Council District", "Census Tract"], axis=1)<create_dataframe>
confusion_matrix(y_test, y_lr_pred)
Titanic - Machine Learning from Disaster
14,089,630
colunas_num = ["ENERGY STAR Score", "Site EUI(kBtu/ft²)", "Year Built", "Weather Normalized Site Electricity Intensity(kWh/ft²)", "Weather Normalized Site Natural Gas Intensity(therms/ft²)", "Natural Gas Use(kBtu)", "Weather Normalized Site Natural Gas Use(therms)", "Total GHG Emissions(Metric Tons CO2e)", "Direct GHG Emissions(Metric Tons CO2e)", "Source EUI(kBtu/ft²)", "Weather Normalized Source EUI(kBtu/ft²)"]
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(dataset_treino[colunas_num].values, i) for i in range(dataset_treino[colunas_num].shape[1])]
vif["features"] = dataset_treino[colunas_num].columns
vif<drop_column>
print(classification_report(y_test, y_lr_pred))
Titanic - Machine Learning from Disaster