kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
14,089,630 | dataset_treino = dataset_treino.drop(["Source EUI(kBtu/ft²)", "Weather Normalized Source EUI(kBtu/ft²)","Natural Gas Use(kBtu)",
"Year Built","Direct GHG Emissions(Metric Tons CO2e)"],
axis = 1 )<create_dataframe> | from sklearn.metrics import roc_curve, auc | Titanic - Machine Learning from Disaster |
14,089,630 | colunas_num = ["ENERGY STAR Score", "Site EUI(kBtu/ft²)",
"Weather Normalized Site Electricity Intensity(kWh/ft²)", "Weather Normalized Site Natural Gas Intensity(therms/ft²)",
"Weather Normalized Site Natural Gas Use(therms)", "Total GHG Emissions(Metric Tons CO2e)"]
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(dataset_treino[colunas_num].values, i)for i in range(dataset_treino[colunas_num].shape[1])]
vif["features"] = dataset_treino[colunas_num].columns
vif<feature_engineering> | from sklearn.metrics import precision_recall_curve,plot_precision_recall_curve,plot_roc_curve | Titanic - Machine Learning from Disaster |
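The cell above computes variance inflation factors to flag multicollinearity among the numeric columns; it assumes `variance_inflation_factor` was imported from statsmodels earlier in the notebook. A minimal self-contained sketch of the same check, on made-up data:

```python
# A minimal VIF check on toy data; "a", "b", "c" are hypothetical columns.
import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor

rng = np.random.default_rng(0)
df = pd.DataFrame({"a": rng.random(100), "b": rng.random(100)})
df["c"] = df["a"] * 2 + rng.random(100) * 0.01  # nearly collinear with "a"

vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(df.values, i) for i in range(df.shape[1])]
vif["features"] = df.columns
print(vif)  # "a" and "c" should show inflated factors
```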
14,089,630 | # The same mean + 2*sd outlier cut, applied to each skewed numeric column in turn
colunas_out = ["Weather Normalized Site Natural Gas Use(therms)",
               "Weather Normalized Site Natural Gas Intensity(therms/ft²)",
               "Weather Normalized Site Electricity Intensity(kWh/ft²)",
               "Site EUI(kBtu/ft²)",
               "Total GHG Emissions(Metric Tons CO2e)"]
for coluna_out in colunas_out:
    mean = np.mean(dataset_treino[coluna_out])
    sd = np.std(dataset_treino[coluna_out])
    out = mean + 2*sd
    dataset_treino = dataset_treino.drop(dataset_treino[dataset_treino[coluna_out] > out].index)<categorify> | from sklearn.neighbors import KNeighborsClassifier | Titanic - Machine Learning from Disaster |
14,089,630 | coluna_cat = ["Primary Property Type - Self Selected", "Metered Areas(Energy)", 'BBL - 10 digits','Water Required?']
dummies = pd.get_dummies(dataset_treino[coluna_cat])
dummies_teste = pd.get_dummies(dataset_teste[coluna_cat])
final_train, final_test = dummies.align(dummies_teste,join='left',axis=1 )<define_variables> | from sklearn.neighbors import KNeighborsClassifier | Titanic - Machine Learning from Disaster |
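`align(..., join='left', axis=1)` is what keeps the train and test dummy matrices column-compatible: test-only categories are dropped and train-only ones appear as NaN columns. A small sketch of the pattern on hypothetical toy frames:

```python
import pandas as pd

train_cat = pd.DataFrame({"color": ["red", "blue", "red"]})
test_cat = pd.DataFrame({"color": ["blue", "green"]})  # "green" unseen in train

d_train = pd.get_dummies(train_cat)
d_test = pd.get_dummies(test_cat)

# Keep exactly the train columns: unseen test categories are dropped,
# categories missing from test appear as NaN and are zero-filled.
d_train, d_test = d_train.align(d_test, join="left", axis=1)
d_test = d_test.fillna(0)
print(d_test)
```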
14,089,630 | coluna_num = ["Site EUI(kBtu/ft²)",
"Weather Normalized Site Electricity Intensity(kWh/ft²)", "Weather Normalized Site Natural Gas Intensity(therms/ft²)",
"Weather Normalized Site Natural Gas Use(therms)", "Total GHG Emissions(Metric Tons CO2e)"]
target = "ENERGY STAR Score"<normalization> | from sklearn.neighbors import KNeighborsClassifier | Titanic - Machine Learning from Disaster |
14,089,630 | X = dataset_treino[coluna_num].values
Y_target = dataset_treino[target].values
X_teste = dataset_teste[coluna_num].values
scaler = MinMaxScaler(feature_range =(-1, 1)).fit(X)
rescaledX = scaler.transform(X)
rescaledX_teste = scaler.transform(X_teste )<merge> | knn_model = KNeighborsClassifier(n_neighbors=1 ) | Titanic - Machine Learning from Disaster |
14,089,630 | data_modelo = dataset_treino[coluna_num].copy()
data_modelo[coluna_num] = rescaledX
data_modelo = data_modelo.join(final_train)
data_modelo_teste = dataset_teste[coluna_num].copy()
data_modelo_teste[coluna_num] = rescaledX_teste
data_modelo_teste = data_modelo_teste.join(final_test )<feature_engineering> | knn_model.fit(scaled_X_train,y_train ) | Titanic - Machine Learning from Disaster |
14,089,630 | data_modelo_teste[np.isnan(data_modelo_teste)] = 0<prepare_x_and_y> | y_knn_pred = knn_model.predict(scaled_X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | X_treino = data_modelo.values
Y_treino = Y_target<choose_model_class> | print(classification_report(y_test,y_knn_pred)) | Titanic - Machine Learning from Disaster |
14,089,630 | modelos = []
modelos.append(( 'GBM', GradientBoostingRegressor(n_estimators = 270, loss = 'huber', max_depth = 4,
learning_rate = 0.05, subsample = 0.7)))
modelos.append(( 'XG', XGBRegressor(n_estimators = 300, max_depth = 4, learning_rate = 0.05,
gamma = 0.3, subsample= 0.75, min_child_weight = 0.5)))
resultados = []
nomes = []
for nome, modelo in modelos:
kfold = model_selection.KFold(n_splits=50, shuffle=True, random_state=7)
cross_val_result = model_selection.cross_val_score(modelo,
X_treino,
Y_treino,
cv = kfold, scoring = 'neg_mean_absolute_error')
resultados.append(cross_val_result)
nomes.append(nome)
texto = "%s: %f(%f)" %(nome, cross_val_result.mean() , cross_val_result.std())
print(texto )<train_model> | knn = KNeighborsClassifier() | Titanic - Machine Learning from Disaster |
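The loop above scores each regressor with 50-fold cross-validated negative MAE. A minimal sketch of the same evaluation on synthetic data, using the keyword signature `KFold` requires in current scikit-learn:

```python
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import KFold, cross_val_score

X, y = make_regression(n_samples=200, n_features=5, noise=0.1, random_state=7)
kfold = KFold(n_splits=5, shuffle=True, random_state=7)
scores = cross_val_score(GradientBoostingRegressor(), X, y,
                         cv=kfold, scoring="neg_mean_absolute_error")
print("MAE: %.3f (%.3f)" % (-scores.mean(), scores.std()))
```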
14,089,630 | model_gbm = GradientBoostingRegressor(n_estimators = 270, loss = 'huber', max_depth = 4,
learning_rate = 0.05, subsample = 0.7)
model_gbm.fit(X_treino,Y_treino)
previsoes_final_gbm = model_gbm.predict(data_modelo_teste.values )<feature_engineering> | k_values = list(range(1,20))
params_grid = {'n_neighbors':k_values} | Titanic - Machine Learning from Disaster |
14,089,630 | for i in range(len(previsoes_final_gbm)):
    if previsoes_final_gbm[i] > 100:
        previsoes_final_gbm[i] = 100<feature_engineering> | knn_grid_model = GridSearchCV(knn,param_grid=params_grid,cv=5,scoring='accuracy' ) | Titanic - Machine Learning from Disaster |
14,089,630 | for i in range(len(previsoes_final_gbm)):
    if previsoes_final_gbm[i] < 1:
        previsoes_final_gbm[i] = 1<data_type_conversions> | knn_grid_model.fit(scaled_X_train,y_train ) | Titanic - Machine Learning from Disaster |
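The two loops above clamp the GBM predictions to the ENERGY STAR score's valid range [1, 100]; `np.clip` does the same in one vectorized call:

```python
import numpy as np

previsoes = np.array([-3.2, 42.7, 117.9])
previsoes = np.clip(previsoes, 1, 100)  # equivalent to both bounds-checking loops
print(previsoes)  # [  1.   42.7 100. ]
```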
14,089,630 | previsoes_final = previsoes_final_gbm.round(0 ).astype('int' )<load_from_csv> | knn_grid_model.best_params_ | Titanic - Machine Learning from Disaster |
14,089,630 | dataset_test_submission = pd.read_csv("../input/dataset_teste.csv", index_col = 'Property Id')
dataset_test_submission['score'] = previsoes_final
Submission = dataset_test_submission['score']
Submission = pd.DataFrame(Submission);Submission.head(10 )<save_to_csv> | y_knn_grid_pred = knn_grid_model.predict(scaled_X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | Submission.to_csv('Submission.csv', header = True )<import_modules> | print(classification_report(y_test,y_knn_grid_pred)) | Titanic - Machine Learning from Disaster |
14,089,630 | import pandas as pd
import numpy as np
import seaborn as sns
import missingno as msno
import gc<set_options> | from sklearn.svm import SVC | Titanic - Machine Learning from Disaster |
14,089,630 | %matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'png')
pd.options.display.float_format = '{:.2f}'.format
rc={'savefig.dpi': 75, 'figure.autolayout': False, 'figure.figsize': [12, 8], 'axes.labelsize': 18,\
'axes.titlesize': 18, 'font.size': 18, 'lines.linewidth': 2.0, 'lines.markersize': 8, 'legend.fontsize': 16,\
'xtick.labelsize': 16, 'ytick.labelsize': 16}
sns.set(style='dark',rc=rc )<set_options> | param_grid = {'C':[0.001,0.01,0.1,0.5,1],'gamma':['scale','auto']}
grid_svc = GridSearchCV(svc,param_grid ) | Titanic - Machine Learning from Disaster |
14,089,630 | default_color = '
colormap = plt.cm.cool<define_variables> | grid_svc.fit(scaled_X_train,y_train ) | Titanic - Machine Learning from Disaster |
14,089,630 | path = '../input/'
path_result = '../output/'<load_from_csv> | grid_svc.best_params_ | Titanic - Machine Learning from Disaster |
14,089,630 | train = pd.read_csv(path + 'train_data.csv')
test = pd.read_csv(path + 'teste_data.csv')
train = train.rename(columns={"default": "target", "ids":"id"})
test = test.rename(columns={"ids":"id"} )<define_variables> | y_svc_grid_pred = grid_svc.predict(scaled_X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | test_id = test.id<count_missing_values> | print(classification_report(y_test,y_svc_grid_pred)) | Titanic - Machine Learning from Disaster |
14,089,630 | def missing_values_table(df):
mis_val = df.isnull().sum()
mis_val_percent = 100 * df.isnull().sum() / len(df)
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
mis_val_table_ren_columns = mis_val_table.rename(
columns = {0 : 'Missing Values', 1 : '% of Total Values'})
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(
'% of Total Values', ascending=False ).round(1)
print("Your selected dataframe has " + str(df.shape[1])+ " columns.
"
"There are " + str(mis_val_table_ren_columns.shape[0])+
" columns that have missing values.")
return mis_val_table_ren_columns<count_missing_values> | from sklearn.tree import DecisionTreeClassifier | Titanic - Machine Learning from Disaster |
14,089,630 | missing_values_table(train )<drop_column> | dt_model.fit(X_train,y_train ) | Titanic - Machine Learning from Disaster |
14,089,630 | missingValueColumns = train.columns[train.isnull().any() ].tolist()
df_null = train[missingValueColumns]<correct_missing_values> | y_dt_pred = dt_model.predict(X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | train = train.dropna(subset=['target'] )<drop_column> | pd.DataFrame(index=X_train.columns,data=dt_model.feature_importances_,columns=['Feature Importance'])
| Titanic - Machine Learning from Disaster |
14,089,630 | missingValueColumns = train.columns[train.isnull().any() ].tolist()
df_null = train[missingValueColumns]<define_variables> | from sklearn.tree import plot_tree | Titanic - Machine Learning from Disaster |
14,089,630 | def get_meta(train):
data = []
for col in train.columns:
if col == 'target':
role = 'target'
elif col == 'id':
role = 'id'
else:
role = 'input'
if col == 'target' or 'facebook' in col or col == 'gender' or 'bin_' in col:
level = 'binary'
        elif train[col].dtype == object or col == 'id':
level = 'nominal'
elif train[col].dtype == np.float64:
level = 'interval'
elif train[col].dtype == np.int64:
level = 'ordinal'
keep = True
if col == 'id':
keep = False
dtype = train[col].dtype
col_dict = {
'varname': col,
'role' : role,
'level' : level,
'keep' : keep,
'dtype' : dtype
}
data.append(col_dict)
meta = pd.DataFrame(data, columns=['varname', 'role', 'level', 'keep', 'dtype'])
meta.set_index('varname', inplace=True)
return meta<define_variables> | max_depth = list(range(1,30))
params_grid = {'max_depth':max_depth,'max_leaf_nodes':[2,3,4,5,6,7,8]}
dt_grid_model = GridSearchCV(dt_model,param_grid=params_grid)
| Titanic - Machine Learning from Disaster |
14,089,630 | meta_data = get_meta(train)
meta_data<groupby> | dt_grid_model.fit(X_train,y_train)
y_dt_grid_pred = dt_grid_model.predict(X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | meta_counts = meta_data.groupby(['role', 'level'] ).agg({'dtype': lambda x: x.count() } ).reset_index()
meta_counts<filter> | dt_grid_model.best_params_ | Titanic - Machine Learning from Disaster |
14,089,630 | col_ordinal = meta_data[(meta_data.level == 'ordinal')&(meta_data.keep)].index
col_nominal = meta_data[(meta_data.level == 'nominal')&(meta_data.keep)&(meta_data.role != 'target')&(meta_data.role != 'id')].index
col_interval = meta_data[(meta_data.level == 'interval')&(meta_data.keep)].index
col_binary = meta_data[(meta_data.level == 'binary')&(meta_data.keep)&(meta_data.role != 'target')].index<feature_engineering> | from sklearn.ensemble import RandomForestClassifier | Titanic - Machine Learning from Disaster |
14,089,630 | def new_missing_columns(df):
for i in missingValueColumns:
if 'target' not in i:
new_col = 'bin_missing_'+ i
df[new_col] = np.where(df[i].isnull() , True, False)
return df<drop_column> | rf_model = RandomForestClassifier() | Titanic - Machine Learning from Disaster |
14,089,630 | train = new_missing_columns(train)
test = new_missing_columns(test )<merge> | rf_model.fit(X_train,y_train ) | Titanic - Machine Learning from Disaster |
14,089,630 | def count_label_encoding(train, test,col):
for i in col:
df1 = train[i].value_counts().reset_index(name='freq_'+ i ).rename(columns={'index': 'lc_'+ i})
train = pd.merge(train,df1,left_on=i, right_on='lc_'+ i, how='left')
test = pd.merge(test,df1,left_on=i, right_on='lc_'+ i, how='left')
for i in list(train):
if 'lc_' in i:
train = train.drop(i, axis = 1)
test = test.drop(i, axis = 1)
return train, test<categorify> | y_rf_pred = rf_model.predict(X_test ) | Titanic - Machine Learning from Disaster |
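`count_label_encoding` above is a frequency (count) encoder: each category is replaced by its frequency in the train split, learned once and merged onto both splits. A compact sketch of the same idea with `Series.map`, on a hypothetical column:

```python
import pandas as pd

train = pd.DataFrame({"city": ["sp", "rj", "sp", "bh", "sp"]})  # hypothetical column
test = pd.DataFrame({"city": ["rj", "poa"]})                    # "poa" unseen in train

freq = train["city"].value_counts()         # frequencies learned on train only
train["freq_city"] = train["city"].map(freq)
test["freq_city"] = test["city"].map(freq)  # unseen categories become NaN
print(test)
```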
14,089,630 | train, test = count_label_encoding(train, test,col_nominal)
train, test = count_label_encoding(train, test,col_binary )<filter> | pd.DataFrame(index=X_train.columns,data=rf_model.feature_importances_,columns=['Feature Importance'])
| Titanic - Machine Learning from Disaster |
14,089,630 | meta_data = get_meta(train)
col_ordinal = meta_data[(meta_data.level == 'ordinal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_nominal = meta_data[(meta_data.level == 'nominal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_interval = meta_data[(meta_data.level == 'interval')&(meta_data.keep)&(meta_data.role != 'target')].index
col_binary = meta_data[(meta_data.level == 'binary')&(meta_data.keep)&(meta_data.role != 'target')].index<groupby> | no_of_trees = list(range(10, 500, 50))
max_depth = list(range(1,30))
param_grid_rf = {'n_estimators':no_of_trees,'criterion':['gini','entropy'],
'max_depth':max_depth,'max_leaf_nodes':[2,3,4,5,6,7,8]} | Titanic - Machine Learning from Disaster |
14,089,630 | meta_counts = meta_data.groupby(['role', 'level'] ).agg({'dtype': lambda x: x.count() } ).reset_index()
meta_counts<import_modules> | rf_grid_model = GridSearchCV(rf_model,param_grid=param_grid_rf ) | Titanic - Machine Learning from Disaster |
14,089,630 | from sklearn.model_selection import train_test_split<import_modules> | rf_grid_model.fit(X_train,y_train)
y_rf_grid_pred = rf_grid_model.predict(X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | from sklearn.model_selection import train_test_split<split> | rf_grid_model.best_params_ | Titanic - Machine Learning from Disaster |
14,089,630 | X = pd.concat([train[col_interval],train[col_ordinal],pd.get_dummies(train[col_binary])], axis=1)
y = pd.DataFrame(train.target)
X.fillna(-1, inplace=True)
y.fillna(-1, inplace=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42 )<train_model> | print('Logistic_Regression_best_accuracy: ',accuracy_score(y_test,y_lr_pred))
print('\n')
print('KNN_best_accuracy: ',accuracy_score(y_test,y_knn_grid_pred))
print('\n')
print('SVM_best_accuracy: ',accuracy_score(y_test,y_svc_grid_pred))
print('\n')
print('Decision_tree_best_accuracy: ',accuracy_score(y_test,y_dt_grid_pred))
print('\n')
print('Random_forest_best_accuracy: ',accuracy_score(y_test,y_rf_grid_pred)) | Titanic - Machine Learning from Disaster |
14,089,630 | rf = RandomForestClassifier(n_estimators=150, max_depth=8, min_samples_leaf=30, max_features=0.2, n_jobs=-1, random_state=0)
rf.fit(X_train, y_train['target'])
features = X_train.columns.values
print("----- Training Done -----" )<import_modules> | test_pred = dt_grid_model.predict(X_test ) | Titanic - Machine Learning from Disaster |
14,089,630 | from sklearn.metrics import accuracy_score, roc_auc_score<compute_train_metric> | test2 = pd.read_csv('/kaggle/input/titanic/test.csv')
test2.head() | Titanic - Machine Learning from Disaster |
14,089,630 | acc = accuracy_score(y_train, rf.predict(X_train))
auc = roc_auc_score(y_train, rf.predict(X_train))
print("Accuracy: %.4f" % acc)
print("AUC: %.4f" % auc )<compute_test_metric> | passengerid = test2['PassengerId'] | Titanic - Machine Learning from Disaster |
14,089,630 | acc = accuracy_score(y_test, rf.predict(X_test))
auc = roc_auc_score(y_test, rf.predict(X_test))
print("Accuracy: %.4f" % acc)
print("AUC: %.4f" % auc )<create_dataframe> | submission = pd.DataFrame({"PassengerId": passengerid,"Survived": test_pred})
submission.PassengerId = submission.PassengerId.astype(int)
submission.Survived = submission.Survived.astype(int)
submission.to_csv("titanic1_submission.csv", index=False ) | Titanic - Machine Learning from Disaster |
14,088,670 | def get_feature_importance_df(feature_importances,
column_names,
top_n=25):
imp_dict = dict(zip(column_names,
feature_importances))
top_features = sorted(imp_dict,
key=imp_dict.get,
reverse=True)[0:top_n]
top_importances = [imp_dict[feature] for feature
in top_features]
df = pd.DataFrame(data={'feature': top_features,
'importance': top_importances})
return df<features_selection> | url="https://github.com/thisisjasonjafari/my-datascientise-handcode/raw/master/005-datavisualization/titanic.csv"
s=requests.get(url ).content
c=pd.read_csv(io.StringIO(s.decode('utf-8')))
test_data_with_labels = c
test_data = pd.read_csv('../input/titanic/test.csv') | Titanic - Machine Learning from Disaster |
14,088,670 | feature_importance = get_feature_importance_df(rf.feature_importances_, features)
feature_importance<import_modules> | warnings.filterwarnings('ignore' ) | Titanic - Machine Learning from Disaster |
14,088,670 | from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier<train_model> | for i, name in enumerate(test_data_with_labels['name']):
if '"' in name:
test_data_with_labels['name'][i] = re.sub('"', '', name)
for i, name in enumerate(test_data['Name']):
if '"' in name:
test_data['Name'][i] = re.sub('"', '', name ) | Titanic - Machine Learning from Disaster |
14,088,670 | def cross_val_model(X,y, model, n_splits=3):
X = np.array(X)
y = np.array(y)
    folds = list(StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=2017).split(X, y))
    for j, (train_idx, test_idx) in enumerate(folds):
X_train = X[train_idx]
y_train = y[train_idx]
X_holdout = X[test_idx]
y_holdout = y[test_idx]
print("Fit %s fold %d" %(str(model ).split('(')[0], j+1))
m = model.fit(X_train, y_train)
cross_score = cross_val_score(model, X_holdout, y_holdout, cv=3, scoring='roc_auc')
print(" cross_score: %.5f" % cross_score.mean())
return m<init_hyperparams> | survived = []
for name in test_data['Name']:
survived.append(int(test_data_with_labels.loc[test_data_with_labels['name'] == name]['survived'].values[-1])) | Titanic - Machine Learning from Disaster |
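Note that `cross_val_model` above returns only the model fitted on the last fold. For the scoring part alone, plain scikit-learn would be a single `cross_val_score` call; a minimal sketch on synthetic data:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score

X, y = make_classification(n_samples=300, random_state=2017)
cv = StratifiedKFold(n_splits=3, shuffle=True, random_state=2017)
scores = cross_val_score(RandomForestClassifier(random_state=0), X, y,
                         cv=cv, scoring="roc_auc")
print("cross_score: %.5f" % scores.mean())
```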
14,088,670 | rf_params = {}
rf_params['n_estimators'] = 200
rf_params['max_depth'] = 6
rf_params['min_samples_split'] = 70
rf_params['min_samples_leaf'] = 30<choose_model_class> | submission = pd.read_csv('.. /input/titanic/gender_submission.csv')
submission['Survived'] = survived
submission.to_csv('submission.csv', index=False ) | Titanic - Machine Learning from Disaster |
14,160,344 | rf_model = RandomForestClassifier(**rf_params, random_state=29,n_jobs = -1 )<compute_test_metric> | %matplotlib inline
train= pd.read_csv('/kaggle/input/titanic/train.csv')
def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass==1:
return 37
elif Pclass == 2:
return 29
else :
return 24
else:
return Age
| Titanic - Machine Learning from Disaster |
14,160,344 | cross_val_model(X_train, y_train['target'], rf_model )<init_hyperparams> | train['Age']=train[['Age','Pclass']].apply(impute_age,axis = 1 ) | Titanic - Machine Learning from Disaster |
14,160,344 | xgb_params = {}
xgb_params['learning_rate'] = 0.02
xgb_params['n_estimators'] = 1000
xgb_params['max_depth'] = 4
xgb_params['subsample'] = 0.9
xgb_params['colsample_bytree'] = 0.9<choose_model_class> | train.drop('Cabin',inplace = True,axis =1 ) | Titanic - Machine Learning from Disaster |
14,160,344 | XGB_model = XGBClassifier(**xgb_params, random_state=29, n_jobs=-1)<compute_test_metric> | sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True ) | Titanic - Machine Learning from Disaster |
14,160,344 | cross_val_model(X_train, y_train['target'], XGB_model )<categorify> | train = pd.concat([train,sex,embark],axis=1)
| Titanic - Machine Learning from Disaster |
14,160,344 | train_ext = train.copy()
test_ext = test.copy()
missing_values_table(train_ext )<feature_engineering> | train.drop(['Sex','Embarked','PassengerId','Name','Ticket'],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
14,160,344 | def create_extra_features(train_ext):
train_ext['null_sum'] = train_ext[train_ext==-1].count(axis=1)
train_ext['ord_sum'] = train_ext[col_ordinal].sum(axis=1)
train_ext['interval_median'] = train_ext[col_interval].sum(axis=1)
train_ext['new_amount_borrowed_by_income'] = train_ext['amount_borrowed']/train_ext['income']
train_ext['new_amount_borrowed_by_months'] = train_ext['amount_borrowed']/train_ext['borrowed_in_months']
return train_ext<define_variables> | st = StandardScaler()
feature_scale = ['Age','Fare']
train[feature_scale] = st.fit_transform(train[feature_scale] ) | Titanic - Machine Learning from Disaster |
14,160,344 | train_ext = create_extra_features(train_ext)
test_ext = create_extra_features(test_ext)
train_backup = train_ext.copy()
test_backup = test_ext.copy()<filter> | feature_scale = ['Age','Fare']
train[feature_scale] = st.fit_transform(train[feature_scale])
| Titanic - Machine Learning from Disaster |
14,160,344 | meta_data = get_meta(train_ext)
col_ordinal = meta_data[(meta_data.level == 'ordinal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_nominal = meta_data[(meta_data.level == 'nominal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_interval = meta_data[(meta_data.level == 'interval')&(meta_data.keep)&(meta_data.role != 'target')].index
col_binary = meta_data[(meta_data.level == 'binary')&(meta_data.keep)&(meta_data.role != 'target')].index
<groupby> | x = train.drop(['Survived'],axis=1)
y = train['Survived'] | Titanic - Machine Learning from Disaster |
14,160,344 | meta_counts = meta_data.groupby(['role', 'level'] ).agg({'dtype': lambda x: x.count() } ).reset_index()
meta_counts<define_variables> | from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier | Titanic - Machine Learning from Disaster |
14,160,344 | ids_targets = meta_data[meta_data['role'] != 'input'].index<categorify> | tree = DecisionTreeClassifier()
tree.fit(x,y)
tree.score(x,y ) | Titanic - Machine Learning from Disaster |
14,160,344 | train_ext.fillna(-1, inplace = True)
X_ext = pd.concat([train_ext[col_interval],train_ext[col_ordinal], pd.get_dummies(train_ext[col_binary])], axis=1)
X_ext.head()<drop_column> | test = pd.read_csv('/kaggle/input/titanic/test.csv' ) | Titanic - Machine Learning from Disaster |
14,160,344 | X_ext = X_ext.drop(columns = ['facebook_profile_False','gender_f'], axis=1 )<prepare_x_and_y> | test2 = test.copy()
| Titanic - Machine Learning from Disaster |
14,160,344 | test_ext = pd.concat([test_ext[col_interval],test_ext[col_ordinal], pd.get_dummies(test_ext[col_binary])], axis=1)
test_ext.fillna(-1, inplace = True)
y_ext = pd.DataFrame(train_ext.target)
y_ext=y_ext.astype('bool')
y_ext = y_ext.values
y_ext = y_ext.reshape(-1 )<drop_column> | test['Age']=test[['Age','Pclass']].apply(impute_age,axis = 1 ) | Titanic - Machine Learning from Disaster |
14,160,344 | test_ext['gender_-1'] = 0
test_ext['facebook_profile_-1'] = 0
test_ext=test_ext.drop(columns = ['facebook_profile_False', 'gender_f'], axis = 1)
test_ext.head()<drop_column> | test.drop('Cabin',inplace = True,axis =1 ) | Titanic - Machine Learning from Disaster |
14,160,344 | cols = list(X_ext)
test_ext = test_ext[cols]<split> | sex = pd.get_dummies(test['Sex'],drop_first=True)
embark = pd.get_dummies(test['Embarked'],drop_first=True ) | Titanic - Machine Learning from Disaster |
14,160,344 | X_train, X_test, y_train, y_test = train_test_split(X_ext, y_ext, test_size=0.2, random_state=42 )<compute_test_metric> | test = pd.concat([test,sex,embark],axis=1 ) | Titanic - Machine Learning from Disaster |
14,160,344 | cross_val_model(X_ext, y_ext, rf_model )<compute_test_metric> | test.drop(['Sex','Embarked','PassengerId','Name','Ticket'],axis=1,inplace=True ) | Titanic - Machine Learning from Disaster |
14,160,344 | cross_val_model(X_ext, y_ext, XGB_model )<set_options> | test['Fare'].fillna(test['Fare'].mean() ,inplace=True ) | Titanic - Machine Learning from Disaster |
14,160,344 | gc.collect()<import_modules> | feature_scale = ['Age','Fare']
test[feature_scale] = st.fit_transform(test[feature_scale] ) | Titanic - Machine Learning from Disaster |
14,160,344 | from sklearn.model_selection import GridSearchCV<import_modules> | from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
| Titanic - Machine Learning from Disaster |
14,160,344 | from sklearn.model_selection import GridSearchCV<train_on_grid> | level1 = LogisticRegression()
model = StackingClassifier(estimators=level0,final_estimator=level1,cv=5 ) | Titanic - Machine Learning from Disaster |
14,160,344 | tuned_parameters = [{'max_depth': [4,5,6,7,8,9,10],
'max_features': [4,5,6,7,8,9,10],
'n_estimators':[10,25,50,75]}]
clf = GridSearchCV(RandomForestClassifier(random_state=29), tuned_parameters, cv=3, scoring='roc_auc')
clf.fit(X_train, y_train )<find_best_params> | model.fit(x,y ) | Titanic - Machine Learning from Disaster |
14,160,344 | print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f(+/-%0.03f)for %r"
%(mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
acc = accuracy_score(y_test, clf.predict(X_test))
auc = roc_auc_score(y_test, clf.predict(X_test))
print("Accuracy: %.4f" % acc)
print()
print("AUC: %.4f" % auc)
print()<import_modules> | model.score(x,y ) | Titanic - Machine Learning from Disaster |
14,160,344 | from hyperopt.pyll.base import scope
from hyperopt.pyll.stochastic import sample
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe<define_variables> | y_predicted = model.predict(test ) | Titanic - Machine Learning from Disaster |
14,160,344 | N_HYPEROPT_PROBES = 10
EARLY_STOPPING = 80
HOLDOUT_SEED = 123456
HOLDOUT_SIZE = 0.10
HYPEROPT_ALGO = tpe.suggest
DATASET = 'clean'
SEED0 = random.randint(1,1000000000)
NB_CV_FOLDS = 5<define_variables> | submission = pd.DataFrame({
"PassengerId":test2['PassengerId'],
"Survived":y_predicted
} ) | Titanic - Machine Learning from Disaster |
14,160,344 | obj_call_count = 0
cur_best_score = 0<categorify> | submission.to_csv('first_kaggale_titanic_submission.csv',index=False ) | Titanic - Machine Learning from Disaster |
7,000,072 | space_RF ={
'n_estimators' : hp.choice('n_estimators', np.arange(10, 200, dtype=int)) ,
'max_depth' : hp.choice("max_depth", np.arange(3, 15, dtype=int)) ,
'min_samples_split' : hp.choice("min_samples_split", np.arange(20, 100, dtype=int)) ,
'min_samples_leaf' : hp.choice("min_samples_leaf", np.arange(10, 100, dtype=int)) ,
'criterion' : hp.choice('criterion', ["gini", "entropy"]),
'class_weight' : hp.choice('class_weight', ['balanced_subsample', None]),
'n_jobs' : -1,
'oob_score' : True,
'random_state' : hp.randint('random_state',2000000)
}
<compute_train_metric> | def read_and_concat_dataset(training_path, test_path):
train = pd.read_csv(training_path)
train['train'] = 1
test = pd.read_csv(test_path)
test['train'] = 0
    data = pd.concat([train, test], ignore_index=True)  # DataFrame.append is deprecated
return train, test, data
train, test, data = read_and_concat_dataset('/kaggle/input/titanic/train.csv', '/kaggle/input/titanic/test.csv')
data = data.set_index('PassengerId')
data.info() | Titanic - Machine Learning from Disaster |
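`space_RF` mixes stochastic nodes (`hp.choice`, `hp.randint`) with fixed values; `hyperopt.pyll.stochastic.sample`, imported earlier, draws one concrete parameter dict from such a space, which is what the objective functions below do before building a model. A tiny sketch with made-up value lists:

```python
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

space = {
    'n_estimators': hp.choice('n_estimators', [50, 100, 200]),
    'max_depth': hp.choice('max_depth', [3, 6, 9]),
    'n_jobs': -1,  # fixed values pass through unchanged
}
params = sample(space)  # one concrete draw, e.g. {'n_estimators': 200, 'max_depth': 3, 'n_jobs': -1}
print(params)
```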
7,000,072 | def objective_RF(space):
global obj_call_count, cur_best_score, X_train, y_train, test, X_test, y_test, test_id
obj_call_count += 1
    print('\nLightGBM objective call')
    sorted_params = sorted(space.items(), key=lambda z: z[0])
    print('Params:', str.join(' ', ['{}={}'.format(k, v) for k, v in sorted_params if not k.startswith('column:')]))
params = sample(space)
mdl = RandomForestClassifier(**params)
cv_score = cross_val_score(mdl, X_train, y_train ).mean()
    print('CV finished; cv_score={:7.5f}'.format(cv_score))
_model = mdl.fit(X_train, y_train)
predictions = _model.predict_proba(X_test)[:,1]
score = roc_auc_score(y_test, predictions)
print('valid score={}'.format(score))
do_submit = score > 0.64
if score > cur_best_score:
cur_best_score = score
print('NEW BEST SCORE={}'.format(cur_best_score))
do_submit = True
if do_submit:
submit_guid = uuid.uuid4()
print('Compute submissions guid={}'.format(submit_guid))
y_submission = _model.predict_proba(test_ext)[:,1]
submission_filename = 'rf_score_{:13.11f}_submission_guid_{}.csv'.format(score,submit_guid)
pd.DataFrame(
{'ids':test_id, 'prob':y_submission}
).to_csv(submission_filename, index=False)
loss = 1 - score
return {'loss': loss, 'status': STATUS_OK}<choose_model_class> | def counting_values(data, variable1, variable2):
return data[[variable1, variable2]][data[variable2].isnull() ==False].groupby([variable1], as_index=False ).mean().sort_values(by=variable2, ascending=False ) | Titanic - Machine Learning from Disaster |
7,000,072 | trials = Trials()
best = fmin(fn=objective_RF,
space=space_RF,
algo=HYPEROPT_ALGO,
max_evals=N_HYPEROPT_PROBES,
trials=trials,
verbose=1)
print('-'*50)
print('The best params for RF:')
print(best)
print('\n')<define_search_space> | counting_values(data, 'Sex','Survived' ) | Titanic - Machine Learning from Disaster |
7,000,072 | space_XGB ={
'max_depth' : hp.choice("max_depth", np.arange(5, 15,dtype=int)) ,
'learning_rate' : hp.loguniform('learning_rate', -4.9, -3.0),
'n_estimators' : hp.choice('n_estimators', np.arange(10, 100,dtype=int)) ,
'objective' : 'binary:logistic',
'booster' : 'gbtree',
'reg_alpha' : hp.uniform('reg_alpha', 1e-5, 1e-1),
'reg_lambda' : hp.uniform('reg_lambda', 1e-5, 1e-1),
'colsample_bytree' : hp.uniform('colsample_bytree', 0.5, 0.8),
'min_child_weight' : hp.uniform('min_child_weight', 0.5, 0.8),
'random_state' : hp.randint('random_state',2000000)
}<compute_train_metric> | data['Women'] = np.where(data.Sex=='female',1,0)
comparing(data, 'Women','Survived' ) | Titanic - Machine Learning from Disaster |
7,000,072 | def objective_XGB(space):
global obj_call_count, cur_best_score, X_train, y_train, test, X_test, y_test, test_id
obj_call_count += 1
    print('\nLightGBM objective call')
    sorted_params = sorted(space.items(), key=lambda z: z[0])
    print('Params:', str.join(' ', ['{}={}'.format(k, v) for k, v in sorted_params if not k.startswith('column:')]))
params = sample(space)
mdl = XGBClassifier(**params)
cv_score = cross_val_score(mdl, X_train, y_train ).mean()
    print('CV finished; cv_score={:7.5f}'.format(cv_score))
_model = mdl.fit(X_train, y_train, eval_set=[(X_train, y_train),(X_test, y_test)], eval_metric='auc',verbose=True,early_stopping_rounds =30)
params.update({'n_estimators': _model.best_iteration})
predictions = _model.predict_proba(X_test)[:,1]
score = roc_auc_score(y_test, predictions)
print('valid score={}'.format(score))
do_submit = score > 0.64
if score > cur_best_score:
cur_best_score = score
print('NEW BEST SCORE={}'.format(cur_best_score))
do_submit = True
if do_submit:
submit_guid = uuid.uuid4()
print('Compute submissions guid={}'.format(submit_guid))
y_submission = _model.predict_proba(test_ext)[:,1]
submission_filename = 'xgb_score_{:13.11f}_submission_guid_{}.csv'.format(score,submit_guid)
pd.DataFrame(
{'ids':test_id, 'prob':y_submission}
).to_csv(submission_filename, index=False)
loss = 1 - score
return {'loss': loss, 'status': STATUS_OK}<choose_model_class> | data.isnull().sum() | Titanic - Machine Learning from Disaster |
7,000,072 | trials = Trials()
best = fmin(fn=objective_XGB,
space=space_XGB,
algo=HYPEROPT_ALGO,
max_evals=N_HYPEROPT_PROBES,
trials=trials,
verbose=1)
print('-'*50)
print('The best params for XGB:')
print(best)
print('\n')<create_dataframe> | data.isnull().sum() | Titanic - Machine Learning from Disaster |
7,000,072 | train_stack = train_backup.copy()
test_stack = test_backup.copy()<filter> | data.groupby('Pclass' ).Fare.mean()
data.Fare = data.Fare.fillna(0 ) | Titanic - Machine Learning from Disaster |
7,000,072 | meta_data = get_meta(train_stack)
col_ordinal = meta_data[(meta_data.level == 'ordinal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_nominal = meta_data[(meta_data.level == 'nominal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_interval = meta_data[(meta_data.level == 'interval')&(meta_data.keep)&(meta_data.role != 'target')].index
col_binary = meta_data[(meta_data.level == 'binary')&(meta_data.keep)&(meta_data.role != 'target')].index
<correct_missing_values> | print(data.Embarked.value_counts())
data.Embarked = data.Embarked.fillna('S' ) | Titanic - Machine Learning from Disaster |
7,000,072 | train_stack = train_stack.replace(-1, np.NaN)
d_median = train_stack.median(axis=0)
d_mean = train_stack.mean(axis=0)
train_stack = train_stack.fillna(-1 )<import_modules> | data.Cabin = data.Cabin.fillna('Unknown_Cabin')
data['Cabin'] = data['Cabin'].str[0] | Titanic - Machine Learning from Disaster |
7,000,072 | from sklearn import preprocessing<categorify> | print(data.Cabin.value_counts() ) | Titanic - Machine Learning from Disaster |
7,000,072 | def transform(df, ohe, d_median, d_mean):
dcol = [c for c in d_median.index if c in d_mean.index and c !='target']
for c in dcol:
if 'bin_' not in c:
df[c+'_median_range'] =(df[c].values > d_median[c])
df[c+'_mean_range'] =(df[c].values > d_mean[c])
    for c in ohe:
        if len(ohe[c]) > 2 and len(ohe[c]) < 7:
            for val in ohe[c]:
                df[c+'_oh_' + str(val)] = (df[c].values == val)
return df<categorify> | data.groupby('Pclass' ).Cabin.value_counts() | Titanic - Machine Learning from Disaster |
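The `_median_range`/`_mean_range` columns created by `transform` are binary threshold features: whether a value exceeds the train median or mean. A minimal sketch with a hypothetical column:

```python
import pandas as pd

df = pd.DataFrame({"income": [1000.0, 2500.0, 4000.0, 8000.0]})  # hypothetical column
d_median = df.median(axis=0)
d_mean = df.mean(axis=0)

# Binary flags: value above the (train) median / mean
df["income_median_range"] = df["income"].values > d_median["income"]
df["income_mean_range"] = df["income"].values > d_mean["income"]
print(df)
```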
7,000,072 | train_stack = transform(train_stack, one_hot,d_median, d_mean )<categorify> | data['Cabin'] = np.where(( data.Pclass==1)&(data.Cabin=='U'),'C',
np.where(( data.Pclass==2)&(data.Cabin=='U'),'D',
np.where(( data.Pclass==3)&(data.Cabin=='U'),'G',
np.where(data.Cabin=='T','C',data.Cabin)))) | Titanic - Machine Learning from Disaster |
7,000,072 | test_stack = transform(test_stack, one_hot, d_median, d_mean)<feature_engineering> | data['Title'] = data.Name.str.extract(r'([A-Za-z]+)\.', expand=False)
pd.crosstab(data['Title'], data['Sex'])
data = data.drop('Name',axis=1 ) | Titanic - Machine Learning from Disaster |
7,000,072 | train_stack = create_extra_features(train_stack)
test_stack = create_extra_features(test_stack)
train_stack['bin_sum'] = train_stack[col_binary].sum(axis=1)
test_stack['bin_sum'] = test_stack[col_binary].sum(axis=1 )<define_variables> | data['Title'] = np.where(( data.Title=='Capt')|(data.Title=='Countess')|(data.Title=='Don')|(data.Title=='Dona')
|(data.Title=='Jonkheer')|(data.Title=='Lady')|(data.Title=='Sir')|(data.Title=='Major')|(data.Title=='Rev')|(data.Title=='Col'),'Other',data.Title)
data['Title'] = data['Title'].replace('Ms','Miss')
data['Title'] = data['Title'].replace('Mlle','Miss')
data['Title'] = data['Title'].replace('Mme','Mrs' ) | Titanic - Machine Learning from Disaster |
7,000,072 | col = [c for c in train_stack.columns if c not in ['id','target']]
col = [c for c in col if not c.startswith('ps_calc_')]<count_duplicates> | data.groupby('Title' ).Age.mean() | Titanic - Machine Learning from Disaster |
7,000,072 | dups = train_stack[train_stack.duplicated(subset=col, keep=False)]<filter> | data['Age'] = np.where(( data.Age.isnull())&(data.Title=='Master'),5,
np.where(( data.Age.isnull())&(data.Title=='Miss'),22,
np.where(( data.Age.isnull())&(data.Title=='Mr'),32,
np.where(( data.Age.isnull())&(data.Title=='Mrs'),37,
np.where(( data.Age.isnull())&(data.Title=='Other'),45,
np.where(( data.Age.isnull())&(data.Title=='Dr'),44,data.Age)))))) | Titanic - Machine Learning from Disaster |
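The nested `np.where` chain above hard-codes the per-title mean ages printed by the previous cell; a groupby-transform fill computes them from the data directly and stays in sync with it. A sketch on a toy frame with the same `Title`/`Age` columns:

```python
import numpy as np
import pandas as pd

data = pd.DataFrame({"Title": ["Mr", "Mr", "Miss", "Miss"],
                     "Age": [30.0, np.nan, 22.0, np.nan]})
# Fill each missing Age with the mean Age of rows sharing the same Title
data["Age"] = data["Age"].fillna(data.groupby("Title")["Age"].transform("mean"))
print(data)
```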
7,000,072 | train_stack = train_stack[~(train_stack.index.isin(dups.index)) ]<define_variables> | data['FamilySize'] = data.SibSp + data.Parch + 1
data['Mother'] = np.where(( data.Title=='Mrs')&(data.Parch >0),1,0)
data['Free'] = np.where(data['Fare']==0, 1,0)
data = data.drop(['SibSp','Parch','Sex'],axis=1 ) | Titanic - Machine Learning from Disaster |
7,000,072 | target_stack = train_stack['target']<drop_column> | TypeOfTicket = []
for i in range(len(data.Ticket)) :
ticket = data.Ticket.iloc[i]
for c in string.punctuation:
ticket = ticket.replace(c,"")
splited_ticket = ticket.split(" ")
if len(splited_ticket)== 1:
TypeOfTicket.append('NO')
else:
TypeOfTicket.append(splited_ticket[0])
data['TypeOfTicket'] = TypeOfTicket
data.TypeOfTicket.value_counts()
data['TypeOfTicket'] = np.where(( data.TypeOfTicket!='NO')&(data.TypeOfTicket!='PC')&(data.TypeOfTicket!='CA')&
(data.TypeOfTicket!='A5')&(data.TypeOfTicket!='SOTONOQ'),'other',data.TypeOfTicket)
data = data.drop('Ticket',axis=1 ) | Titanic - Machine Learning from Disaster |
7,000,072 | train_stack = train_stack[col]<drop_column> | counting_values(data, 'Title','Survived' ) | Titanic - Machine Learning from Disaster |
7,000,072 | test_stack = test_stack[col]<filter> | TypeOfTicket vs Survived | Titanic - Machine Learning from Disaster |
7,000,072 | meta_data = get_meta(train_stack)
col_ordinal = meta_data[(meta_data.level == 'ordinal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_nominal = meta_data[(meta_data.level == 'nominal')&(meta_data.keep)&(meta_data.role != 'target')].index
col_interval = meta_data[(meta_data.level == 'interval')&(meta_data.keep)&(meta_data.role != 'target')].index
col_binary = meta_data[(meta_data.level == 'binary')&(meta_data.keep)&(meta_data.role != 'target')].index
<prepare_x_and_y> | counting_values(data, 'TypeOfTicket','Survived' ) | Titanic - Machine Learning from Disaster |
7,000,072 | X_stack = pd.concat([train_stack[col_interval],train_stack[col_ordinal], pd.get_dummies(train_stack[col_binary])], axis=1)
test_stack_val = pd.concat([test_stack[col_interval],test_stack[col_ordinal], pd.get_dummies(test_stack[col_binary])], axis=1)
y_stack = target_stack<drop_column> | counting_values(data, 'Cabin','Survived' ) | Titanic - Machine Learning from Disaster |
7,000,072 | X_stack = X_stack.drop(columns=['gender_-1','facebook_profile_-1'], axis = 1)
<compute_test_metric> | data = pd.get_dummies(data)
data.head(10 ) | Titanic - Machine Learning from Disaster |
7,000,072 | cross_val_model(X_stack, y_stack, rf_model )<compute_test_metric> | trainX, testX, trainY, testY = train_test_split(data[data.Survived.isnull() ==False].drop('Survived',axis=1),data.Survived[data.Survived.isnull() ==False],test_size=0.30, random_state=2019 ) | Titanic - Machine Learning from Disaster |