| kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
13,290,484 | def build_emb_matrix(word_index, embeddings_index, dim):
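# Builds a (len(word_index)+1) x (dim+1) matrix: pretrained vectors fill the first dim columns; only row 0 (the padding index) gets the extra flag column set to 1, and out-of-vocabulary rows stay all zero.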
embedding_matrix = np.zeros(( len(word_index)+ 1, dim + 1), dtype=np.float32)
embedding_matrix[0, dim] = 1
for word, i in tqdm(word_index.items() , disable=not(VERBOSE)) :
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i, :dim] = embedding_vector
continue
return embedding_matrix<compute_train_metric> | final_tree = DecisionTreeClassifier(max_depth=4,min_impurity_decrease=.004)
final_tree.fit(imputed_X,y ) | Titanic - Machine Learning from Disaster |
13,290,484 | %%time
embedding_matrix = np.concatenate([build_emb_matrix(word_index, embed_fasttext, FASTTEXT_DIM),
build_emb_matrix(word_index, embed_word2vec, WORD2VEC_DIM)], -1)
del embed_fasttext, embed_word2vec
_ = gc.collect()
print("Embeddings memory usage", sys.getsizeof(embedding_matrix)/(1024*1024), "MB" )<load_from_csv> | predictions=final_tree.predict(imputed_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
13,218,661 | class SimpleReader() :
def __init__(self, file, y_train, batch_size, maxlen, index_filter=lambda x : True):
self.fn = file
self.file = open(file, 'r')
self.k = 0
self.y_train = y_train
self.batch_size = batch_size
self.maxlen = maxlen
self.index_filter = index_filter
def flow(self):
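# Infinite batch generator: reads token-id lines, keeps rows passing index_filter, rewinds the file at end-of-file, and yields (padded X, y) every batch_size rows.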
X = []
y = []
self.k = 0
while True:
line = self.file.readline().replace('\n', '')
if line is not None and len(line)> 0:
if self.index_filter(self.k):
X.append(list(map(int, line.split(' '))))
y.append(self.y_train[self.k])
self.k += 1
else:
self.file.close()
self.file = open(self.fn, 'r')
self.k = 0
if len(X)== self.batch_size:
X = pad_sequences(X, maxlen=self.maxlen, truncating='post', padding='post')
y = np.array(y)
yield(( X, y))
X = []
y = []<choose_model_class> | path = '/kaggle/input/titanic/train.csv'
train_data = pd.read_csv(path)
train_data | Titanic - Machine Learning from Disaster |
13,218,661 | def create_model(embedding_matrix):
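# Architecture: frozen pretrained embeddings concatenated with a small trainable embedding, a BiLSTM followed by two residual BiGRU blocks, max+average pooling, a residual dense layer, and a softmax output.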
input_tensor = Input(shape=(None,))
output_tensor = input_tensor
output_tensor = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1], weights=[embedding_matrix], trainable=False )(output_tensor)
trainable_embed = Embedding(embedding_matrix.shape[0], TRAINABLE_EMBED_SIZE, trainable=True )(input_tensor)
output_tensor = Concatenate(-1 )([output_tensor, trainable_embed])
output_tensor = SpatialDropout1D(0.3 )(output_tensor)
output_tensor = Bidirectional(CuDNNLSTM(384, return_sequences=True))(output_tensor)
output_tensor = Add()([Bidirectional(CuDNNGRU(384, return_sequences=True))(output_tensor), output_tensor])
output_tensor = Add()([Bidirectional(CuDNNGRU(384, return_sequences=True))(output_tensor), output_tensor])
output_tensor = Concatenate()([GlobalMaxPooling1D()(output_tensor), GlobalAveragePooling1D()(output_tensor)])
output_tensor = Add()([Dense(384*4, activation='elu' )(output_tensor), output_tensor])
output_tensor = Dense(CATEGORY_CNT, activation='softmax' )(output_tensor)
model = Model(inputs=input_tensor, outputs=output_tensor)
return model<train_model> | path = '/kaggle/input/titanic/test.csv'
test_data = pd.read_csv(path)
test_data | Titanic - Machine Learning from Disaster |
13,218,661 | %%time
train_generator = SimpleReader(".. /input/bonus-x-txt-fasttext-unk/X_train_fasttext_unk.txt", y_train, BATCH_SIZE, MAXLEN, index_filter=lambda x : x % 30 != 7)
val_generator = SimpleReader(".. /input/bonus-x-txt-fasttext-unk/X_train_fasttext_unk.txt", y_train, BATCH_SIZE, MAXLEN, index_filter=lambda x : x % 30 == 7)
lr_schedule = LearningRateScheduler(lambda epoch, lr: EXP_DECAY_COEF ** epoch * 1e-3, verbose=VERBOSE)
model = create_model(embedding_matrix)
model.compile(optimizer=Adam(lr=1e-3), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(
train_generator.flow() ,
validation_data=val_generator.flow() ,
steps_per_epoch=STEPS_PER_EPOCH,
validation_steps=VAL_STEPS_PER_EPOCH,
epochs=EPOCHS,
verbose=VERBOSE,
callbacks=[lr_schedule]
)<predict_on_test> | path = '/kaggle/input/titanic/gender_submission.csv'
gender_submission_data = pd.read_csv(path)
gender_submission_data | Titanic - Machine Learning from Disaster |
13,218,661 | predictions = np.ones(( len(itemid),), dtype=int)* -1
gen = SimpleReader("../input/bonus-x-txt-fasttext-unk/X_test_fasttext_unk.txt", np.arange(0, len(itemid), 1), PREDICT_BATCH_SIZE, MAXLEN)
flow = gen.flow()
for i in tqdm(range(( len(itemid)+ PREDICT_BATCH_SIZE - 1)// PREDICT_BATCH_SIZE), disable=not(VERBOSE)) :
batch = next(flow)
batch_predictions = model.predict_on_batch(batch[0])
predictions[batch[1]] = batch_predictions.argmax(-1 )<load_from_csv> | df = pd.concat([train_data, test_data], ignore_index=True)
df.reset_index(drop=True, inplace=True)
df | Titanic - Machine Learning from Disaster |
13,218,661 | submission = pd.read_csv(".. /input/texts-classification-ml-hse-2019/sample_submission.csv")
submission['Category'] = predictions
submission['Id'] = itemid<feature_engineering> | Titanic - Machine Learning from Disaster | |
13,218,661 | submission['Category'] = submission['Category'].apply(lambda x : b_cat_map[x] )<save_to_csv> | def NaN_info(df):
global null_view
try:
null_view = df[[col for col in df.columns if df[col].isna().sum() > 0]].isna().sum().sort_values(ascending = True)
null_view = pd.DataFrame(null_view, columns=['NANs'])
null_view[['PERCENT']] = null_view.NANs.apply(lambda x: round(( x/len(df)) *100, 2))
null_view[['TYPE']] = df.dtypes
except:
return null_view
return null_view
NaN_info(df ) | Titanic - Machine Learning from Disaster |
13,218,661 | submission.to_csv("submission.csv", index=False )<save_to_csv> | df['Survived'].value_counts(dropna=False ) | Titanic - Machine Learning from Disaster |
13,218,661 | submission.to_csv("submission.csv", index=False )<load_from_csv> | df['Survived'].replace(to_replace=0, value='no', inplace=True)
df['Survived'].replace(to_replace=1, value='yes', inplace=True)
df['Survived'] = df['Survived'].astype('object')
df['Survived'].value_counts(dropna=False ) | Titanic - Machine Learning from Disaster |
13,218,661 | train_data = pd.read_csv('/kaggle/input/nmlo-contest-3/train.csv')
test_data = pd.read_csv('/kaggle/input/nmlo-contest-3/test.csv')
train = train_data.copy()
test = test_data.copy()<count_missing_values> | def NaN_predict(df,
skip_features_with_missing_data_percentage = 90,
include_features_as_predictors_where_pec_miss_data_less = 10,
apply_fast_predict_where_missing_data_less_than_percent = 1
):
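# Model-based imputation: for every column with missing values, fit a LightGBM regressor/classifier on the complete rows (tuned via RandomizedSearchCV unless only a tiny share is missing) and predict the gaps.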
if not sys.warnoptions:
warnings.simplefilter("ignore")
global counter_all_predicted_values
counter_all_predicted_values = 0
global numeric_features
numeric_features = []
PARAMS = {'num_leaves': np.arange(50, 1000, step=50),
'max_depth': np.arange(10, 26, step=2),
'learning_rate': [0.001, 0.01, 0.1],
'n_estimators': np.arange(500, 2000, step=100),
'subsample': [0.1, 0.2, 0.3, 0.4, 0.5],
'feature_fraction': [0.1, 0.2, 0.3, 0.4, 0.5],
'bagging_fraction': [0.1, 0.2, 0.3, 0.4, 0.5],
'bagging_seed': np.arange(1, 20, step=1),
'lambda_l1': [0.1, 0.2, 0.3],
'lambda_l2': [0.1, 0.2, 0.3],
'min_child_samples': np.arange(5, 50, step=5),
'min_split_gain': [0.00001, 0.0001, 0.001]}
def NaN_info(df):
global null_view
try:
null_view = df[[col for col in df.columns if df[col].isna().sum() > 0]].isna().sum().sort_values(ascending = True)
null_view = pd.DataFrame(null_view, columns=['NANs'])
null_view[['PERCENT']] = null_view.NANs.apply(lambda x: round(( x/len(df)) *100, 2))
null_view[['TYPE']] = df.dtypes
except:
return null_view
return null_view
def encoding(work_predictors, df):
feature_power = 0.5
for j in work_predictors:
el_type = df[j].dtype
if el_type == 'object':
df[j].replace(np.nan, '0', inplace=True)
labelencoder = LabelEncoder()
df.loc[:, j] = labelencoder.fit_transform(df.loc[:, j])
else:
df[j] = df[j]**feature_power
return df, work_predictors
def imput_missing_value_to_main_df(df, miss_indeces, pred_miss, el):
counter = 0
for idx in miss_indeces:
df.loc[idx, el] = pred_miss[counter]
counter += 1
return df
def hyperparms_tuning_regressor(X_train, X_test, y_train, y_test, n_iter_for_RandomizedSearchCV, PARAMS):
global best_params
lgbm = lgb.LGBMRegressor(n_jobs = -1)
lgbm_randomized_mse = RandomizedSearchCV(estimator=lgbm,
param_distributions=PARAMS,
n_iter=n_iter_for_RandomizedSearchCV,
scoring='neg_mean_squared_error',
cv=3,
verbose=0,
n_jobs = -1)
lgbm_randomized_mse.fit(X_train, y_train)
best_params = lgbm_randomized_mse.best_params_
print(best_params)
pred_test_lgb = lgbm_randomized_mse.predict(X_test)
MAE = mean_absolute_error(y_test,pred_test_lgb)
y_te = list(round(y_test[:10], 1))
y_pred = list(np.round(pred_test_lgb[:10], 1))
print(f'first 10 y_test: {y_te}')
print(f'first 10 y_pred: {y_pred}')
print(f'mean_absolute_error: {MAE}')
return best_params
def predict_regressor(best_params, X, y, miss_df):
global pred_miss
lgbm = lgb.LGBMRegressor(**best_params, n_jobs = -1)
lgbm.fit(X, y)
pred_miss = list(lgbm.predict(miss_df))
print('-------------------------------')
print(f"The first 100 predicted missing values:
{pred_miss[:100]}")
return pred_miss
def hyperparms_tuning_classifier(X_train, X_test, y_train, y_test, n_iter_for_RandomizedSearchCV, PARAMS):
global best_params
lgbm = lgb.LGBMClassifier(n_jobs = -1)
lgbm_randomized_mse = RandomizedSearchCV(estimator=lgbm,
param_distributions=PARAMS,
n_iter=n_iter_for_RandomizedSearchCV,
scoring='f1_weighted',
cv=3,
verbose=0,
n_jobs = -1)
lgbm_randomized_mse.fit(X_train, y_train)
best_params = lgbm_randomized_mse.best_params_
print(best_params)
pred_test_lgb = lgbm_randomized_mse.predict(X_test)
accuracy = accuracy_score(y_test, pred_test_lgb)
print(f'first 10 y_test: {y_test[:10]}')
print(f'first 10 y_pred: {pred_test_lgb[:10]}')
f1 = f1_score(y_test, pred_test_lgb, average='weighted')
print(f'accuracy_score: {accuracy}')
print(f'f1_score(weighted): {f1}')
return best_params
def predict_classifier(best_params, X, y, miss_df):
global pred_miss
lgbm = lgb.LGBMClassifier(**best_params, n_jobs = -1)
lgbm.fit(X, y)
pred_miss = list(lgbm.predict(miss_df))
print('-------------------------------')
print(f"The first 100 predicted missing values:
{pred_miss[:100]}")
return pred_miss
def light_predict_regressor(X, y, miss_df):
global pred_miss
lgbm = lgb.LGBMRegressor()
lgbm.fit(X, y)
pred_miss = list(lgbm.predict(miss_df))
print('-------------------------------')
print(f"The first 100 predicted missing values:
{pred_miss[:100]}")
return pred_miss
def light_predict_classifier(X, y, miss_df):
global pred_miss
lgbm = lgb.LGBMClassifier()
lgbm.fit(X, y)
pred_miss = list(lgbm.predict(miss_df))
print('-------------------------------')
print(f"The first 100 predicted missing values:
{pred_miss[:100]}")
return pred_miss
def numeric_features(df):
num_features = [feature for feature in df.columns if df[feature].dtype in ['int64', 'float64']]
return num_features
def integer_features(df):
global int_features
int_features = []
int_features = [feature for feature in df.columns if df[feature].dtype in ['int64']]
return int_features
print(NaN_info(df))
print('\n')
all_features = list(df.columns)
df_indeces = list(df.index)
integer_features(df)
delete_miss_features = list(( null_view.loc[null_view['PERCENT'] > skip_features_with_missing_data_percentage] ).index)
print(f'Exclude from the prediction, because missing data more than \
{skip_features_with_missing_data_percentage}% :\n{delete_miss_features}')
print('')
all_miss_features = list(null_view.index)
for delete_feature in delete_miss_features:
all_miss_features.remove(delete_feature)
for el in all_miss_features:
print('\n')
NaN_info(df)
lot_of_miss_features = list(( null_view.loc[null_view['PERCENT'] > include_features_as_predictors_where_pec_miss_data_less] ).index)
now_predictors = list(set(all_features)-set(lot_of_miss_features))
work_predictors = list(set(now_predictors)- set([el]))
miss_indeces = list(( df[pd.isnull(df[el])] ).index)
miss_df = df.iloc[miss_indeces][:]
miss_df = miss_df[work_predictors]
encoding(work_predictors, df=miss_df)
work_indeces = list(set(df_indeces)- set(miss_indeces))
work_df = df.iloc[work_indeces][:]
encoding(work_predictors, df=work_df)
X = work_df[work_predictors]
y = work_df[el]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
feature_type = df[el].dtypes
percent = null_view['PERCENT'][el]
print(f'feature: {el}, type: {feature_type}, missing values: {percent}%')
print(f'X.shape: {X.shape}, y.shape: {y.shape}')
print(f'Unused for prediction, because missing data more than \
{include_features_as_predictors_where_pec_miss_data_less}% :')
print(lot_of_miss_features)
print('')
if percent < apply_fast_predict_where_missing_data_less_than_percent:
print('light prediction without tuning hyperparameters, because missing data less than 1 %')
if feature_type == 'object' or feature_type == 'bool':
print('FAST_classifier:')
light_predict_classifier(X, y, miss_df)
counter_all_predicted_values += len(miss_indeces)
imput_missing_value_to_main_df(df, miss_indeces, pred_miss, el)
print(f'miss_indexes:\n{miss_indeces}')
elif feature_type == 'float64' or feature_type == 'int64':
print('FAST_regressor:')
light_predict_regressor(X, y, miss_df)
counter_all_predicted_values += len(miss_indeces)
imput_missing_value_to_main_df(df, miss_indeces, pred_miss, el)
print(f'miss_indexes:\n{miss_indeces}')
else:
print(f"unprocessed feature: {el} - {feature_type} type")
else:
n_iter_for_RandomizedSearchCV = int(300 + percent * 5)
print(f'Iteration for RandomizedSearchCV: {n_iter_for_RandomizedSearchCV}')
if feature_type == 'object' or feature_type == 'bool':
print('ADVANCED classifier evaluation:')
labelencoder = LabelEncoder()
y_train = labelencoder.fit_transform(y_train)
y_test = labelencoder.fit_transform(y_test)
hyperparms_tuning_classifier(X_train, X_test, y_train, y_test, n_iter_for_RandomizedSearchCV, PARAMS)
predict_classifier(best_params, X, y, miss_df)
counter_all_predicted_values += len(miss_indeces)
imput_missing_value_to_main_df(df, miss_indeces, pred_miss, el)
elif feature_type == 'float64' or feature_type == 'int64':
print('ADVANCED regressor evaluation:')
hyperparms_tuning_regressor(X_train, X_test, y_train, y_test, n_iter_for_RandomizedSearchCV, PARAMS)
print(f'mean for {el}: {df[el].mean() }')
predict_regressor(best_params, X, y, miss_df)
counter_all_predicted_values += len(miss_indeces)
imput_missing_value_to_main_df(df, miss_indeces, pred_miss, el)
else:
print(f"unprocessed feature: {el} - {feature_type} type")
for feature in int_features:
df[[feature]] = df[[feature]].astype('int64')
print('\n')
print(f'These features have not been processed, because missing data more than {skip_features_with_missing_data_percentage}%')
print(NaN_info(df))
print('\n')
print(f'{counter_all_predicted_values} values have been predicted and replaced')
print('\n')
return df | Titanic - Machine Learning from Disaster |
13,218,661 | train.isnull().sum()<count_missing_values> | NaN_predict(df,
skip_features_with_missing_data_percentage = 70 ) | Titanic - Machine Learning from Disaster |
13,218,661 | train.isnull().sum()<import_modules> | target_column = ['Survived']
predictors = list(set(list(df.columns)) -set(target_column))
predictors | Titanic - Machine Learning from Disaster |
13,218,661 | from sklearn.preprocessing import StandardScaler<normalization> | df['Age'] = df['Age'].astype('int64')
df['Fare'] = df['Fare'].astype('int64' ) | Titanic - Machine Learning from Disaster |
13,218,661 | cases_scaled = StandardScaler().fit_transform(train['cases'][:,np.newaxis]);
low_range = cases_scaled[cases_scaled[:,0].argsort() ][:10]
high_range= cases_scaled[cases_scaled[:,0].argsort() ][-10:]
print('outer range (low) of the distribution:')
print(low_range)
print('\nouter range (high) of the distribution:')
print(high_range )<import_modules> | for el in predictors:
print(f'======================= {el} =======================')
print(df[el].value_counts(dropna=False)) | Titanic - Machine Learning from Disaster |
13,218,661 | from scipy.stats import norm, skew
from scipy import stats<count_values> | df['AgeCategory'] = df['Age']/5
df['AgeCategory'] = df['AgeCategory'].astype('int64')
df['AgeCategory'] | Titanic - Machine Learning from Disaster |
13,218,661 | train['cases'].replace([0], 1, inplace = True)
train['ed'].replace([0], 1, inplace = True)
train['inc'].replace([0], 1, inplace = True)
train['pop'].replace([0], 1, inplace = True)
print(( train['cases']==0 ).value_counts())
test['ed'].replace([0], 1, inplace = True)
test['inc'].replace([0], 1, inplace = True)
test['pop'].replace([0], 1, inplace = True )<feature_engineering> | indices_1 = list(np.where([df['Ticket'].str.contains(r'^\w.+\s', regex=True)])[1])
indices_2 = list(np.where([df['Ticket'].str.contains(r'^\w.\s', regex=True)])[1])
indices_3 = list(np.where([df['Ticket'].str.contains(r'^\w\s', regex=True)])[1])
indices = list(set(indices_1 + indices_2 + indices_3))
print(len(indices))
print(indices ) | Titanic - Machine Learning from Disaster |
13,218,661 | train['cases'] = np.log(train['cases'])
<feature_engineering> | for el in indices[:20]:
ticket = df.loc[el, 'Ticket']
print(ticket ) | Titanic - Machine Learning from Disaster |
13,218,661 | res = stats.probplot(train['inc'], plot=plt)
train['inc'] = np.log(train['inc'])
test['inc'] = np.log(test['inc'])
<feature_engineering> | for el in indices:
ticket = df.loc[el, 'Ticket']
ticket = ticket.split(sep=' ', maxsplit=2)
if len(ticket)== 3:
ticket[0] = ticket[0] + ticket[1]
ticket[1] = ticket[2]
del ticket[2]
print(f'{ticket[0]} {ticket[1]}')
df.loc[el, 'Ticket'] = ticket[1]
df.loc[el, 'TicketSeries'] = ticket[0]
| Titanic - Machine Learning from Disaster |
13,218,661 | res = stats.probplot(train['pop'], plot=plt)
train['pop'] = np.log(train['pop'])
test['pop'] = np.log(test['pop'])
<categorify> | df['TicketSeries'].replace(np.nan, 'without', inplace=True)
df['Ticket'] = df['Ticket'].astype('int64' ) | Titanic - Machine Learning from Disaster |
13,218,661 | train = pd.get_dummies(train)
print(train.shape )<import_modules> | values = df['Ticket'].value_counts()
print(values[:40] ) | Titanic - Machine Learning from Disaster |
13,218,661 | from sklearn.model_selection import train_test_split<prepare_x_and_y> | LINE = df.loc[df['Ticket'] == 'LINE']
indeses = list(LINE.index)
indeses | Titanic - Machine Learning from Disaster |
13,218,661 | train_y = train['cases']
train.drop('id', axis = 1, inplace = True)
train.drop('cases', axis = 1, inplace = True )<split> | for el in indeses:
print(el, 'LINE')
df.loc[el, 'Ticket'] = 1000
df.loc[el, 'TicketSeries'] = 'LINE' | Titanic - Machine Learning from Disaster |
13,218,661 | X_train, X_test, y_train, y_test = train_test_split(train, train_y, test_size=0.2, shuffle=True )<import_modules> | df['TicketSeries'].replace(np.nan, 'without', inplace=True)
df['Ticket'] = df['Ticket'].astype('int64' ) | Titanic - Machine Learning from Disaster |
13,218,661 | from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb<compute_train_metric> | indexes = list(df.index)
lastName = []
for el in indexes:
last_name = df.loc[el, 'Name']
last_name = last_name.split(sep=',', maxsplit=1)
last_name = last_name[0]
lastName.append(last_name)
df.loc[el, 'LastName'] = last_name
lastName = list(set(lastName))
print(f'Total lastName: {len(lastName)}')
print('')
print(f'{lastName[:10]}...' ) | Titanic - Machine Learning from Disaster |
13,218,661 | n_folds = 5
def rmsle_cv(model):
kf = KFold(n_folds, shuffle=True, random_state=42 ).get_n_splits(X_train)
rmse= np.sqrt(-cross_val_score(model, X_train.values, y_train, scoring="neg_mean_squared_error", cv = kf))
return(rmse )<choose_model_class> | LastName = df['LastName'].value_counts()
LastName[:20] | Titanic - Machine Learning from Disaster |
13,218,661 | lasso = make_pipeline(RobustScaler() , Lasso(alpha =0.0005, random_state=1))
<choose_model_class> | b = df['Cabin'].value_counts()
b[:10] | Titanic - Machine Learning from Disaster |
13,218,661 | ENet = make_pipeline(RobustScaler() , ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
<choose_model_class> | indexes = list(df.index)
for el in indexes:
Cabin = df.loc[el, 'Cabin']
level = Cabin[0]
df.loc[el, 'CabinLevel'] = level | Titanic - Machine Learning from Disaster |
13,218,661 | KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
<choose_model_class> | df['CabinLevel'].value_counts(dropna=False ) | Titanic - Machine Learning from Disaster |
13,218,661 | GBoost = GradientBoostingRegressor(n_estimators=500, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5 )<choose_model_class> | target_column = ['Survived']
predictors = list(set(list(df.columns)) -set(target_column))
predictors | Titanic - Machine Learning from Disaster |
13,218,661 | model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1 )<choose_model_class> | for el in predictors:
el_type = df[el].dtype
if el_type == 'object':
labelencoder = LabelEncoder()
df.loc[:, el] = labelencoder.fit_transform(df.loc[:, el])
df[el] = df[el]**0.5
| Titanic - Machine Learning from Disaster |
13,218,661 | model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,
learning_rate=0.05, n_estimators=720,
max_bin = 55, bagging_fraction = 0.8,
bagging_freq = 5, feature_fraction = 0.2319,
feature_fraction_seed=9, bagging_seed=9,
min_data_in_leaf =6, min_sum_hessian_in_leaf = 11 )<train_model> | df['Survived'].replace(to_replace='no', value=0, inplace=True)
df['Survived'].replace(to_replace='yes', value=1, inplace=True)
df['Survived'] = df['Survived'].astype('int64')
df['Survived'].head(3 ) | Titanic - Machine Learning from Disaster |
13,218,661 | model_xgb.fit(X_train, y_train )<choose_model_class> | NaN_predict(df ) | Titanic - Machine Learning from Disaster |
13,218,661 | xg = xgb.XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.4603, gamma=0.0468,
gpu_id=-1, importance_type='gain', interaction_constraints='',
learning_rate=0.05, max_delta_step=0, max_depth=3,
min_child_weight=1.7817, missing=np.nan, monotone_constraints='()',
n_estimators=2200, n_jobs=-1, nthread=-1, num_parallel_tree=1,
random_state=7, reg_alpha=0.464, reg_lambda=0.8571,
scale_pos_weight=1, silent=1, subsample=0.5213,
tree_method='exact', validate_parameters=1, verbosity=None)
<compute_test_metric> | target_column = ['Survived']
predictors = list(set(list(df.columns)) -set(target_column))
predictors | Titanic - Machine Learning from Disaster |
13,218,661 | score_lasso = rmsle_cv(lasso)
score_lasso.mean()
print("Lasso score: {:.4f}({:.4f})\n".format(score_lasso.mean() , score_lasso.std()))
<compute_test_metric> | X = df.loc[:, predictors]
X | Titanic - Machine Learning from Disaster |
13,218,661 | score = rmsle_cv(model_xgb)
print("ElasticNet score: {:.4f}({:.4f})
".format(score.mean() , score.std()))
<compute_test_metric> | y = df.loc[:,target_column]
y | Titanic - Machine Learning from Disaster |
13,218,661 | score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | def features_selection(X, y):
if not sys.warnoptions:
warnings.simplefilter("ignore")
global droped_features
droped_features = []
def cross_val(X, y, features):
CV = ShuffleSplit(n_splits=5, test_size=0.25, random_state=0)
test_df = X[features]
model = lgb.LGBMClassifier(random_state=0)
model.fit(test_df, y)
cv_score = cross_val_score(model,
test_df,
y,
cv=CV,
scoring='f1')
cv_score = cv_score.mean()
return cv_score
features = list(X.columns)
SCORES = pd.DataFrame(index = features, columns=['cv_score'])
score_with_all_features = cross_val(X, y, features)
print(f'{score_with_all_features} General cv_score with all features')
cv_score_of_monstrous_feature = 1
while score_with_all_features <= cv_score_of_monstrous_feature:
print('\n')
features = list(X.columns)
SCORES = pd.DataFrame(index = features, columns=['cv_score'])
score_with_all_features = cross_val(X, y, features)
print(f'{len(features)} number of features')
print(f'{score_with_all_features} score with all features')
for without_feature in features:
without_feature = [without_feature]
fit_features = features[:]
fit_features = list(set(fit_features)- set(without_feature))
score = cross_val(X, y, fit_features)
SCORES.loc[without_feature] = score
SCORES = SCORES.sort_values(by=['cv_score'], ascending=False)
print('\n__SORTED SCORES__')
print(SCORES)
monstrous_feature = SCORES.index[0]
cv_score_of_monstrous_feature = SCORES.iloc[0][0]
if score_with_all_features <= cv_score_of_monstrous_feature:
X.drop([monstrous_feature], axis=1, inplace=True)
droped_features.append(monstrous_feature)
print('--------------------------------------------')
print(f' DROP ==== {monstrous_feature}')
print('--------------------------------------------')
else:
print('\n')
print(f'These features have been dropped:\n{droped_features}')
print('\n')
return X, droped_features
| Titanic - Machine Learning from Disaster |
13,218,661 | score = rmsle_cv(model_lgb)
print(" score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | features_selection(X, y ) | Titanic - Machine Learning from Disaster |
13,218,661 | score = rmsle_cv(lasso)
print(" score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<compute_train_metric> | df.drop(droped_features, axis=1, inplace=True ) | Titanic - Machine Learning from Disaster |
13,218,661 | score = rmsle_cv(GBoost)
GBoost.fit(X_train, y_train)
sample = GBoost.predict(test)
sample = np.exp(sample)
print(" score: {:.4f}({:.4f})
".format(score.mean() , score.std()))<predict_on_test> | from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier | Titanic - Machine Learning from Disaster |
13,218,661 | class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
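# Simple ensembling: fit a clone of each base model and average their predictions at inference time.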
def __init__(self, models):
self.models = models
def fit(self, X, y):
self.models_ = [clone(x)for x in self.models]
for model in self.models_:
model.fit(X, y)
return self
def predict(self, X):
predictions = np.column_stack([
model.predict(X)for model in self.models_
])
return np.mean(predictions, axis=1 )<compute_train_metric> | def constriction_hyperparameters_space(X,
y,
algorithm,
params,
n_iter_RandomizedSearchCV=100,
times_for_estimate=10):
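# Runs RandomizedSearchCV several times, collecting each run's best parameters, F1, and CV score into a DataFrame sorted by CV score, to narrow the hyperparameter space by hand.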
global best_parameters
print('scores:\n')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
CV = ShuffleSplit(n_splits=4, test_size=0.25, random_state=0)
estimator = algorithm()
RESULTS_DF = None
PARAMS_DICT = None
PARAMS_DICT = {}
for i in range(0, times_for_estimate):
RANDOMIZED_MODEL = RandomizedSearchCV(estimator=estimator,
param_distributions=params,
n_iter=n_iter_RandomizedSearchCV,
scoring='f1',
cv=CV,
verbose=0,
n_jobs = -1)
RANDOMIZED_FITED_BEST_MODEL = RANDOMIZED_MODEL.fit(X_train, y_train)
best_parameters = RANDOMIZED_FITED_BEST_MODEL.best_params_
pred_test = RANDOMIZED_FITED_BEST_MODEL.predict(X_test)
f1 = f1_score(y_test,pred_test)
print(f' f1: {f1}')
model = algorithm(**best_parameters)
cv_score = cross_val_score(model, X, y, cv=CV)
std_per =(cv_score.std())* 2
cv_score = cv_score.mean()
print(f'cv_score: {cv_score}')
print(f' params: {best_parameters}')
PARAMS_DICT[cv_score] = best_parameters
columns = ['cv_score', 'std_per', 'f1',]
values = []
values.append(cv_score)
values.append(std_per)
values.append(f1)
for k,v in best_parameters.items() :
columns.append(k)
values.append(v)
if RESULTS_DF is None:
RESULTS_DF = pd.DataFrame(columns=columns)
else:
pass
RESULTS_DF.loc[i] = values
print('\n')
RESULTS_DF = RESULTS_DF.sort_values(by='cv_score', ascending = False)
RESULTS_DF.reset_index(drop=True, inplace=True)
return RESULTS_DF, best_parameters | Titanic - Machine Learning from Disaster |
13,218,661 | averaged_models = AveragingModels(models =(ENet, GBoost, KRR, lasso))
score = rmsle_cv(averaged_models)
print(" Averaged base models score: {:.4f}({:.4f})
".format(score.mean() , score.std()))
averaged_models.fit(X_train, y_train)
submission = averaged_models.predict(test)
submission = np.exp(submission)
<train_model> | Titanic - Machine Learning from Disaster | |
13,218,661 | class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
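# Out-of-fold stacking: each base model's holdout-fold predictions become meta-features on which the meta-model is trained, so the meta-model never sees predictions on rows a base model was fit on.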
def __init__(self, base_models, meta_model, n_folds=5):
self.base_models = base_models
self.meta_model = meta_model
self.n_folds = n_folds
def fit(self, X, y):
self.base_models_ = [list() for x in self.base_models]
self.meta_model_ = clone(self.meta_model)
kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
out_of_fold_predictions = np.zeros(( X.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_index, holdout_index in kfold.split(X, y):
instance = clone(model)
self.base_models_[i].append(instance)
instance.fit(X[train_index], y[train_index])
y_pred = instance.predict(X[holdout_index])
out_of_fold_predictions[holdout_index, i] = y_pred
self.meta_model_.fit(out_of_fold_predictions, y)
return self<compute_test_metric> | params = {'num_leaves': np.arange(600, 750, step=10),
'max_depth': [4, 5, 6, 7, 8],
'learning_rate': [0.01],
'n_estimators': np.arange(2000, 2300, step=20),
'subsample': [0.9, 1.0],
'feature_fraction': [0.3, 0.4],
'bagging_fraction': [0.7, 0.8],
'bagging_seed': np.arange(16, 19, step=1),
'lambda_l1': [0.1],
'lambda_l2': [0.1],
'min_child_samples': [2, 5],
'min_split_gain': [0.01],
'n_jobs':[-1],
}
constriction_hyperparameters_space(X,
y,
LGBMClassifier,
params,
n_iter_RandomizedSearchCV=100,
times_for_estimate=1)
| Titanic - Machine Learning from Disaster |
13,218,661 | stacked_averaged_models = StackingAveragedModels(base_models =(ENet, GBoost, KRR),
meta_model = lasso)
score = rmsle_cv(stacked_averaged_models)
print("Stacking Averaged models score: {:.4f}({:.4f})".format(score.mean() , score.std()))<compute_test_metric> | Titanic - Machine Learning from Disaster | |
13,218,661 | def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))<choose_model_class> | Titanic - Machine Learning from Disaster | |
13,218,661 | I decided to use the stock lasso regression. XGBoost and LightGBM were both banned for this competition.<save_to_csv> | target_column = ['Survived']
predictors = list(set(list(df.columns)) -set(target_column))
predictors | Titanic - Machine Learning from Disaster |
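The lasso-only note above omits the scoring code; a minimal, hypothetical sketch of that choice (RobustScaler + Lasso with cross-validated RMSE, mirroring the rmsle_cv pattern used earlier in this kernel — the toy data and alpha are illustrative, not the author's exact setup):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold, cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler

# Toy regression data standing in for the competition features (hypothetical).
X_demo, y_demo = make_regression(n_samples=200, n_features=10, noise=0.1, random_state=42)
# Scale-robust Lasso, matching the lasso pipeline defined earlier.
lasso_demo = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))
kf = KFold(n_splits=5, shuffle=True, random_state=42)
rmse = np.sqrt(-cross_val_score(lasso_demo, X_demo, y_demo, scoring="neg_mean_squared_error", cv=kf))
print("Lasso CV RMSE: {:.4f} ({:.4f})".format(rmse.mean(), rmse.std()))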
13,218,661 | sub = pd.DataFrame()
sub['id'] = range(0,993)
sub['cases'] = sample
sub.to_csv('submission.csv',index=False)
print(sub )<import_modules> | X = df.loc[:890, predictors]
X | Titanic - Machine Learning from Disaster |
13,218,661 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.naive_bayes import GaussianNB<load_from_csv> | y = df.loc[:890,target_column]
y | Titanic - Machine Learning from Disaster |
13,218,661 | file = '../input/iris-train.csv'
df = pd.read_csv(file, delimiter = ',', index_col='Id')
X = df[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']]
Y = df['Species']<choose_model_class> | test_X = df.loc[891:, predictors]
test_X | Titanic - Machine Learning from Disaster |
13,218,661 | models = []
models.append(( 'Support Vector Machines - linear', svm.SVC(kernel='linear', random_state=0, gamma=.10, C=2.0)))
models.append(( 'Logistic Regression - lbfgs', LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000)))
models.append(( 'KNeighborsClassifier', KNeighborsClassifier()))
models.append(( 'DecisionTreeClassifier', DecisionTreeClassifier()))
models.append(( 'RandomForestClassifier', RandomForestClassifier(n_estimators=100)))
models.append(( 'Gaussian Naïve Bayes', GaussianNB()))
<import_modules> | from sklearn.metrics import f1_score | Titanic - Machine Learning from Disaster |
13,218,661 | from sklearn.model_selection import cross_val_score<load_from_csv> | BAGGING_DF = pd.DataFrame() | Titanic - Machine Learning from Disaster |
13,218,661 | model = models[0][1]
filename = models[0][0]
X_train = df.drop(['Species'], axis=1)
Y_train = df['Species']
model.fit(X_train, Y_train)
dfPredict = pd.read_csv('../input/iris-test.csv', delimiter=',', index_col='Id')
xPredict = dfPredict[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']]
yPredict = model.predict(xPredict)
submission = pd.DataFrame({
'Id': xPredict.index,
'Species': yPredict
})
submission.set_index('Id', inplace=True)
submission.to_csv(filename+'.csv' )<import_modules> | result = BAGGING_DF.mode(axis='columns')
result.tail(10 ) | Titanic - Machine Learning from Disaster |
13,218,661 | import numpy as np
import pandas as pd<load_from_csv> | predictions = np.array(result[0])
predictions | Titanic - Machine Learning from Disaster |
13,218,661 | train_data = pd.read_csv('../input/iris-train.csv', index_col='Id')
train_data.head()<load_from_csv> | output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
13,167,892 | test_data = pd.read_csv('../input/iris-test.csv', index_col='Id')
test_data.head()<prepare_x_and_y> | read_train_data = pd.read_csv('../input/titanic/train.csv')
print(read_train_data ) | Titanic - Machine Learning from Disaster |
13,167,892 | X_train = train_data.drop(['Species'], axis=1)
y_train = train_data['Species']
print('Training data shape:', X_train.shape, y_train.shape )<import_modules> | numeric_train_data = read_train_data
print(numeric_train_data)
print(type(numeric_train_data.columns))
feature_names = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
print("-------------------feature names-------------------")
print(feature_names)
y = numeric_train_data.Survived
X = pd.DataFrame(numeric_train_data[feature_names])
print(X)
print(y ) | Titanic - Machine Learning from Disaster |
13,167,892 | from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier<train_model> | col_with_missing_values_categorical_val = [col for col in X.columns
if(X[col].isnull().any() &(X[col].dtype == 'object')) ]
print(col_with_missing_values_categorical_val)
X[col_with_missing_values_categorical_val] = X[col_with_missing_values_categorical_val].fillna("Unknown")
print(X ) | Titanic - Machine Learning from Disaster |
13,167,892 | model = LogisticRegression()
model.fit(X_train, y_train)
print(model )<predict_on_test> | col_with_categorical_data = [col for col in X.columns
if(X[col].dtype=='object')]
dummies = pd.get_dummies(X[col_with_categorical_data])
merged = pd.concat([X,dummies], axis=1)
X = merged.drop(col_with_categorical_data, axis=1)
print(X.head(62)) | Titanic - Machine Learning from Disaster |
13,167,892 | y_pred = model.predict(X_test)
print('Example predictions:\n', y_pred[:10] )<prepare_output> | imputed_X_train, imputed_X_test, y_train, y_test = train_test_split(imputed_X, y, test_size=0.2, random_state=0)
print(imputed_X_train)
print(imputed_X_test)
print(y_train)
print(y_test ) | Titanic - Machine Learning from Disaster |
13,167,892 | submission = pd.DataFrame({
'Id': X_test.index,
'Species': y_pred
})
submission.set_index('Id', inplace=True)
submission.head(10 )<save_to_csv> | sc_age = StandardScaler()
sc_fare = StandardScaler()
imputed_X_train['Age'] = sc_age.fit_transform(imputed_X_train['Age'].values.reshape(-1,1))
imputed_X_train['Fare'] = sc_fare.fit_transform(imputed_X_train['Fare'].values.reshape(-1,1))
imputed_X_test['Age'] = sc_age.transform(imputed_X_test['Age'].values.reshape(-1,1))
imputed_X_test['Fare'] = sc_fare.transform(imputed_X_test['Fare'].values.reshape(-1,1))
print(imputed_X_train)
print(imputed_X_test ) | Titanic - Machine Learning from Disaster |
13,167,892 | submission.to_csv('iris-submission.csv' )<set_options> | model = RandomForestClassifier(bootstrap=True, max_depth=80, max_features=3, min_samples_leaf=3, min_samples_split=10, n_estimators=1000, random_state=0)
model.fit(imputed_X_train, y_train)
predicted_y = model.predict(imputed_X_test)
print(predicted_y)
calculate_mae = mean_absolute_error(y_test, predicted_y)
print(calculate_mae ) | Titanic - Machine Learning from Disaster |
13,167,892 | <import_modules><EOS> | test_data = pd.read_csv('../input/titanic/test.csv')
print(feature_names)
X_test = pd.DataFrame(test_data[feature_names])
col_with_missing_values_categorical_val_test = [col for col in X_test.columns
if(X_test[col].isnull().any() &(X_test[col].dtype == 'object')) ]
print(col_with_missing_values_categorical_val_test)
col_with_categorical_data_test = [col for col in X_test.columns
if(X_test[col].dtype=='object')]
dummies_test = pd.get_dummies(X_test[col_with_categorical_data_test])
merged_test = pd.concat([X_test,dummies_test], axis=1)
X_test = merged_test.drop(col_with_categorical_data_test, axis=1)
X_test['Embarked_Unknown'] = 0
print(X_test)
col_with_missing_values_test = [col for col in X_test.columns
if(X_test[col].isnull().any())]
print(col_with_missing_values_test)
imputed_X_test_data = pd.DataFrame(my_imputer.fit_transform(X_test))
imputed_X_test_data.columns = X_test.columns
print(imputed_X_test)
imputed_X_test_data['Age'] = sc_age.transform(imputed_X_test_data['Age'].values.reshape(-1,1))
imputed_X_test_data['Fare'] = sc_fare.transform(imputed_X_test_data['Fare'].values.reshape(-1,1))
print(imputed_X_test_data)
test_preds = model.predict(imputed_X_test_data)
print(type(test_preds))
print(test_preds)
output = pd.DataFrame({'PassengerId': test_data.PassengerId,
'Survived': test_preds})
output.to_csv('./submission.csv', index=False)
print(pd.read_csv('./submission.csv')) | Titanic - Machine Learning from Disaster |
12,642,986 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<compute_test_metric> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.metrics import confusion_matrix
| Titanic - Machine Learning from Disaster |
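The metadata row above names categorization accuracy as the competition metric; a minimal illustration with made-up labels (assuming scikit-learn's accuracy_score):

from sklearn.metrics import accuracy_score

y_true = [0, 1, 1, 0, 1]  # hypothetical ground-truth labels
y_pred = [0, 1, 0, 0, 1]  # hypothetical predictions
print(accuracy_score(y_true, y_pred))  # 0.8: the fraction of exactly matching labels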
12,642,986 | class SigmoidNeuron:
def __init__(self):
self.w = None
self.b = None
def perceptron(self, x):
return np.dot(x, self.w.T)+ self.b
def sigmoid(self, x):
return 1.0/(1.0 + np.exp(-x))
def grad_w_mse(self, x, y):
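# Squared-error gradient through the sigmoid: (y_pred - y) * y_pred * (1 - y_pred) * x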
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)* x
def grad_b_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)
def grad_w_ce(self, x, y):
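# Cross-entropy gradient: the two branches below are (y_pred - y) * x written out for y = 0 and y = 1.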
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred * x
elif y == 1:
return -1 *(1 - y_pred)* x
else:
raise ValueError("y should be 0 or 1")
def grad_b_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred
elif y == 1:
return -1 *(1 - y_pred)
else:
raise ValueError("y should be 0 or 1")
def fit_train_valid(self, X_train, Y_train,X_test,Y_test,epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False,lr_decay=False):
i = 0
if initialise:
np.random.seed(100)
self.w = np.random.randn(1, X_train.shape[1])
np.random.seed(100)
self.b = np.random.randn()
if display_loss:
loss = {}
valid_loss = {}
while(i<epochs):
dw = 0
db = 0
for x, y in zip(X_train, Y_train):
if loss_fn == "mse":
dw += self.grad_w_mse(x, y)
db += self.grad_b_mse(x, y)
elif loss_fn == "ce":
dw += self.grad_w_ce(x, y)
db += self.grad_b_ce(x, y)
self.w -= learning_rate * dw
self.b -= learning_rate * db
if display_loss:
Y_pred = self.sigmoid(self.perceptron(X_train))
Y_test_pred = self.sigmoid(self.perceptron(X_test))
if loss_fn == "mse":
loss[i] = mean_squared_error(Y_train, Y_pred)
valid_loss[i] = mean_squared_error(Y_test, Y_test_pred)
elif loss_fn == "ce":
loss[i] = log_loss(Y_train, Y_pred)
valid_loss[i] = log_loss(Y_test, Y_test_pred)
if lr_decay:
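# If validation loss fell, checkpoint the weights and continue; if it rose, step back one epoch, restore the checkpoint, and halve the learning rate.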
if i>0 and valid_loss[i]-valid_loss[i-1]<0:
w = self.w
b = self.b
i+=1
elif i>0:
i = i-1
self.w = w
self.b = b
learning_rate/=2
else:
i+=1
else:
i+=1
if display_loss:
plt.plot(loss.values())
plt.plot(valid_loss.values() ,color='orange')
plt.xlabel('Epochs')
if loss_fn == "mse":
plt.ylabel('Mean Squared Error')
elif loss_fn == "ce":
plt.ylabel('Log Loss')
plt.show()
def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
if initialise:
np.random.seed(100)
self.w = np.random.randn(1, X.shape[1])
np.random.seed(100)
self.b = np.random.randn()
if display_loss:
loss = {}
for i in range(epochs):
dw = 0
db = 0
for x, y in zip(X, Y):
if loss_fn == "mse":
dw += self.grad_w_mse(x, y)
db += self.grad_b_mse(x, y)
elif loss_fn == "ce":
dw += self.grad_w_ce(x, y)
db += self.grad_b_ce(x, y)
self.w -= learning_rate * dw
self.b -= learning_rate * db
if display_loss:
Y_pred = self.sigmoid(self.perceptron(X))
if loss_fn == "mse":
loss[i] = mean_squared_error(Y, Y_pred)
elif loss_fn == "ce":
loss[i] = log_loss(Y, Y_pred)
if display_loss:
plt.plot(loss.values())
plt.xlabel('Epochs')
if loss_fn == "mse":
plt.ylabel('Mean Squared Error')
elif loss_fn == "ce":
plt.ylabel('Log Loss')
plt.show()
def predict(self, X):
Y_pred = []
for x in X:
y_pred =1 if self.sigmoid(self.perceptron(x)) >=0.5 else 0
Y_pred.append(y_pred)
return np.array(Y_pred )<train_model> | train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.head() | Titanic - Machine Learning from Disaster |
12,642,986 | class Perceptron:
def __init__(self):
self.w = None
self.b = None
def model(self, x):
return 1 if(np.dot(self.w, x)>= self.b)else 0
def predict(self, X):
Y = []
for x in X:
result = self.model(x)
Y.append(result)
return np.array(Y)
def fit(self, X, Y, epochs = 1, lr = 1,initialize=True,display_loss=False):
if initialize:
np.random.seed(100)
self.w = np.random.randn(1,X.shape[1])
np.random.seed(100)
self.b = np.random.randn()
accuracy = {}
max_accuracy = 0
wt_matrix = []
for i in range(epochs):
for x, y in zip(X, Y):
y_pred = self.model(x)
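# Classic perceptron update: move w toward misclassified positives and away from misclassified negatives.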
if y == 1 and y_pred == 0:
self.w = self.w + lr * x
self.b = self.b - lr * 1
elif y == 0 and y_pred == 1:
self.w = self.w - lr * x
self.b = self.b + lr * 1
if display_loss:
accuracy[i] = accuracy_score(self.predict(X), Y)
if(accuracy[i] > max_accuracy):
max_accuracy = accuracy[i]
if display_loss:
print(max_accuracy)
plt.title("Accuracy")
plt.plot(accuracy.values())
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.ylim([0, 1])
plt.show()<define_variables> | test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.head() | Titanic - Machine Learning from Disaster |
12,642,986 | sys.path.insert(0,'../input/level_4b_train/' )<load_pretrained> | submission = pd.read_csv('../input/titanic/gender_submission.csv' )
12,642,986 | languages = ['ta', 'hi', 'en']
images_train = read_all('../input/level_4b_train/'+LEVEL+"/"+"background", key_prefix='bgr_')
for language in languages:
images_train.update(read_all('../input/level_4b_train/'+LEVEL+"/"+language, key_prefix=language+"_"))
print(len(images_train))
sys.path.insert(0,'../input/level_4b_test/')
images_test = read_all('../input/level_4b_test/kaggle_'+LEVEL, key_prefix='')
print(len(images_test))<normalization> | def highlight(value):
if value >= 0.75:
style = 'background-color: green'
elif value >= 0.5:
style = 'background-color: palegreen'
elif value >= 0.25:
style = 'background-color: pink'
else:
style = 'background-color: red'
return style | Titanic - Machine Learning from Disaster |
12,642,986 | def pre_process_image(img):
img = img.reshape(64,64)
ret,th1 = cv2.threshold(img,25,255,cv2.THRESH_BINARY)
th1 = th1.ravel()
return th1<train_model> | women = train_data.loc[train_data.Sex == 'female']['Survived']
rate_women = sum(women)/ len(women)
print('percent of women who survived :', rate_women ) | Titanic - Machine Learning from Disaster |
12,642,986 | X_train_processed = []
for i in range(X_train.shape[0]):
X_train_processed.append(pre_process_image(X_train[i]))
X_train = np.array(X_train_processed )<categorify> | men = train_data.loc[train_data.Sex == 'male']["Survived"]
rate_men = sum(men)/len(men)
print("percent of men who survived :", rate_men ) | Titanic - Machine Learning from Disaster |
12,642,986 | X_test_processed = []
for i in range(X_test.shape[0]):
X_test_processed.append(pre_process_image(X_test[i]))
X_test = np.array(X_test_processed )<split> | train_data['Family_size'] = train_data['SibSp'] + train_data['Parch'] + 1 | Titanic - Machine Learning from Disaster |
12,642,986 | X_sub_train, X_sub_test, y_sub_train, y_sub_test = train_test_split(X_train, Y_train, test_size=0.1, random_state=34,stratify=Y_train )<normalization> | missing_age_value = train_data['Age'].isnull().sum()
percent_of_missing_age_value = missing_age_value / 100
print('% of missing age value =', percent_of_missing_age_value ) | Titanic - Machine Learning from Disaster |
12,642,986 | sc = StandardScaler()
X_sc_sub_train = sc.fit_transform(X_sub_train)
X_sc_sub_test = sc.transform(X_sub_test )<normalization> | train_data['Age_5'] = train_data['Age'] // 5
train_data['Age_5'] = train_data['Age_5'].fillna(mean_age // 5 ).astype('int' ) | Titanic - Machine Learning from Disaster |
12,642,986 | min_max = MinMaxScaler()
X_min_max_sub_train = min_max.fit_transform(X_sub_train)
X_min_max_test = min_max.transform(X_sub_test )<statistical_test> | number_of_unique_age_values = len(train_data['Age_5'].unique())
print(number_of_unique_age_values ) | Titanic - Machine Learning from Disaster |
12,642,986 | sn_mse = SigmoidNeuron()<train_model> | def df_transform(df):
df['Family_size'] = df['SibSp'] + df['Parch'] + 1
df['Age_5'] = df['Age'] // 5
mean_age = df['Age'].mean()
df['Age_5'] = df['Age_5'].fillna(mean_age // 5 ).astype('int')
df['Sex'] = df['Sex'].replace({'male': 0, 'female': 1})
df = df[['Family_size', 'Age_5', 'Sex', 'Pclass']]
return df
| Titanic - Machine Learning from Disaster |
12,642,986 | for learning_rate in [0.1,0.01,0.001,0.0001]:
sn_mse.fit_train_valid(X_sc_sub_train,y_sub_train,X_sc_sub_test,y_sub_test,display_loss=True,epochs=100,learning_rate=learning_rate,loss_fn="mse" )<train_model> | target = train_data.pop('Survived' ) | Titanic - Machine Learning from Disaster |
12,642,986 | for learning_rate in [0.0003,0.0006,0.0007,0.0008]:
sn_mse.fit_train_valid(X_sc_sub_train,y_sub_train,X_sc_sub_test,y_sub_test,display_loss=True,epochs=100,learning_rate=learning_rate,loss_fn="mse" )<train_model> | train_data = df_transform(train_data)
train_data.head() | Titanic - Machine Learning from Disaster |
12,642,986 | sn_mse.fit_train_valid(X_sc_sub_train,y_sub_train,X_sc_sub_test,y_sub_test,display_loss=True,epochs=4500,learning_rate=0.0008,loss_fn="mse" )<predict_on_test> | test_data = df_transform(test_data)
test_data.head() | Titanic - Machine Learning from Disaster |
12,642,986 | accuracy_score(sn_mse.predict(X_sc_sub_train),y_sub_train )<predict_on_test> | model = DecisionTreeClassifier(max_depth=5, random_state=42)
model.fit(train_data, target ) | Titanic - Machine Learning from Disaster |
12,642,986 | accuracy_score(sn_mse.predict(X_sc_sub_test),y_sub_test )<split> | y_train = model.predict(train_data ).astype(int ) | Titanic - Machine Learning from Disaster |
12,642,986 | X_sub_train, X_sub_test, y_sub_train, y_sub_test = train_test_split(X_train, Y_train, test_size=0.1, random_state=10,stratify=Y_train )<compute_test_metric> | confusion_matrix(target, y_train ) | Titanic - Machine Learning from Disaster |
12,642,986 | print(np.mean(y_sub_train),np.mean(y_sub_test))<normalization> | y_pred = model.predict(test_data ).astype(int ) | Titanic - Machine Learning from Disaster |
12,642,986 | scaler = StandardScaler()
X_scaled_train = scaler.fit_transform(X_sub_train)
X_scaled_test = scaler.transform(X_sub_test )<train_model> | submission["Survived"] = y_pred
submission.to_csv('submission.csv', index=False)
submission['Survived'].hist() | Titanic - Machine Learning from Disaster |
12,825,546 | sn_mse = SigmoidNeuron()
sn_mse.fit(X_scaled_train,y_sub_train,display_loss=True,epochs=4500,learning_rate=0.0008,loss_fn="mse" )<predict_on_test> | train = pd.read_csv('/kaggle/input/titanic/train.csv')
train.head() | Titanic - Machine Learning from Disaster |
12,825,546 | def print_accuracy(sn):
Y_pred_train = sn.predict(X_scaled_train)
Y_pred_binarised_train =(Y_pred_train >= 0.5 ).astype("int" ).ravel()
accuracy_train = accuracy_score(Y_pred_binarised_train, y_sub_train)
print("Train Accuracy : ", accuracy_train)
print("-"*50 )<compute_test_metric> | test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.head() | Titanic - Machine Learning from Disaster |
12,825,546 | print_accuracy(sn_mse )<predict_on_test> | Id = test["PassengerId"]
all_data = pd.concat([train,test],ignore_index = True ) | Titanic - Machine Learning from Disaster |
12,825,546 | print(accuracy_score(sn_mse.predict(X_scaled_test),y_sub_test))<normalization> | all_data['Name']
all_data["Title"] = all_data["Name"].apply(lambda x:x.split(",")[1].split(".")[0].strip())
Title_Dict = {}
Title_Dict.update(dict.fromkeys(['Capt', 'Col', 'Major', 'Dr', 'Rev'],"Officer"))
Title_Dict.update(dict.fromkeys(['Don', 'Sir', 'the Countess', 'Dona', 'Lady'], 'Royalty'))
Title_Dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], 'Mrs'))
Title_Dict.update(dict.fromkeys(['Mlle', 'Miss'], 'Miss'))
Title_Dict.update(dict.fromkeys(['Mr'], 'Mr'))
Title_Dict.update(dict.fromkeys(['Master','Jonkheer'], 'Master'))
all_data["Title"] = all_data["Title"].map(Title_Dict ) | Titanic - Machine Learning from Disaster |
12,825,546 | mainScaler = StandardScaler()
X_train_scaled = mainScaler.fit_transform(X_train)
X_test_scaled = mainScaler.transform(X_test )<train_model> | all_data['FamilySize']=all_data['SibSp']+all_data['Parch']+1
def SizeToLabel(s):
if(s>=2)&(s<=4):
return 2
elif(( s>=5)&(s<=7)) |(s == 1):
return 1
elif(s>7):
return 0
all_data["FamilyLabel"] = all_data["FamilySize"].apply(SizeToLabel ) | Titanic - Machine Learning from Disaster |
12,825,546 | sn_mse = SigmoidNeuron()
sn_mse.fit(X_train_scaled,Y_train,display_loss=True,epochs=4500,learning_rate=0.0008,loss_fn="mse" )<compute_test_metric> | all_data["Cabin"] = all_data["Cabin"].fillna("Unknown")
all_data["Deck"] = all_data["Cabin"].str.get(0 ) | Titanic - Machine Learning from Disaster |
12,825,546 | print(accuracy_score(sn_mse.predict(X_train_scaled),Y_train))<save_to_csv> | Tick_Counts = dict(all_data['Ticket'].value_counts())
all_data["TicketGroup"] = all_data["Ticket"].apply(lambda x:Tick_Counts[x] ) | Titanic - Machine Learning from Disaster |
12,825,546 | Y_pred_test = sn_mse.predict(X_test_scaled)
Y_pred_binarised_test =(Y_pred_test >= 0.5 ).astype("int" ).ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission.to_csv("submisision.csv", index=False )<define_variables> | def GroupToLabel(s):
if(s>=2)&(s<=4):
return 2
elif(( s>=5)&(s<=8)) |(s==1):
return 1
elif(s>8):
return 0
all_data["TicketLabel"] = all_data["TicketGroup"].apply(GroupToLabel ) | Titanic - Machine Learning from Disaster |
12,825,546 | np.random.seed(100)
LEVEL = 'level_4a'<compute_test_metric> | def sex(s):
if s=="male":
return 1
else:
return 0
all_data["Sex"] = all_data["Sex"].apply(sex ) | Titanic - Machine Learning from Disaster |
12,825,546 | class SigmoidNeuron:
def __init__(self):
self.w = None
self.b = None
def perceptron(self, x):
return np.dot(x, self.w.T)+ self.b
def sigmoid(self, x):
return 1.0/(1.0 + np.exp(-x))
def grad_w_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)* x
def grad_b_mse(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
return(y_pred - y)* y_pred *(1 - y_pred)
def grad_w_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred * x
elif y == 1:
return -1 *(1 - y_pred)* x
else:
raise ValueError("y should be 0 or 1")
def grad_b_ce(self, x, y):
y_pred = self.sigmoid(self.perceptron(x))
if y == 0:
return y_pred
elif y == 1:
return -1 *(1 - y_pred)
else:
raise ValueError("y should be 0 or 1")
def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
if initialise:
self.w = np.random.randn(1, X.shape[1])
self.b = 0
if display_loss:
loss = {}
for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
dw = 0
db = 0
for x, y in zip(X, Y):
if loss_fn == "mse":
dw += self.grad_w_mse(x, y)
db += self.grad_b_mse(x, y)
elif loss_fn == "ce":
dw += self.grad_w_ce(x, y)
db += self.grad_b_ce(x, y)
self.w -= learning_rate * dw
self.b -= learning_rate * db
if display_loss:
Y_pred = self.sigmoid(self.perceptron(X))
if loss_fn == "mse":
loss[i] = mean_squared_error(Y, Y_pred)
elif loss_fn == "ce":
loss[i] = log_loss(Y, Y_pred)
if display_loss:
plt.plot(loss.values())
plt.xlabel('Epochs')
if loss_fn == "mse":
plt.ylabel('Mean Squared Error')
elif loss_fn == "ce":
plt.ylabel('Log Loss')
plt.show()
def predict(self, X):
Y_pred = []
for x in X:
y_pred = self.sigmoid(self.perceptron(x))
Y_pred.append(y_pred)
return np.array(Y_pred )<load_pretrained> | def title(s):
if s=="Officer":
return 0
elif s=="Mr":
return 1
elif s=="Mrs":
return 2
elif s=="Miss":
return 3
elif s=="Master":
return 4
elif s=="Royalty":
return 5
all_data["Title"] = all_data["Title"].apply(title ) | Titanic - Machine Learning from Disaster |
12,825,546 | languages = ['ta', 'hi', 'en']
images_train = read_all(".. /input/level_4b_train/level_4b/background/", key_prefix='bgr_')
for language in languages:
images_train.update(read_all(".. /input/level_4b_train/level_4b/"+language, key_prefix=language+"_"))
print(len(images_train))
images_test = read_all(".. /input/level_4b_test/kaggle_level_4b", key_prefix='')
print(len(images_test))<normalization> | def embarked(s):
if s=="S":
return 0
elif s=="C":
return 1
elif s=="Q":
return 2
all_data["Embarked"] = all_data["Embarked"].apply(embarked ) | Titanic - Machine Learning from Disaster |
12,825,546 | scaler = MinMaxScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test )<train_model> | Y_missing = all_data.loc[:,"Age"]
nameList = ["Sex","Pclass","SibSp","Parch","Title"]
X_missing = all_data.loc[:,nameList]
Ytrain = Y_missing[Y_missing.notnull() ]
Ytest = Y_missing[Y_missing.isnull() ]
Xtrain = X_missing.iloc[Ytrain.index,:]
Xtest = X_missing.iloc[Ytest.index,:] | Titanic - Machine Learning from Disaster |
12,825,546 | sn_ce = SigmoidNeuron()
sn_ce.fit(X_scaled_train, Y_train, epochs=100, learning_rate=0.015, loss_fn="ce", display_loss=True )<predict_on_test> | rfc = RandomForestRegressor(n_estimators=100)
rfc = rfc.fit(Xtrain, Ytrain)
Ypredict = rfc.predict(Xtest)
all_data.loc[all_data.loc[:,"Age"].isnull() ,"Age"] = Ypredict
all_data["Age"] = all_data["Age"].apply(lambda x:round(x))
nameList = ["Sex","Pclass","Fare","Title","Age","FamilyLabel","TicketLabel","Embarked"]
df_data = all_data.loc[:,nameList]
df_data = SimpleImputer(missing_values=np.nan,strategy='constant',fill_value=0 ).fit_transform(df_data)
df_data = pd.DataFrame(df_data)
df_data.info() | Titanic - Machine Learning from Disaster |
12,825,546 | def print_accuracy(sn):
Y_pred_train = sn.predict(X_scaled_train)
Y_pred_binarised_train =(Y_pred_train >= 0.5 ).astype("int" ).ravel()
accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
print("Train Accuracy : ", accuracy_train)
print("-"*50 )<compute_test_metric> | Y = all_data.loc[:,"Survived"]
Y_predict = Y[Y.isnull() ]
Y_train = Y[Y.notnull() ]
X_train = df_data.iloc[Y_train.index,:]
X_predict = df_data.iloc[Y_predict.index,:] | Titanic - Machine Learning from Disaster |
12,825,546 | print_accuracy(sn_ce )<save_to_csv> | rfc = RandomForestClassifier(n_estimators=100,random_state=90)
score_pre = cross_val_score(rfc,X_train,Y_train,cv=10 ).mean()
score_pre | Titanic - Machine Learning from Disaster |
12,825,546 | Y_pred_test = sn_ce.predict(X_scaled_test)
Y_pred_binarised_test =(Y_pred_test >= 0.5 ).astype("int" ).ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission.to_csv("submisision.csv", index=False )<load_from_csv> | score_list=[]
for i in range(0,200,10):
rfc = RandomForestClassifier(n_estimators=i+1,random_state=90,n_jobs=-1)
score = cross_val_score(rfc,X_train,Y_train,cv=10 ).mean()
score_list.append(score)
print(max(score_list),(score_list.index(max(score_list)) *10)+1 ) | Titanic - Machine Learning from Disaster |
12,825,546 | assoc_df = pd.read_csv('../input/market-basket-id-ndsc-2020/association_order.csv')
rules_df = pd.read_csv('../input/market-basket-id-ndsc-2020/rules.csv' )<merge> | score_list=[]
for i in range(25,40):
rfc = RandomForestClassifier(n_estimators=i, random_state=90, n_jobs=-1)
score = cross_val_score(rfc,X_train,Y_train,cv=10 ).mean()
score_list.append(score)
print(max(score_list),([*range(25,40)][score_list.index(max(score_list)) ])) | Titanic - Machine Learning from Disaster |
12,825,546 | def get_assoc(*args):
orderid = [int(id)for id in args]
assoc_conds = [assoc_df.loc[(assoc_df['itemid'] == cond)]['orderid'] for cond in orderid]
assoc_conds = [set(assoc_cond.values.tolist())for assoc_cond in assoc_conds]
intersection_val = assoc_conds[0].intersection(*assoc_conds[1:])
return len(intersection_val)
<string_transform> | param_grid = {"max_depth":np.arange(1,20,1)}
rfc = RandomForestClassifier(n_estimators=30, random_state=90)
GS = GridSearchCV(rfc,param_grid,cv=10)
GS.fit(X_train,Y_train)
GS.best_params_ | Titanic - Machine Learning from Disaster |