kernel_id int64 | prompt string | completion string | comp_name string
|---|---|---|---|
12,850,197 | train=train.join(total_revenue,on=["date_block_num","shop_id"],how="left")
train["total_shop_revenue"]=train["total_shop_revenue"].astype(np.float32)
train.fillna(0,inplace=True)
train.head().T<drop_column> | def fare_cat(fare):
    if fare <= 7.0:
        return 1
    elif fare <= 39:
        return 2
    else:
        return 3
full_df.loc[:, 'Fare_Cat'] = full_df['Fare'].apply(fare_cat).astype('int') | Titanic - Machine Learning from Disaster |
12,850,197 | %%time
train,n_col=create_lag_features(train,"total_shop_revenue")
train=train.fillna(0.0)
train=train.drop(["total_shop_revenue"],axis=1)
numeric=numeric+n_col<groupby> | full_df.loc[:, 'Fare_Family_Size'] = full_df['Fare']/full_df['Family_Size']
full_df.loc[:, 'Fare_Cat_Pclass'] = full_df['Fare_Cat']*full_df['Pclass']
full_df.loc[:, 'Fare_Cat_Title'] = full_df['Fare_Cat']*full_df['Title']
full_df.loc[:, 'Fsize_Cat_Title'] = full_df['Fsize_Cat']*full_df['Title']
full_df.loc[:, 'Fsize_Cat_Fare_Cat'] = full_df['Fare_Cat']/full_df['Fsize_Cat'].astype('int')
full_df.loc[:, 'Pclass_Title'] = full_df['Pclass']*full_df['Title']
full_df.loc[:, 'Fsize_Cat_Pclass'] = full_df['Fsize_Cat']*full_df['Pclass'] | Titanic - Machine Learning from Disaster |
12,850,197 | groups = train.groupby(train.date_block_num ).groups
sorted_groups = [value for(key, value)in sorted(groups.items())]
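# Expanding-window CV for time-series data: each fold trains on the first k
# month-blocks and validates on all later ones (k = 8, 16, 24), so the model
# is never evaluated on months earlier than its training data.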
cv=[(np.concatenate(sorted_groups[:8]),np.concatenate(sorted_groups[8:])) ,
(np.concatenate(sorted_groups[:16]),np.concatenate(sorted_groups[16:])) ,
(np.concatenate(sorted_groups[:24]),np.concatenate(sorted_groups[24:])) ]<prepare_x_and_y> | colsToRemove = []
cols = ['Tkt_AQ', 'Tkt_AS', 'Tkt_C', 'Tkt_CA',
'Tkt_CASOTON', 'Tkt_FC', 'Tkt_FCC', 'Tkt_Fa', 'Tkt_LINE', 'Tkt_LP',
'Tkt_NUM', 'Tkt_PC', 'Tkt_PP', 'Tkt_PPP', 'Tkt_SC', 'Tkt_SCA',
'Tkt_SCAH', 'Tkt_SCAHBasle', 'Tkt_SCOW', 'Tkt_SCPARIS', 'Tkt_SCParis',
'Tkt_SOC', 'Tkt_SOP', 'Tkt_SOPP', 'Tkt_SOTONO', 'Tkt_SOTONOQ',
'Tkt_SP', 'Tkt_STONO', 'Tkt_STONOQ', 'Tkt_SWPP', 'Tkt_WC',
'Tkt_WEP', 'Fare_Cat', 'Fare_Family_Size', 'Fare_Cat_Pclass',
'Fare_Cat_Title', 'Fsize_Cat_Title', 'Fsize_Cat_Fare_Cat',
'Pclass_Title', 'Fsize_Cat_Pclass']
for col in cols:
if full_df[col][:train_shape[0]].std() == 0:
colsToRemove.append(col)
full_df.drop(colsToRemove, axis=1, inplace=True)
print("Removed `{}` Constant Columns
".format(len(colsToRemove)))
print(colsToRemove ) | Titanic - Machine Learning from Disaster |
12,850,197 | y_train=train["item_cnt_day"]<data_type_conversions> | imp_features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Title',
'Name_Length', 'Emb_C', 'Emb_Q', 'Emb_S','Family_Size',
'Fsize_Cat', 'Family_Friends_Surv_Rate', 'Surv_Rate_Invalid',
'Cabin_Clean','Ticket_Frequency', 'Tkt_AS', 'Tkt_C', 'Tkt_CA',
'Tkt_CASOTON', 'Tkt_FC', 'Tkt_FCC', 'Tkt_Fa', 'Tkt_LINE',
'Tkt_NUM', 'Tkt_PC', 'Tkt_PP', 'Tkt_PPP', 'Tkt_SC', 'Tkt_SCA',
'Tkt_SCAH', 'Tkt_SCAHBasle', 'Tkt_SCOW', 'Tkt_SCPARIS', 'Tkt_SCParis',
'Tkt_SOC', 'Tkt_SOP', 'Tkt_SOPP', 'Tkt_SOTONO', 'Tkt_SOTONOQ',
'Tkt_SP', 'Tkt_STONO', 'Tkt_SWPP', 'Tkt_WC',
'Tkt_WEP', 'Fare_Cat', 'Fare_Family_Size', 'Fare_Cat_Pclass',
'Fare_Cat_Title', 'Fsize_Cat_Title', 'Fsize_Cat_Fare_Cat',
'Pclass_Title', 'Fsize_Cat_Pclass']
imputer = KNNImputer(n_neighbors=10, missing_values=np.nan)
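# KNN imputation: each missing value is replaced by the average of that feature
# over the 10 nearest rows (nan-aware Euclidean distance on imp_features).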
imputer.fit(full_df[imp_features] ) | Titanic - Machine Learning from Disaster |
12,850,197 | X_train_categorical=train[categorical].copy()
X_test_categorical=test_df[categorical].copy()
X_train_categorical["subcategory"]=X_train_categorical["subcategory"].astype(str)
X_test_categorical["subcategory"]=X_test_categorical["subcategory"].astype(str)
X_test_categorical["year"]=X_test_categorical["year"].astype(np.int16)
X_test_categorical["month"]=X_test_categorical["month"].astype(np.int8)
X_test_categorical.loc[X_test_categorical.category=="PC ","category"]="Игры PC "
X_test_categorical.loc[X_test_categorical.subcategory==" Гарнитуры/Наушники","subcategory"]=" Аксессуары для игр"<categorify> | full_df.loc[:, imp_features] = pd.DataFrame(imputer.transform(full_df[imp_features]), index=full_df.index, columns = imp_features ) | Titanic - Machine Learning from Disaster |
12,850,197 | for feature in categorical:
le=LabelEncoder()
print(feature)
X_train_categorical[feature]=le.fit_transform(X_train_categorical[feature])
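# transform() raises on labels unseen during fit — the category remapping in the
# earlier cell presumably exists to guard against unseen test labels here.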
X_test_categorical[feature]=le.transform(X_test_categorical[feature] )<prepare_x_and_y> | df_train_final = full_df[:train_shape[0]]
df_test_final = full_df[train_shape[0]:] | Titanic - Machine Learning from Disaster |
12,850,197 | X_train_numeric=train[numeric]
X_test_numeric=test_df[numeric]<concatenate> | viz_features = ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex',
'Age', 'SibSp','Parch', 'Ticket', 'Fare', 'Cabin',
'Title', 'Name_Length', 'Family_Friends_Surv_Rate',
'Ticket_Frequency']
train_viz = df_train_final[viz_features] | Titanic - Machine Learning from Disaster |
12,850,197 | label_cat_not_num_train=pd.concat([X_train_categorical,X_train_numeric],axis=1)
label_cat_not_num_train.head().T<concatenate> | full_df.to_pickle("full_df")
df_train_final.to_pickle("df_train_final")
df_test_final.to_pickle("df_test_final" ) | Titanic - Machine Learning from Disaster |
12,850,197 | label_cat_not_num_test=pd.concat([X_test_categorical,X_test_numeric],axis=1)
label_cat_not_num_test.head().T<data_type_conversions> | scaler_cols = ['Age', 'Fare', 'Name_Length', 'Family_Size',
'Ticket_Frequency', 'Fare_Family_Size', 'Fare_Cat_Pclass']
std = StandardScaler()
std.fit(df_train_final[scaler_cols])
df_train_final.loc[:, scaler_cols] = pd.DataFrame(std.transform(df_train_final[scaler_cols]), index=df_train_final.index, columns = scaler_cols)
df_test_final.loc[:, scaler_cols] = pd.DataFrame(std.transform(df_test_final[scaler_cols]), index=df_test_final.index, columns = scaler_cols ) | Titanic - Machine Learning from Disaster |
12,850,197 | def downcast_type(df):
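# Downcast label-encoded categorical columns to int8 in place to cut memory use
# (assumes every encoded value fits in [-128, 127]).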
for feature in categorical:
df[feature]=df[feature].astype(np.int8 )<compute_test_metric> | features = ['Pclass', 'Sex', 'Age', 'Fare', 'Title', 'Name_Length', 'Emb_C',
'Emb_Q', 'Emb_S', 'Title_Master', 'Title_Miss', 'Title_Mr', 'Title_Mrs',
'Title_Other', 'Title_Royal', 'Family_Size', 'Fsize_Cat',
'Family_Friends_Surv_Rate', 'Surv_Rate_Invalid', 'Cabin_Clean',
'Ticket_Frequency', 'Tkt_AS', 'Tkt_C', 'Tkt_CA',
'Tkt_CASOTON', 'Tkt_FC', 'Tkt_FCC', 'Tkt_Fa', 'Tkt_LINE',
'Tkt_NUM', 'Tkt_PC', 'Tkt_PP', 'Tkt_PPP', 'Tkt_SC', 'Tkt_SCA',
'Tkt_SCAH', 'Tkt_SCAHBasle', 'Tkt_SCOW', 'Tkt_SCPARIS', 'Tkt_SCParis',
'Tkt_SOC', 'Tkt_SOP', 'Tkt_SOPP', 'Tkt_SOTONO', 'Tkt_SOTONOQ', 'Tkt_SP',
'Tkt_STONO', 'Tkt_SWPP', 'Tkt_WC', 'Tkt_WEP', 'Fare_Cat',
'Fare_Family_Size', 'Fare_Cat_Pclass', 'Fare_Cat_Title',
'Fsize_Cat_Title', 'Fsize_Cat_Fare_Cat', 'Pclass_Title',
'Fsize_Cat_Pclass', 'Child', 'Senior']
features_train = ['Survived', 'Pclass', 'Sex', 'Age', 'Fare', 'Title', 'Name_Length', 'Emb_C',
'Emb_Q', 'Emb_S', 'Title_Master', 'Title_Miss', 'Title_Mr', 'Title_Mrs',
'Title_Other', 'Title_Royal', 'Family_Size', 'Fsize_Cat',
'Family_Friends_Surv_Rate', 'Surv_Rate_Invalid', 'Cabin_Clean',
'Ticket_Frequency', 'Tkt_AS', 'Tkt_C', 'Tkt_CA',
'Tkt_CASOTON', 'Tkt_FC', 'Tkt_FCC', 'Tkt_Fa', 'Tkt_LINE',
'Tkt_NUM', 'Tkt_PC', 'Tkt_PP', 'Tkt_PPP', 'Tkt_SC', 'Tkt_SCA',
'Tkt_SCAH', 'Tkt_SCAHBasle', 'Tkt_SCOW', 'Tkt_SCPARIS', 'Tkt_SCParis',
'Tkt_SOC', 'Tkt_SOP', 'Tkt_SOPP', 'Tkt_SOTONO', 'Tkt_SOTONOQ', 'Tkt_SP',
'Tkt_STONO', 'Tkt_SWPP', 'Tkt_WC', 'Tkt_WEP', 'Fare_Cat',
'Fare_Family_Size', 'Fare_Cat_Pclass', 'Fare_Cat_Title',
'Fsize_Cat_Title', 'Fsize_Cat_Fare_Cat', 'Pclass_Title',
'Fsize_Cat_Pclass', 'Child', 'Senior']
df_train_final = df_train_final[features_train]
df_test_final = df_test_final[features] | Titanic - Machine Learning from Disaster |
12,850,197 | def RMSE(y,predictions):
return np.sqrt(mean_squared_error(y,predictions))
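# greater_is_better=False makes scikit-learn negate the score, which is why
# cross_val_score is negated back with a leading minus sign in a later cell.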
scorer=make_scorer(RMSE,greater_is_better=False)<compute_train_metric> | corr_mat = df_train_final.astype(float).corr()
corr_mat_fil = corr_mat.loc[:, 'Survived'].sort_values(ascending=False)
corr_mat_fil = pd.DataFrame(data=corr_mat_fil[1:] ) | Titanic - Machine Learning from Disaster |
12,850,197 | %%time
baseline = -cross_val_score(
XGBRegressor(max_depth=10,
subsample=0.8,
colsample_bytree=0.9,
colsample_bylevel=0.7,
min_child_weight=200,
n_estimators=1000,
learning_rate=0.025,
objective="reg:squarederror",
tree_method="hist"), label_cat_not_num_train, y_train, scoring=scorer,cv=cv
).mean()
print(baseline )<choose_model_class> | features = df_test_final.columns.to_list()
X_train = df_train_final[features]
Y_train = df_train_final['Survived']
X_test = df_test_final | Titanic - Machine Learning from Disaster |
12,850,197 | %%time
estimator=XGBRegressor(max_depth=10,
subsample=0.8,
colsample_bytree=0.9,
colsample_bylevel=0.7,
min_child_weight=200,
n_estimators=1000,
learning_rate=0.025,
objective="reg:squarederror",
tree_method="hist")
estimator.fit(label_cat_not_num_train,y_train)
importances=estimator.feature_importances_
predictions=estimator.predict(label_cat_not_num_test )<prepare_output> | from sklearn.model_selection import cross_val_predict, cross_val_score, cross_validate
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from sklearn.metrics import confusion_matrix, roc_curve
from sklearn.metrics import precision_score, recall_score, f1_score
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
from tensorflow.keras.layers import Input, Dense, Dropout, AlphaDropout, BatchNormalization,Concatenate, concatenate
from tensorflow.keras.optimizers import SGD, RMSprop, Adamax, Adagrad, Adam, Nadam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
from tensorflow.keras.metrics import * | Titanic - Machine Learning from Disaster |
12,850,197 | sample_submissions["item_cnt_month"]=predictions
sample_submissions.head()<save_to_csv> | metrics = ['accuracy',
Precision() ,
Recall() ] | Titanic - Machine Learning from Disaster |
12,850,197 | sample_submissions.to_csv("xgboost_lagged_features_6.csv",index=False )<set_options> | def create_model() :
model = Sequential()
model.add(Input(shape=(X_train.shape[1],), name='Input_'))
model.add(Dense(8, activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=l2(0.001)))
model.add(Dense(16, activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=l2(0.1)))
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu', kernel_initializer='glorot_normal', kernel_regularizer=l2(0.1)))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid', kernel_initializer='glorot_normal'))
model.summary()
optimize = Adam(lr = 0.0001)
model.compile(optimizer = optimize,
loss = 'binary_crossentropy',
metrics = metrics)
return model | Titanic - Machine Learning from Disaster |
12,850,197 | %matplotlib inline<load_from_csv> | estimator = KerasClassifier(build_fn = create_model, epochs = 600, batch_size = 32, verbose = 1)
kfold = StratifiedKFold(n_splits = 3)
results = cross_val_score(estimator, X_train, Y_train, cv = kfold ) | Titanic - Machine Learning from Disaster |
12,850,197 | shops = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/shops.csv')
items = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/items.csv')
catgs = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/item_categories.csv')
sales = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sales_train.csv')
testd = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/test.csv')
sampl = pd.read_csv('/kaggle/input/competitive-data-science-predict-future-sales/sample_submission.csv' )<count_missing_values> | train_history = estimator.fit(X_train, Y_train, epochs = 600, batch_size = 32 ) | Titanic - Machine Learning from Disaster |
12,850,197 | <feature_engineering><EOS> | y_preds = estimator.predict(X_test)
submission = pd.read_csv(".. /input/titanic/gender_submission.csv", index_col='PassengerId')
submission['Survived'] = y_preds.astype(int)
submission.to_csv('submission.csv' ) | Titanic - Machine Learning from Disaster |
11,482,088 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<feature_engineering> | %matplotlib inline
sns.set_style('dark')
sns.set_palette('Set2')
| Titanic - Machine Learning from Disaster |
11,482,088 | sales.loc[sales.shop_id == 0, 'shop_id'] = 57
testd.loc[testd.shop_id == 0, 'shop_id'] = 57
sales.loc[sales.shop_id == 1, 'shop_id'] = 58
testd.loc[testd.shop_id == 1, 'shop_id'] = 58
sales.loc[sales.shop_id == 10, 'shop_id'] = 11
testd.loc[testd.shop_id == 10, 'shop_id'] = 11
sales.loc[sales.shop_id == 39, 'shop_id'] = 40
testd.loc[testd.shop_id == 39, 'shop_id'] = 40<feature_engineering> | train_data = pd.read_csv('../input/titanic/train.csv')
test_data = pd.read_csv('../input/titanic/test.csv')
data = pd.concat([train_data, test_data] ).reset_index().drop(['index'], axis=1 ) | Titanic - Machine Learning from Disaster |
11,482,088 | shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['shop_category'] = shops['shop_name'].str.split(' ' ).map(lambda x:x[1] ).astype(str)
categories = ['Орджоникидзе,', 'ТЦ', 'ТРК', 'ТРЦ','ул.', 'Магазин', 'ТК', 'склад']
shops.shop_category = shops.shop_category.apply(lambda x: x if(x in categories)else 'etc')
shops.shop_category.unique()<groupby> | data['Surname'] = data['Name'].apply(lambda x: x.split(',')[0] ) | Titanic - Machine Learning from Disaster |
11,482,088 | shops.groupby(['shop_category'] ).sum()<categorify> | data['Ticket_id'] = 'new_col'
def ticket_id(row):
row['Ticket_id'] = str(row.Pclass)+ '-' + str(row.Ticket)[:-1] + '-' + str(row.Fare)+ '-' + str(row.Embarked)
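# Dropping the last ticket digit makes consecutively numbered tickets collapse
# to one Ticket_id — presumably to catch parties who bought tickets together.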
return row
data = data.apply(ticket_id, axis='columns' ) | Titanic - Machine Learning from Disaster |
11,482,088 | category = ['ТЦ', 'ТРК', 'ТРЦ', 'ТК']
shops.shop_category = shops.shop_category.apply(lambda x: x if(x in category)else 'etc')
print('Category Distribution', shops.groupby(['shop_category'] ).sum())
shops['shop_category_code'] = LabelEncoder().fit_transform(shops['shop_category'] )<categorify> | data['Group_id'] = 'new_col2'
def group_id(row):
row['Group_id'] = str(row.Surname)+ '-' + str(row.Ticket_id)
return row
data = data.apply(group_id, axis='columns' ) | Titanic - Machine Learning from Disaster |
11,482,088 | shops['city'] = shops['shop_name'].str.split(' ' ).map(lambda x: x[0])
shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск'
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops = shops[['shop_id','city_code', 'shop_category_code']]
shops.head()<count_unique_values> | data['Title'] = 'man'
data.loc[data.Sex == 'female', 'Title'] = 'woman'
data.loc[data['Name'].str.contains('Master'), 'Title'] = 'boy' | Titanic - Machine Learning from Disaster |
11,482,088 | print(len(catgs.item_category_name.unique()))
catgs.item_category_name.unique()<feature_engineering> | data.loc[data.Title == 'man', 'Group_id'] = 'noGroup'
data['WC_count'] = data.loc[data.Title != 'man'].groupby('Group_id')['Group_id'].transform('count')
data.loc[data.WC_count <=1, 'Group_id'] = 'noGroup' | Titanic - Machine Learning from Disaster |
11,482,088 | catgs['type'] = catgs.item_category_name.apply(lambda x: x.split(' ')[0] ).astype(str)
catgs.loc[(catgs.type == 'Игровые')|(catgs.type == 'Аксессуары'), 'category'] = 'Игры'
catgs.loc[catgs.type == 'PC', 'category'] = 'Музыка'
category = ['Игры', 'Карты', 'Кино', 'Книги','Музыка', 'Подарки', 'Программы', 'Служебные', 'Чистые', 'Аксессуары']
catgs['type'] = catgs.type.apply(lambda x: x if(x in category)else 'etc')
print(catgs.groupby(['type'] ).sum())
catgs['type_code'] = LabelEncoder().fit_transform(catgs['type'])
catgs['split'] = catgs.item_category_name.apply(lambda x: x.split('-'))
catgs['subtype'] = catgs['split'].map(lambda x: x[1].strip() if len(x)> 1 else x[0].strip())
catgs['subtype_code'] = LabelEncoder().fit_transform(catgs['subtype'])
catgs = catgs[['item_category_id','type_code', 'subtype_code']]
catgs.head()<feature_engineering> | cols = ['PassengerId', 'Survived', 'Name', 'Title', 'Ticket_id','Group_id']
data.loc[(data.Ticket_id == '1-1696-134.5-C')&(data.Title != 'man'), cols] | Titanic - Machine Learning from Disaster |
11,482,088 | sales['date'] = pd.to_datetime(sales['date'], format='%d.%m.%Y')
sales['month'] = sales['date'].dt.month
sales['year'] = sales['date'].dt.year
sales = sales.drop(columns=['date'])
to_append = testd[['shop_id', 'item_id']].copy()
to_append['date_block_num'] = sales['date_block_num'].max() + 1
to_append['year'] = 2015
to_append['month'] = 11
to_append['item_cnt_day'] = 0
to_append['item_price'] = 0
sales = pd.concat([sales, to_append], ignore_index=True, sort=False)
sales.head()<feature_engineering> | indices = []
count = 0
for i in range(0,1309):
if(data.loc[i,'Title'] != 'man')&(data.loc[i,'Group_id'] == 'noGroup'):
data.loc[i,'Group_id'] = data.loc[(data['Ticket_id'] == data.loc[i, 'Ticket_id'])&(data.Title != 'man'), 'Group_id'].iloc[0]
if(data.loc[i, 'Group_id'] != 'noGroup'):
indices.append(i)
count += 1
print('{:d} passengers were added to an existing group'.format(count)) | Titanic - Machine Learning from Disaster |
11,482,088 | period = sales[['date_block_num', 'year', 'month']].drop_duplicates().reset_index(drop=True)
period['days'] = period.apply(lambda r: monthrange(r.year, r.month)[1], axis=1)
sales = sales.drop(columns=['month', 'year'])
period.head()<merge> | cols = ['PassengerId', 'Survived', 'Name', 'Title', 'Group_id']
data.loc[indices, cols] | Titanic - Machine Learning from Disaster |
11,482,088 | data = pd.merge(grid, shops, on='shop_id')
data = pd.merge(data, items, on='item_id')
data = pd.merge(data, catgs, on='item_category_id')
data = pd.merge(data, period, on='date_block_num')
data = data[['date_block_num', 'year', 'month', 'days', 'city_code', 'shop_category_code', 'shop_id', 'item_category_id', 'type_code', 'subtype_code', 'item_id']]
for c in ['date_block_num', 'month', 'days', 'city_code', 'shop_category_code', 'shop_id', 'item_category_id', 'type_code', 'subtype_code']:
data[c] = data[c].astype(np.int8)
data['item_id'] = data['item_id'].astype(np.int16)
data['year'] = data['year'].astype(np.int16)
del grid, shops, items, catgs, to_append
data.head()<data_type_conversions> | number_of_groups = data.loc[data.Group_id != 'noGroup', 'Group_id'].nunique()
print('Number of groups found: {:d}'.format(number_of_groups))
number_of_WCG_passengers = data.loc[data.Group_id != 'noGroup', 'Group_id'].count()
print('\nNumber of passengers in a group: {:d}'.format(number_of_WCG_passengers))
composition = data.loc[data.Group_id != 'noGroup','Title'].value_counts()
print('\nComposition of the groups:')
print(composition.to_string()) | Titanic - Machine Learning from Disaster |
11,482,088 | aux = sales\
.groupby(['date_block_num', 'shop_id', 'item_id'], as_index=False)\
.agg({'item_cnt_day' : 'sum', 'item_price' : 'mean'})\
.rename(columns= {'item_cnt_day' : 'item_cnt_month', 'item_price' : 'item_price_month'})
aux['item_cnt_month'] = aux['item_cnt_month'].astype(np.float16)
aux['item_price_month'] = aux['item_price_month'].astype(np.float16)
month_summary = pd.merge(data, aux, how='left', on=['date_block_num', 'shop_id', 'item_id'])\
.fillna(0.0 ).sort_values(by=['shop_id', 'item_id', 'date_block_num'])
del data, aux
month_summary.head()<feature_engineering> | data['WCSurvived'] = data.loc[(data.Title != 'man')&(data.Group_id != 'noGroup')].groupby('Group_id' ).Survived.transform('mean' ) | Titanic - Machine Learning from Disaster |
11,482,088 | month_summary['item_cnt_month'] = month_summary['item_cnt_month'].clip(0,20 )<categorify> | cols = ['PassengerId', 'Survived', 'WCSurvived', 'Name', 'Title', 'Group_id']
data.loc[data.Group_id == 'Sage-3-CA.234-69.55-S', cols] | Titanic - Machine Learning from Disaster |
11,482,088 | def agg_by(month_summary, group_cols, new_col, target_col = 'item_cnt_month', agg_func = 'mean'):
aux = month_summary\
.groupby(group_cols, as_index=False)\
.agg({target_col : agg_func})\
.rename(columns= {target_col : new_col})
aux[new_col] = aux[new_col].astype(np.float16)
return pd.merge(month_summary, aux, how='left', on=group_cols)
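# lag_feature shifts each monthly aggregate forward by i months, so a row for
# month t only ever sees values from months t-1, t-2, ... (no target leakage).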
def lag_feature(df, col, lags=[1,2,3,6,12]):
tmp = df[['date_block_num','shop_id','item_id', col]]
for i in lags:
shifted = tmp.copy()
cols = ['date_block_num','shop_id','item_id', '{}_lag_{}'.format(col, i)]
shifted.columns = cols
shifted['date_block_num'] += i
df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left' ).fillna(value={(cols[-1]): 0.0})
return df
def agg_by_and_lag(month_summary, group_cols, new_col, lags=[1,2,3,6,12], target_col = 'item_cnt_month', agg_func = 'mean'):
tmp = agg_by(month_summary, group_cols, new_col, target_col, agg_func)
tmp = lag_feature(tmp, new_col, lags)
return tmp.drop(columns=[new_col] )<groupby> | data.loc[(data.WCSurvived==0.75)|(data.WCSurvived==0.5), cols].sort_values(by='Group_id' ) | Titanic - Machine Learning from Disaster |
11,482,088 | month_summary = agg_by_and_lag(month_summary, ['date_block_num'], 'date_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'item_id'], 'date_item_avg_item_cnt', [1,2,3,6,12])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'city_code'], 'date_city_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id'], 'date_shop_avg_item_cnt', [1,2,3,6,12])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'item_category_id'], 'date_cat_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'type_code'], 'date_type_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'subtype_code'], 'date_subtype_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_category_code'], 'date_shop_category_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id', 'item_category_id'], 'date_shop_cat_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id', 'type_code'], 'date_shop_type_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id', 'subtype_code'], 'date_shop_subtype_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_category_code', 'subtype_code'], 'date_shop_category_subtype_avg_item_cnt', [1])
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'city_code', 'item_id'], 'date_item_city_avg_item_cnt', [1] )<groupby> | data.loc[data.Group_id.isin(test_groups), 'WCSurvived'] = 0
data.loc[(data.Group_id.isin(test_groups)) &(data.Pclass != 3), 'WCSurvived'] = 1 | Titanic - Machine Learning from Disaster |
11,482,088 | month_summary = agg_by_and_lag(month_summary, ['date_block_num'], 'date_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'item_id'], 'date_item_avg_item_price', [1,2,3,6,12], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'city_code'], 'date_city_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id'], 'date_shop_avg_item_price', [1,2,3,6,12], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'item_category_id'], 'date_cat_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'type_code'], 'date_type_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'subtype_code'], 'date_subtype_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_category_code'], 'date_shop_category_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id', 'item_category_id'], 'date_shop_cat_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id', 'type_code'], 'date_shop_type_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_id', 'subtype_code'], 'date_shop_subtype_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'shop_category_code', 'subtype_code'], 'date_shop_category_subtype_avg_item_price', [1], 'item_price_month')
month_summary = agg_by_and_lag(month_summary, ['date_block_num', 'city_code', 'item_id'], 'date_item_city_avg_item_price', [1], 'item_price_month' )<categorify> | print('WCSurvived test values:')
print(data[891:1309].WCSurvived.value_counts().to_string() ) | Titanic - Machine Learning from Disaster |
11,482,088 | month_summary['item_shop_first_sale'] = month_summary['date_block_num'] - month_summary.groupby(['item_id','shop_id'])['date_block_num'].transform('min')
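# item_shop_first_sale / item_first_sale: months elapsed since the item's first
# sale (per shop, and overall) — a freshness signal for newly introduced items.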
month_summary['item_first_sale'] = month_summary['date_block_num'] - month_summary.groupby('item_id')['date_block_num'].transform('min')<load_pretrained> | is_test = data.index >= 891
data.loc[is_test, 'Predict'] = 0
data.loc[is_test &(data.Sex == 'female'), 'Predict'] = 1
data.loc[is_test &(data.Sex == 'female')&(data['WCSurvived'] == 0), 'Predict'] = 0
data.loc[is_test &(data.Title == 'boy')&(data['WCSurvived'] == 1), 'Predict'] = 1
| Titanic - Machine Learning from Disaster |
11,482,088 | month_summary = pd.read_pickle('month_summary.pkl' )<prepare_x_and_y> | print('The following 8 males are predicted to live:')
cols = ['PassengerId', 'Name', 'Title', 'Group_id']
data[891:1309][cols].loc[(data.Title == 'boy')&(data.Predict == 1)] | Titanic - Machine Learning from Disaster |
11,482,088 | def generate_subsample(month_summary, target='item_cnt_month'):
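# Time-based split: month 34 is the test block, 33 is validation, and months
# 12-32 are training (the first 12 months are dropped so 12-month lags exist).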
X_test = month_summary[month_summary['date_block_num'] == 34]
X_test = X_test.drop(columns=[target])
X_val = month_summary[month_summary['date_block_num'] == 33]
y_val = X_val[target]
X_val = X_val.drop(columns=[target])
X_train = month_summary[(month_summary['date_block_num'] >= 12)&(month_summary['date_block_num'] < 33)]
y_train = X_train[target]
X_train = X_train.drop(columns=[target])
return X_train, y_train, X_val, y_val, X_test<prepare_x_and_y> | print('The following 15 females are predicted to die:')
data[891:1309][cols].loc[(data.Title == 'woman')&(data.Predict == 0)] | Titanic - Machine Learning from Disaster |
11,482,088 | X_train, y_train, X_val, y_val, X_test = generate_subsample(month_summary.drop(columns=['item_price_month']), 'item_cnt_month')
del month_summary<train_model> | print('The remaining 258 males are predicted to die')
print('and the remaining 137 females are predicted to live' ) | Titanic - Machine Learning from Disaster |
11,482,088 | def train_gbmodel(X_train, y_train, X_val, y_val):
RAND_SEED = 42
lgb_params = {'num_leaves': 2**8, 'max_depth': 19, 'max_bin': 107,
'bagging_freq': 1, 'bagging_fraction': 0.7135681370918421,
'feature_fraction': 0.49446461478601994, 'min_data_in_leaf': 2**8,
'learning_rate': 0.015980721586917768, 'num_threads': 2,
'min_sum_hessian_in_leaf': 6,
'random_state' : RAND_SEED,
'bagging_seed' : RAND_SEED,
'boost_from_average' : 'true',
'boost' : 'gbdt',
'metric' : 'rmse',
'verbose' : 1}
lgb_train = lgb.Dataset(X_train, label=y_train)
lgb_val = lgb.Dataset(X_val, label=y_val)
return lgb.train(lgb_params, lgb_train,
num_boost_round=300,
valid_sets=[lgb_train, lgb_val],
early_stopping_rounds=20 )<save_to_csv> | output = pd.DataFrame({'PassengerId': data[891:1309].PassengerId, 'Survived': data[891:1309].Predict.astype('int')})
output.to_csv('WCG_gender.csv', index=False)
print('WCG_gender submission was successfully saved!')
print('Submission is loading...you scored 81,6%!' ) | Titanic - Machine Learning from Disaster |
11,482,088 | y_pred = gbm_model.predict(X_test ).clip(0, 20)
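# Predictions are clipped to [0, 20] to match the competition's target range
# (item_cnt_month was clipped the same way during training).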
result = pd.merge(testd, X_test.assign(item_cnt_month=y_pred), how='left', on=['shop_id', 'item_id'])[['ID', 'item_cnt_month']]
result.to_csv('submission.csv', index=False )<import_modules> | def fix_fare(row):
if row.Fare == 0:
row.Fare = np.NaN
return row
print('The following {:d} passengers have a zero Fare:'.format(data[data.Fare==0].shape[0]))
cols = ['PassengerId', 'Survived', 'Pclass','Fare', 'Name']
data.loc[data.Fare==0, cols] | Titanic - Machine Learning from Disaster |
11,482,088 | RANDOM_SEED = 42<load_from_csv> | data['Ticket_freq'] = data.groupby('Ticket')['Ticket'].transform('count')
data['Pfare'] = data['Fare'] / data['Ticket_freq'] | Titanic - Machine Learning from Disaster |
PATH = '../input/competitive-data-science-predict-future-sales'
items = pd.read_csv(PATH + '/items.csv')
shops = pd.read_csv(PATH + '/shops.csv')
cats = pd.read_csv(PATH + '/item_categories.csv')
train = pd.read_csv(PATH + '/sales_train.csv')
test = pd.read_csv(PATH + '/test.csv' ).set_index('ID' )<create_dataframe> | train_male = data[0:891].loc[(data.Sex=='male')&(data.WCSurvived.isnull())]
test_male = data[891:1309].loc[(data.Sex=='male')&(data.WCSurvived.isnull())] | Titanic - Machine Learning from Disaster |
11,482,088 | summary_stats_table(train )<feature_engineering> | cols = ['PassengerId', 'Name', 'Pfare', 'Pclass', 'Embarked']
y_m = train_male['Survived']
features = ['Pfare', 'Pclass', 'Embarked']
X_m = train_male[features]
numerical_cols = ['Pfare']
categorical_cols = ['Pclass', 'Embarked']
numerical_transformer = Pipeline(steps=[
('imputer', SimpleImputer()),
('scaler', StandardScaler())
])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')) ,
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
preprocessor = ColumnTransformer(transformers=[
('num', numerical_transformer, numerical_cols),
('cat', categorical_transformer, categorical_cols)
])
precision_m = []
recall_m = []
for k in range(1,18):
pipeline1 = Pipeline(steps=[
('preprocessor', preprocessor),
('model', KNeighborsClassifier(n_neighbors=k))
])
precision_m.append(cross_val_score(pipeline1, X_m, y_m, cv=15, n_jobs=-1, scoring='precision' ).mean())
recall_m.append(cross_val_score(pipeline1, X_m, y_m, cv=15, n_jobs=-1, scoring='recall' ).mean())
k_range = range(1,18)
plt.figure(figsize=(7,5))
plt.plot(k_range, precision_m, label='15-fold precision')
plt.plot(k_range, recall_m, label='15-fold recall')
plt.axhline(y=0.5, color='r')
plt.xlabel('Value of k for KNN')
plt.title('Precision and recall by number of neighbors', fontsize=14)
plt.legend()
plt.show() | Titanic - Machine Learning from Disaster |
11,482,088 | train = train[train.item_price<100000]
train = train[train.item_cnt_day<1000]
train = train[train.item_price > 0].reset_index(drop=True)
train.loc[train.item_cnt_day < 0, 'item_cnt_day'] = 0
train.loc[train.shop_id == 0, 'shop_id'] = 57
test.loc[test.shop_id == 0, 'shop_id'] = 57
train.loc[train.shop_id == 1, 'shop_id'] = 58
test.loc[test.shop_id == 1, 'shop_id'] = 58
train.loc[train.shop_id == 11, 'shop_id'] = 10
test.loc[test.shop_id == 11, 'shop_id'] = 10
train.loc[train.shop_id == 40, 'shop_id'] = 39
test.loc[test.shop_id == 40, 'shop_id'] = 39
<feature_engineering> | m1 = KNeighborsClassifier(n_neighbors=1)
m2 = KNeighborsClassifier(n_neighbors=3)
m3 = KNeighborsClassifier(n_neighbors=7)
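# Hard-voting ensemble of three kNN models with different k: the majority vote
# smooths out the variance of any single neighborhood size.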
male_pipeline = Pipeline(steps=[
('preprocessor', preprocessor),
('voting',VotingClassifier([
('m1', m1),('m2', m2),('m3', m3)]))
])
print('15-fold precision of the ensemble: {:.3f}'.format(
cross_val_score(male_pipeline, X_m, y_m, cv=15, n_jobs=-1, scoring='precision' ).mean()))
print('15-fold recall of the ensemble: {:.3f}'.format(
cross_val_score(male_pipeline, X_m, y_m, cv=15, n_jobs=-1, scoring='recall' ).mean()))
print('15-fold accuracy of the ensemble: {:.3f}'.format(
cross_val_score(male_pipeline, X_m, y_m, cv=15, n_jobs=-1 ).mean()))
male_pipeline.fit(X_m, y_m)
learn_train_m = male_pipeline.predict(X_m)
X_test_m = test_male[features]
predictions_m = male_pipeline.predict(X_test_m)
print('\nThe following 9 adult males are predicted to live:')
test_male.loc[(predictions_m==1), cols] | Titanic - Machine Learning from Disaster |
11,482,088 | shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['city'] = shops['shop_name'].str.split(' ' ).map(lambda x: x[0])
shops['category'] = shops['shop_name'].str.split(' ' ).map(lambda x:x[1] ).astype(str)
shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск'
category = ['ТЦ', 'ТРК', 'ТРЦ', 'ТК']
shops.category = shops.category.apply(lambda x: x if(x in category)else 'etc')
shops.groupby(['category'] ).sum()
shops['shop_city'] = shops.city
shops['shop_category'] = shops.category
shops['shop_city'] = LabelEncoder().fit_transform(shops['shop_city'])
shops['shop_category'] = LabelEncoder().fit_transform(shops['shop_category'])
shops = shops[['shop_id','shop_city', 'shop_category']]
shops.head()
<categorify> | data.loc[891:1308, 'Predict'][(data.Sex=='male')&(data.WCSurvived.isnull())] = predictions_m
output = pd.DataFrame({'PassengerId': data[891:1309].PassengerId, 'Survived': data[891:1309].Predict.astype('int')})
output.to_csv('WCG_male.csv', index=False)
print('WCG_male submission was successfully saved!')
print('Submission is loading...you scored 82,3%!' ) | Titanic - Machine Learning from Disaster |
11,482,088 | cats['type_code'] = cats.item_category_name.apply(lambda x: x.split(' ')[0] ).astype(str)
cats.loc[(cats.type_code == 'Игровые')|(cats.type_code == 'Аксессуары'), 'type_code'] = 'Игры'
cats.loc[cats.type_code == 'PC', 'type_code'] = 'Музыка'
category = ['Игры', 'Карты', 'Кино', 'Книги','Музыка', 'Подарки', 'Программы', 'Служебные', 'Чистые']
cats['type_code'] = cats.type_code.apply(lambda x: x if(x in category)else 'etc')
cats['type_code'] = LabelEncoder().fit_transform(cats['type_code'])
cats['split'] = cats.item_category_name.apply(lambda x: x.split('-'))
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x)> 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]
<feature_engineering> | train_female = data[0:891].loc[(data.Sex=='female')&(data.WCSurvived.isnull())]
test_female = data[891:1309].loc[(data.Sex=='female')&(data.WCSurvived.isnull())] | Titanic - Machine Learning from Disaster |
11,482,088 | items['name_1'], items['name_2'] = items['item_name'].str.split('[', 1 ).str
items['name_1'], items['name_3'] = items['item_name'].str.split('(', 1 ).str
items['name_2'] = items['name_2'].str.replace('[^A-Za-z0-9А-Яа-я]+', ' ' ).str.lower()
items['name_3'] = items['name_3'].str.replace('[^A-Za-z0-9А-Яа-я]+', ' ' ).str.lower()
items = items.fillna('0')
result_1 = Counter(' '.join(items['name_2'].values.tolist() ).split(' ')).items()
result_1 = sorted(result_1, key=itemgetter(1))
result_1 = pd.DataFrame(result_1, columns=['feature', 'count'])
result_1 = result_1[(result_1['feature'].str.len() > 1)&(result_1['count'] > 200)]
result_2 = Counter(' '.join(items['name_3'].values.tolist() ).split(" ")).items()
result_2 = sorted(result_2, key=itemgetter(1))
result_2 = pd.DataFrame(result_2, columns=['feature', 'count'])
result_2 = result_2[(result_2['feature'].str.len() > 1)&(result_2['count'] > 200)]
result = pd.concat([result_1, result_2])
result = result.drop_duplicates(subset=['feature'] ).reset_index(drop=True)
print('Most common additional features:', result)
items['type'] = items.name_2.apply(lambda x: x[0:8] if x.split(' ')[0] == 'xbox' else x.split(' ')[0])
items.loc[(items.type == 'x360')|(items.type == 'xbox360'), 'type'] = 'xbox 360'
items.loc[items.type == '', 'type'] = 'mac'
items.type = items.type.apply(lambda x: x.replace(' ',''))
items.loc[(items.type == 'pc')|(items.type == 'pс')|(items.type == 'рс'), 'type'] = 'pc'
items.loc[(items.type == 'рs3'), 'type'] = 'ps3'
group_sum = items.groupby('type' ).sum()
drop_list = group_sum.loc[group_sum.item_category_id < 200].index
print('drop list:', drop_list)
items.name_2 = items.type.apply(lambda x: 'etc' if x in drop_list else x)
items = items.drop(['type'], axis=1)
print(items.groupby('name_2' ).count() [['item_id']])
items['name_2'] = LabelEncoder().fit_transform(items['name_2'] ).astype(np.int8)
items['name_3'] = LabelEncoder().fit_transform(items['name_3'] ).astype(np.int16)
items.drop(['item_name', 'name_1'], axis=1, inplace=True )<data_type_conversions> | custom_precision = make_scorer(precision_score, pos_label=0, zero_division=0)
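# pos_label=0: for the female model the outcome of interest is presumably death
# (Survived=0), so precision/recall are computed with respect to label 0.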
custom_recall = make_scorer(recall_score, pos_label=0 ) | Titanic - Machine Learning from Disaster |
11,482,088 | ts = time.time()
matrix = []
cols = ['date_block_num','shop_id','item_id']
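# Build the cartesian product of the shops and items active in each month, so
# the training matrix contains explicit zero-sales rows, matching the test setup.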
for i in range(34):
sales = train[train.date_block_num==i]
matrix.append(np.array(list(product([i], sales.shop_id.unique() , sales.item_id.unique())) , dtype='int16'))
matrix = pd.DataFrame(np.vstack(matrix), columns=cols)
matrix['date_block_num'] = matrix['date_block_num'].astype(np.int8)
matrix['shop_id'] = matrix['shop_id'].astype(np.int8)
matrix['item_id'] = matrix['item_id'].astype(np.int16)
matrix.sort_values(cols,inplace=True)
print('Use time:', time.time() - ts)
<merge> | f1 = KNeighborsClassifier(n_neighbors=4)
f2 = KNeighborsClassifier(n_neighbors=9)
f3 = KNeighborsClassifier(n_neighbors=11)
female_pipeline = Pipeline(steps=[
('preprocessor', preprocessor),
('voting', VotingClassifier([
('f1', f1),('f2', f2),('f3', f3)]))
])
print('9-fold precision of the ensemble: {:.3f}'.format(
cross_val_score(female_pipeline, X_f, y_f, cv=9, scoring=custom_precision ).mean()))
print('9-fold recall of the ensemble: {:.3f}'.format(
cross_val_score(female_pipeline, X_f, y_f, cv=9, scoring=custom_recall ).mean()))
print('9-fold accuracy of the ensemble: {:.3f}'.format(
cross_val_score(female_pipeline, X_f, y_f, cv=9 ).mean()))
female_pipeline.fit(X_f, y_f)
learn_train_f = female_pipeline.predict(X_f)
X_test_f = test_female[features]
predictions_f = female_pipeline.predict(X_test_f)
print('\nThe following 6 non-WCG females are predicted to die:')
test_female.loc[(predictions_f==0), cols] | Titanic - Machine Learning from Disaster |
11,482,088 | train['revenue'] = train['item_price'] * train['item_cnt_day']
ts = time.time()
group = train.groupby(['date_block_num','shop_id','item_id'] ).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] =(matrix['item_cnt_month']
.fillna(0)
.clip(0,20)
.astype(np.float16))
print('Use time:', time.time() - ts)
<data_type_conversions> | data.loc[(data.index >= 891)&(data.Sex=='female')&(data.WCSurvived.isnull()), 'Predict'] = predictions_f
output = pd.DataFrame({'PassengerId': data[891:1309].PassengerId, 'Survived': data[891:1309].Predict.astype('int')})
output.to_csv('WCG_male_female.csv', index=False)
print('WCG_male_female was successfully saved!')
print('Submission is loading...you scored 82,8%!' ) | Titanic - Machine Learning from Disaster |
10,854,282 | test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16)
ts = time.time()
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True)
print('Use time:', time.time() - ts)
<data_type_conversions> | data_path = '/kaggle/input/titanic/'
train_df = pd.read_csv(data_path+'train.csv')
test_df = pd.read_csv(data_path+'test.csv' ) | Titanic - Machine Learning from Disaster |
10,854,282 | ts = time.time()
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['shop_city'] = matrix['shop_city'].astype(np.int8)
matrix['shop_category'] = matrix['shop_category'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
print('Use time:', time.time() - ts)
<merge> | combined_df = pd.concat([train_df,test_df])
train_df.name = 'Training Dataset'
test_df.name = 'Test Dataset'
combined_df.name = 'Combined Dataset' | Titanic - Machine Learning from Disaster |
10,854,282 | def lag_feature(df, lags, col):
tmp = df[['date_block_num','shop_id','item_id',col]]
for i in lags:
shifted = tmp.copy()
shifted.columns = ['date_block_num','shop_id','item_id', col+'-lag'+str(i)]
shifted['date_block_num'] += i
df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
return df<groupby> | def missing_columns(df):
for col in df.columns.tolist() :
print('{} column missing values: {}'.format(col, df[col].isnull().sum()))
print('\n')
for df in [train_df,test_df]:
print('{}'.format(df.name))
missing_columns(df ) | Titanic - Machine Learning from Disaster |
10,854,282 | dict_simple = {'date_block_num': 'date', 'item_id': 'item', 'shop_id': 'shop',
'item_category_id': 'itemcate', 'item_price':'price',
'item_cnt_month': 'cnt', }
def sum_names(name_list):
names = ''
for x in name_list:
names += x+'+'
return names
def group_agg(matrix, groupby_feats, transform_feat, aggtype='mean'):
group = matrix.groupby(groupby_feats ).agg({transform_feat: [aggtype]})
groupby_feats_simple = [dict_simple[x] if x in dict_simple.keys() else x
for x in groupby_feats]
transform_feat_simple = dict_simple[transform_feat] \
if transform_feat in dict_simple.keys() else transform_feat
group_name = f'{sum_names(groupby_feats_simple)[:-1]}-{aggtype.upper() }-{transform_feat_simple}'
group.columns = [ group_name ]
group.reset_index(inplace=True)
return group, group_name
def add_groupmean_lag(matrix, groupby_feats, transform_feat, lags):
group, group_name = group_agg(matrix, groupby_feats, transform_feat)
matrix = pd.merge(matrix, group, on=groupby_feats, how='left')
matrix[group_name] = matrix[group_name].astype(np.float16)
if lags != []:
matrix = lag_feature(matrix, lags, group_name)
matrix.drop([group_name], axis=1, inplace=True)
return matrix
<groupby> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
10,854,282 | ts = time.time()
transform_feat = 'item_cnt_month'
groupby_feats = ['date_block_num']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'item_id']
lags = [1,2,3]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'shop_id']
lags = [1,2,3]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'item_category_id']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'shop_id', 'item_category_id']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'shop_id', 'item_id']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'shop_id', 'subtype_code']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'shop_city']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
groupby_feats = ['date_block_num', 'item_id', 'shop_city']
lags = [1]
matrix = add_groupmean_lag(matrix, groupby_feats, transform_feat, lags)
print('Use time:', time.time() - ts)
<categorify> | mask_m1 =(train_df.Sex == "male")&(train_df.Pclass == 1)
mask_m2 =(train_df.Sex == "male")&(train_df.Pclass == 2)
mask_m3 =(train_df.Sex == "male")&(train_df.Pclass == 3)
mask_f1 =(train_df.Sex == "female")&(train_df.Pclass == 1)
mask_f2 =(train_df.Sex == "female")&(train_df.Pclass == 2)
mask_f3 =(train_df.Sex == "female")&(train_df.Pclass == 3)
m_age_class1_male = combined_df.loc[(combined_df.Sex == "male")&(combined_df.Pclass == 1),'Age'].dropna().median()
m_age_class2_male = combined_df.loc[(combined_df.Sex == "male")&(combined_df.Pclass == 2),'Age'].dropna().median()
m_age_class3_male = combined_df.loc[(combined_df.Sex == "male")&(combined_df.Pclass == 3),'Age'].dropna().median()
m_age_class1_female = combined_df.loc[(combined_df.Sex == "female")&(combined_df.Pclass == 1),'Age'].dropna().median()
m_age_class2_female = combined_df.loc[(combined_df.Sex == "female")&(combined_df.Pclass == 2),'Age'].dropna().median()
m_age_class3_female = combined_df.loc[(combined_df.Sex == "female")&(combined_df.Pclass == 3),'Age'].dropna().median()
train_df.loc[mask_m1,'Age'] = train_df.loc[mask_m1,'Age'].fillna(m_age_class1_male)
train_df.loc[mask_m2,'Age'] = train_df.loc[mask_m2,'Age'].fillna(m_age_class2_male)
train_df.loc[mask_m3,'Age'] = train_df.loc[mask_m3,'Age'].fillna(m_age_class3_male)
train_df.loc[mask_f1,'Age'] = train_df.loc[mask_f1,'Age'].fillna(m_age_class1_female)
train_df.loc[mask_f2,'Age'] = train_df.loc[mask_f2,'Age'].fillna(m_age_class2_female)
train_df.loc[mask_f3,'Age'] = train_df.loc[mask_f3,'Age'].fillna(m_age_class3_female)
| Titanic - Machine Learning from Disaster |
10,854,282 | ts = time.time()
features_to_drop = []
transform_feat = 'item_price'
groupby_feats = ['item_id']
group, mean_price_col = group_agg(train, groupby_feats,
transform_feat, aggtype='mean')
matrix = pd.merge(matrix, group, on=groupby_feats, how='left')
matrix[mean_price_col] = matrix[mean_price_col].astype(np.float16)
transform_feat = 'item_price'
groupby_feats = ['date_block_num','item_id']
group, mean_monthlyprice_col = group_agg(train, groupby_feats,
transform_feat, aggtype='mean')
matrix = pd.merge(matrix, group, on=groupby_feats, how='left')
matrix[mean_monthlyprice_col] = matrix[mean_monthlyprice_col].astype(np.float16)
lags = [1,2,3]
matrix = lag_feature(matrix, lags, mean_monthlyprice_col)
for i in lags:
matrix['delta_price-lag'+str(i)] = \
(matrix[f'{mean_monthlyprice_col}-lag'+str(i)] - matrix[mean_price_col])\
/ matrix[mean_price_col]
matrix['delta_price-lag']=0
bool_loc = np.ones(len(matrix)) ==1
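# Cascade over lags 1..3: keep the price delta from the most recent lag month
# with a non-zero value; later lags only fill rows where earlier ones were 0.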
for i in lags:
matrix.loc[bool_loc, 'delta_price-lag'] = matrix.loc[bool_loc,'delta_price-lag'+str(i)]
bool_loc &= matrix['delta_price-lag'+str(i)]==0
matrix['delta_price-lag'] = matrix['delta_price-lag'].astype(np.float16)
matrix['delta_price-lag'].fillna(0, inplace=True)
features_to_drop.append(mean_price_col)
features_to_drop.append(mean_monthlyprice_col)
for i in lags:
features_to_drop += [f'{mean_monthlyprice_col}-lag'+str(i)]
features_to_drop += ['delta_price-lag'+str(i)]
matrix.drop(features_to_drop, axis=1, inplace=True)
print('Use time:', time.time() - ts)
<merge> | mask_m1 =(test_df.Sex == "male")&(test_df.Pclass == 1)
mask_m2 =(test_df.Sex == "male")&(test_df.Pclass == 2)
mask_m3 =(test_df.Sex == "male")&(test_df.Pclass == 3)
mask_f1 =(test_df.Sex == "female")&(test_df.Pclass == 1)
mask_f2 =(test_df.Sex == "female")&(test_df.Pclass == 2)
mask_f3 =(test_df.Sex == "female")&(test_df.Pclass == 3)
test_df.loc[mask_m1,'Age'] = test_df.loc[mask_m1,'Age'].fillna(m_age_class1_male)
test_df.loc[mask_m2,'Age'] = test_df.loc[mask_m2,'Age'].fillna(m_age_class2_male)
test_df.loc[mask_m3,'Age'] = test_df.loc[mask_m3,'Age'].fillna(m_age_class3_male)
test_df.loc[mask_f1,'Age'] = test_df.loc[mask_f1,'Age'].fillna(m_age_class1_female)
test_df.loc[mask_f2,'Age'] = test_df.loc[mask_f2,'Age'].fillna(m_age_class2_female)
test_df.loc[mask_f3,'Age'] = test_df.loc[mask_f3,'Age'].fillna(m_age_class3_female ) | Titanic - Machine Learning from Disaster |
10,854,282 | ts = time.time()
group = train.groupby(['date_block_num','shop_id'] ).agg({'revenue': ['sum']})
group.columns = ['date_shop_revenue']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num','shop_id'], how='left')
matrix['date_shop_revenue'] = matrix['date_shop_revenue'].astype(np.float32)
group = group.groupby(['shop_id'] ).agg({'date_shop_revenue': ['mean']})
group.columns = ['shop_avg_revenue']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['shop_id'], how='left')
matrix['shop_avg_revenue'] = matrix['shop_avg_revenue'].astype(np.float32)
matrix['delta_revenue'] =(matrix['date_shop_revenue'] - matrix['shop_avg_revenue'])/ matrix['shop_avg_revenue']
matrix['delta_revenue'] = matrix['delta_revenue'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'delta_revenue')
matrix.drop(['date_shop_revenue','shop_avg_revenue','delta_revenue'], axis=1, inplace=True)
print('Use time:', time.time() - ts)
<feature_engineering> | train_df[train_df.Embarked.isna() ] | Titanic - Machine Learning from Disaster |
10,854,282 | total_block_num = 35
date_block_num = np.arange(total_block_num)
date_block = [pd.Timestamp(2013, 1, 1)+pd.DateOffset(months=x)for x in date_block_num]
df_date = pd.DataFrame(date_block_num, columns=['date_block_num'])
df_date['date_block'] = date_block
df_date['year'] = df_date['date_block'].dt.year
df_date['month'] = df_date['date_block'].dt.month
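# week0..week6 count how many times each weekday occurs in the month (column 0
# is Monday by default), capturing month-length and weekend effects on sales.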
for i in range(len(df_date)) :
day_to_count = 0
calendar_matrix = calendar.monthcalendar(df_date['year'].iloc[i],df_date['month'].iloc[i])
for j in range(7):
num_days = sum(1 for x in calendar_matrix if x[j] != 0)
df_date.loc[i, f'week{j}'] = num_days
df_date = df_date[['date_block_num', 'year','month','week0','week1',
'week2','week3','week4','week5','week6']]
df_date['days'] = df_date[['week0','week1','week2','week3','week4','week5','week6']].sum(axis=1)
df_date['year'] = df_date['year']-2012
df_date = df_date.astype(np.int8)
matrix = pd.merge(matrix, df_date, on=['date_block_num'], how='left')
<categorify> | train_df['Embarked'] = train_df['Embarked'].fillna('C' ) | Titanic - Machine Learning from Disaster |
10,854,282 | matrix['item_shop_first_sale'] = \
matrix['date_block_num'] - matrix.groupby(['item_id','shop_id'])['date_block_num'].transform('min')
matrix['item_first_sale'] = \
matrix['date_block_num'] - matrix.groupby('item_id')['date_block_num'].transform('min')
<filter> | median_class3_fare = combined_df.loc[(combined_df.Pclass == 3),'Fare'].dropna().median()
test_df.Fare.fillna(median_class3_fare,inplace=True)
| Titanic - Machine Learning from Disaster |
10,854,282 | matrix = matrix[matrix.date_block_num > 3]
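# The filter above drops months 0-3, where the short lag features would be
# entirely missing.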
<data_type_conversions> | train_df['SibSp_cat'] =(train_df['SibSp'] > 0)* 1
train_df['Parch_cat'] =(train_df['Parch'] > 0)* 1
train_df['Gender'] =(train_df['Sex'] == 'female')* 1
train_df['Class 1'] =(train_df['Pclass'] == 1)* 1
train_df['Class 2'] =(train_df['Pclass'] == 2)* 1
train_df['Class 3'] =(train_df['Pclass'] == 3)* 1
train_df['IsAlone'] =(( train_df['SibSp'] == 0)&(train_df['Parch'] == 0)) * 1
train_df['High_Fare'] =(train_df['Fare'] >= 200.0)* 1
train_df['Family_Members'] = train_df.SibSp + train_df.Parch | Titanic - Machine Learning from Disaster |
10,854,282 | def fill_na(df):
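# Fill NaNs only in lagged count columns (a missing lag means no sales, i.e. 0);
# lagged price columns are deliberately left as NaN.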
for col in df.columns:
if('-lag' in col)&(df[col].isnull().any()):
print(col)
if('cnt' in col):
df[col].fillna(0, inplace=True)
return df
matrix = fill_na(matrix )<count_missing_values> | test_df['SibSp_cat'] =(test_df['SibSp'] > 0)* 1
test_df['Parch_cat'] =(test_df['Parch'] > 0)* 1
test_df['Gender'] =(test_df['Sex'] == 'female')* 1
test_df['Class 1'] =(test_df['Pclass'] == 1)* 1
test_df['Class 2'] =(test_df['Pclass'] == 2)* 1
test_df['Class 3'] =(test_df['Pclass'] == 3)* 1
test_df['IsAlone'] =(( test_df['SibSp'] == 0)&(test_df['Parch'] == 0)) * 1
test_df['High_Fare'] =(test_df['Fare'] >= 200.0)* 1
test_df['Family_Members'] = test_df.SibSp + test_df.Parch | Titanic - Machine Learning from Disaster |
10,854,282 | matrix.isna().sum()<load_pretrained> | y_train = train_df['Survived']
features = ['Gender','Age','Class 1','Class 2','Class 3','Family_Members','Embarked','Fare']
X_train = train_df[features]
X_test = test_df[features] | Titanic - Machine Learning from Disaster |
10,854,282 | del group
del items
del shops
del cats
del train
gc.collect() ;
matrix.to_pickle('../working/data.pkl')
del matrix
gc.collect() ;<load_from_csv> | X_train_1hot = pd.get_dummies(X_train)
X_train_1hot | Titanic - Machine Learning from Disaster |
10,854,282 | data = pd.read_pickle('../working/data.pkl')
test = pd.read_csv('../input/competitive-data-science-predict-future-sales/test.csv').set_index('ID')
print(len(data.columns))
data.columns<prepare_x_and_y> | X_test_1hot = pd.get_dummies(X_test)
X_test_1hot | Titanic - Machine Learning from Disaster |
10,854,282 | X_train = data[data.date_block_num < 33].drop(['item_cnt_month'], axis=1)
Y_train = data[data.date_block_num < 33]['item_cnt_month']
X_valid = data[data.date_block_num == 33].drop(['item_cnt_month'], axis=1)
Y_valid = data[data.date_block_num == 33]['item_cnt_month']
X_test = data[data.date_block_num == 34].drop(['item_cnt_month'], axis=1)
del data
gc.collect() ;<create_dataframe> | base_model = RandomForestClassifier(random_state=42,n_estimators=100,max_depth=5)
fit = base_model.fit(X_train_1hot,y_train ) | Titanic - Machine Learning from Disaster |
10,854,282 | lgb_train = lgb.Dataset(X_train, Y_train)
lgb_eval = lgb.Dataset(X_valid, Y_valid, reference=lgb_train)
del X_train
gc.collect() ;<compute_test_metric> | fit.score(X_train_1hot,y_train ) | Titanic - Machine Learning from Disaster |
10,854,282 | def rmsle(y, y_pred):
return np.sqrt(mean_squared_error(y, y_pred))
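# Note: despite the name, this computes plain RMSE (no log transform), which is
# consistent with the 'rmse' metric configured in params below.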
params = {'num_leaves': 2000, 'max_depth': 19, 'max_bin': 107, 'n_estimators': 3747,
'bagging_freq': 1, 'bagging_fraction': 0.7135681370918421,
'feature_fraction': 0.49446461478601994, 'min_data_in_leaf': 88,
'learning_rate': 0.015980721586917768, 'num_threads': 3,
'min_sum_hessian_in_leaf': 6,
'random_state' : RANDOM_SEED,
'verbosity' : 1,
'bagging_seed' : RANDOM_SEED,
'boost_from_average' : 'true',
'boost' : 'gbdt',
'metric' : 'rmse',}
model = lgb.train(params,
lgb_train,
num_boost_round=20,
valid_sets=[lgb_train,lgb_eval],
early_stopping_rounds=20,
verbose_eval=1,
)
y_pred = model.predict(X_valid)
rmsle(Y_valid, y_pred )<save_to_csv> | n_estimators = [int(x)for x in np.linspace(start = 100, stop = 1000, num = 10)]
max_features = ['auto', 'sqrt']
max_depth = [5,6,7,8,9,10]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(random_grid ) | Titanic - Machine Learning from Disaster |
10,854,282 | Y_pred = model.predict(X_valid ).clip(0, 20)
Y_test = model.predict(X_test ).clip(0, 20)
submission = pd.DataFrame({
"ID": test.index,
"item_cnt_month": Y_test
})
submission.to_csv('lgb_submission.csv', index=False)
pickle.dump(Y_pred, open('lgb_train.pickle', 'wb'))
pickle.dump(Y_test, open('lgb_test.pickle', 'wb'))<import_modules> | rf = RandomForestClassifier()
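# Randomized search samples 200 parameter combinations from random_grid and
# scores each with 4-fold CV — far cheaper than an exhaustive grid search.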
rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 200, cv = 4, verbose=2, random_state=42, n_jobs = -1)
rf_random.fit(X_train_1hot, y_train)
| Titanic - Machine Learning from Disaster |
10,854,282 | BatchNormalization, Input, Conv2D, GlobalAveragePooling2D,concatenate,Concatenate)
<set_options> | rf_random.best_params_ | Titanic - Machine Learning from Disaster |
10,854,282 | WORKERS = 2
CHANNEL = 3
warnings.filterwarnings("ignore")
SIZE = 300
NUM_CLASSES = 5<load_from_csv> | rf_best = rf_random.best_estimator_
best_fit = rf_best.fit(X_train_1hot, y_train)
best_fit.score(X_train_1hot,y_train ) | Titanic - Machine Learning from Disaster |
10,854,282 | <split><EOS> | predictions = base_model.predict(X_test_1hot)
output = pd.DataFrame({'PassengerId': test_df.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" ) | Titanic - Machine Learning from Disaster |
9,725,565 | <SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables> | sub1=pd.read_csv('../input/titanic-leaked/titanic.csv')
9,725,565 | class My_Generator(Sequence):
    # Keras Sequence that streams APTOS images from disk in batches, with
    # optional shuffling, augmentation (imgaug pipeline `seq`, defined elsewhere
    # in the kernel), and mixup.
    def __init__(self, image_filenames, labels,
                 batch_size, is_train=True,
                 mix=False, augment=False):
        self.image_filenames, self.labels = image_filenames, labels
        self.batch_size = batch_size
        self.is_train = is_train
        self.is_augment = augment
        if self.is_train:
            self.on_epoch_end()
        self.is_mix = mix
    def __len__(self):
        return int(np.ceil(len(self.image_filenames) / float(self.batch_size)))
    def __getitem__(self, idx):
        batch_x = self.image_filenames[idx * self.batch_size:(idx + 1) * self.batch_size]
        batch_y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        if self.is_train:
            return self.train_generate(batch_x, batch_y)
        return self.valid_generate(batch_x, batch_y)
    def on_epoch_end(self):
        # reshuffle the training set between epochs; validation order stays fixed
        if self.is_train:
            self.image_filenames, self.labels = shuffle(self.image_filenames, self.labels)
    def mix_up(self, x, y):
        # mixup: convex-combine each sample with a randomly paired one
        lam = np.random.beta(0.2, 0.4)
        ori_index = np.arange(int(len(x)))
        index_array = np.arange(int(len(x)))
        np.random.shuffle(index_array)
        mixed_x = lam * x[ori_index] + (1 - lam) * x[index_array]
        mixed_y = lam * y[ori_index] + (1 - lam) * y[index_array]
        return mixed_x, mixed_y
    def train_generate(self, batch_x, batch_y):
        batch_images = []
        for (sample, label) in zip(batch_x, batch_y):
            img = cv2.imread('../input/aptos2019-blindness-detection/train_images/' + sample + '.png')
            img = cv2.resize(img, (SIZE, SIZE))
            if self.is_augment:
                img = seq.augment_image(img)
            batch_images.append(img)
        batch_images = np.array(batch_images, np.float32) / 255
        batch_y = np.array(batch_y, np.float32)
        if self.is_mix:
            batch_images, batch_y = self.mix_up(batch_images, batch_y)
        return batch_images, batch_y
    def valid_generate(self, batch_x, batch_y):
        batch_images = []
        for (sample, label) in zip(batch_x, batch_y):
            img = cv2.imread('../input/aptos2019-blindness-detection/train_images/' + sample + '.png')
            img = cv2.resize(img, (SIZE, SIZE))
            batch_images.append(img)
        batch_images = np.array(batch_images, np.float32) / 255
        batch_y = np.array(batch_y, np.float32)
        return batch_images, batch_y<choose_model_class> | sub1.to_csv('submission1.csv', index=False) | Titanic - Machine Learning from Disaster |
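For orientation, a minimal sketch of how Keras consumes a Sequence like My_Generator (assumes `train_x` holds APTOS id codes and `train_y` one-hot labels, as in the training cells below):

gen = My_Generator(train_x, train_y, batch_size=32, is_train=True, augment=False)
xb, yb = gen[0]  # __getitem__ reads, resizes, and rescales one batch from disk
print(len(gen), xb.shape, yb.shape)  # batches per epoch, (32, SIZE, SIZE, 3), (32, NUM_CLASSES)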
9,725,565 | def create_model(input_shape, n_out):
    input_tensor = Input(shape=input_shape)
    base_model = DenseNet121(include_top=False,
                             weights=None,
                             input_tensor=input_tensor)
    base_model.load_weights("../input/densenet-keras/DenseNet-BC-121-32-no-top.h5")
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    final_output = Dense(n_out, activation='softmax', name='final_output')(x)
    model = Model(input_tensor, final_output)
    return model<choose_model_class> | %matplotlib inline
| Titanic - Machine Learning from Disaster |
9,725,565 | EarlyStopping, ReduceLROnPlateau, CSVLogger)
epochs = 30; batch_size = 32
checkpoint = ModelCheckpoint('../working/densenet_.h5', monitor='val_loss', verbose=1,
                             save_best_only=True, mode='min', save_weights_only=True)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4,
                                   verbose=1, mode='auto', epsilon=0.0001)
early = EarlyStopping(monitor="val_loss",
                      mode="min",
                      patience=9)
csv_logger = CSVLogger(filename='../working/training_log.csv',
                       separator=',',
                       append=True)
train_generator = My_Generator(train_x, train_y, 128, is_train=True)
train_mixup = My_Generator(train_x, train_y, batch_size, is_train=True, mix=False, augment=True)
valid_generator = My_Generator(valid_x, valid_y, batch_size, is_train=False)
model = create_model(
    input_shape=(SIZE, SIZE, 3),
    n_out=NUM_CLASSES)<train_model> | train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('.. /input/titanic/test.csv')
combine = [train_df, test_df] | Titanic - Machine Learning from Disaster |
9,725,565 | def kappa_loss(y_true, y_pred, y_pow=2, eps=1e-12, N=5, bsize=32, name='kappa'):
    # Differentiable surrogate of quadratic weighted kappa, blended 50/50 with
    # categorical cross-entropy (TF 1.x API: tf.to_float).
    with tf.name_scope(name):
        y_true = tf.to_float(y_true)
        repeat_op = tf.to_float(tf.tile(tf.reshape(tf.range(0, N), [N, 1]), [1, N]))
        repeat_op_sq = tf.square(repeat_op - tf.transpose(repeat_op))
        weights = repeat_op_sq / tf.to_float((N - 1) ** 2)
        pred_ = y_pred ** y_pow
        try:
            pred_norm = pred_ / (eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1]))
        except Exception:
            pred_norm = pred_ / (eps + tf.reshape(tf.reduce_sum(pred_, 1), [bsize, 1]))
        hist_rater_a = tf.reduce_sum(pred_norm, 0)
        hist_rater_b = tf.reduce_sum(y_true, 0)
        conf_mat = tf.matmul(tf.transpose(pred_norm), y_true)
        nom = tf.reduce_sum(weights * conf_mat)
        denom = tf.reduce_sum(weights * tf.matmul(
            tf.reshape(hist_rater_a, [N, 1]), tf.reshape(hist_rater_b, [1, N])) /
            tf.to_float(bsize))
        return nom * 0.5 / (denom + eps) + categorical_crossentropy(y_true, y_pred) * 0.5<predict_on_test> | train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
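kappa_loss above is a differentiable surrogate for quadratic weighted kappa, with weights w_ij = (i - j)^2 / (N - 1)^2, blended 50/50 with cross-entropy. A sketch of the plain metric it targets:

import numpy as np
from sklearn.metrics import cohen_kappa_score

# Quadratic weighting makes a 0-vs-4 confusion cost 16x an adjacent-grade one.
y_true = np.array([0, 1, 2, 3, 4, 4])
y_hat = np.array([0, 1, 2, 4, 4, 3])
print(cohen_kappa_score(y_true, y_hat, weights='quadratic'))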
9,725,565 | class QWKEvaluation(Callback):
    # computes quadratic weighted kappa on the validation set each epoch and
    # saves the best-scoring model
    def __init__(self, validation_data=(), batch_size=64, interval=1):
        super(Callback, self).__init__()
        self.interval = interval
        self.batch_size = batch_size
        self.valid_generator, self.y_val = validation_data
        self.history = []
    def on_epoch_end(self, epoch, logs={}):
        if epoch % self.interval == 0:
            y_pred = self.model.predict_generator(generator=self.valid_generator,
                                                  steps=np.ceil(float(len(self.y_val)) / float(self.batch_size)),
                                                  workers=1, use_multiprocessing=False,
                                                  verbose=1)
            def flatten(y):
                return np.argmax(y, axis=1).reshape(-1)
            score = cohen_kappa_score(flatten(self.y_val),
                                      flatten(y_pred),
                                      labels=[0, 1, 2, 3, 4],
                                      weights='quadratic')
            print("\n epoch: %d - QWK_score: %.6f \n" % (epoch + 1, score))
            self.history.append(score)
            if score >= max(self.history):
                print('saving checkpoint: ', score)
                self.model.save('../working/densenet_bestqwk.h5')
qwk = QWKEvaluation(validation_data=(valid_generator, valid_y),
                    batch_size=batch_size, interval=1)<train_model> | train_df[["Sex", "Survived"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
9,725,565 | for layer in model.layers:
    layer.trainable = False
for i in range(-3, 0):  # warm-up: train only the freshly added head layers first
    model.layers[i].trainable = True
model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(1e-3))
model.fit_generator(
    train_generator,
    steps_per_epoch=np.ceil(float(len(train_y)) / float(128)),
    epochs=2,
    workers=WORKERS, use_multiprocessing=True,
    verbose=1,
    callbacks=[qwk])<train_model> | train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
9,725,565 | for layer in model.layers:
    layer.trainable = True
callbacks_list = [checkpoint, csv_logger, reduceLROnPlat, early, qwk]
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=1e-4))
model.fit_generator(
    train_mixup,
    steps_per_epoch=np.ceil(float(len(train_x)) / float(batch_size)),
    validation_data=valid_generator,
    validation_steps=np.ceil(float(len(valid_x)) / float(batch_size)),
    epochs=epochs,
    verbose=1,
    workers=1, use_multiprocessing=False,
    callbacks=callbacks_list)<load_from_csv> | train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
9,725,565 | submit = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
model.load_weights('../working/densenet_bestqwk.h5')
predicted = []<predict_on_test> | for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False)
pd.crosstab(train_df['Title'], train_df['Sex']) | Titanic - Machine Learning from Disaster |
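The extract pattern pulls the first alphabetic token that ends in a period, which is the honorific. A hedged illustration on made-up rows:

import pandas as pd

names = pd.Series(["Braund, Mr. Owen Harris", "Heikkinen, Miss. Laina"])
print(names.str.extract(r'([A-Za-z]+)\.', expand=False).tolist())  # ['Mr', 'Miss']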
9,725,565 | for i, name in tqdm(enumerate(submit['id_code'])):
    # submit ids come from the test sample_submission, so test_images/ must be
    # the intended folder (the source cell pointed at train_images/, a slip).
    path = os.path.join('../input/aptos2019-blindness-detection/test_images/', name + '.png')
    image = cv2.imread(path)
    image = cv2.resize(image, (SIZE, SIZE))
    X = np.array((image[np.newaxis]) / 255)
    # 4-way flip TTA: geometric mean of the four softmax outputs
    score_predict = ((model.predict(X).ravel()
                      * model.predict(X[:, ::-1, :, :]).ravel()
                      * model.predict(X[:, ::-1, ::-1, :]).ravel()
                      * model.predict(X[:, :, ::-1, :]).ravel()) ** 0.25).tolist()
    label_predict = np.argmax(score_predict)
    predicted.append(str(label_predict))<save_to_csv> | for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess', 'Capt', 'Col',
                                                 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train_df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean() | Titanic - Machine Learning from Disaster |
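The four-flip TTA in the APTOS cell above multiplies four softmax vectors and takes the fourth root, i.e. a geometric mean, which rewards classes all flips agree on. A toy comparison:

import numpy as np

p = np.array([[0.7, 0.3], [0.6, 0.4], [0.8, 0.2], [0.9, 0.1]])  # four augmented views
geo = np.prod(p, axis=0) ** 0.25  # what the kernel computes
arith = p.mean(axis=0)            # the more common alternative
print(geo, arith)  # the geometric mean punishes disagreement harder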
9,725,565 | submit['diagnosis'] = predicted
submit.to_csv('submission.csv', index=False)
submit.head()<define_variables> | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train_df.head() | Titanic - Machine Learning from Disaster |
9,725,565 | IMG_DIM = 256
BATCH_SIZE = 32
CHANNELS = 3
NUM_CLASSES = 5
print(os.listdir("."))
print(os.listdir("../"))
print(os.listdir("../input/"))
print(os.listdir("../input/aptos2019-blindness-detection"))
print(os.listdir("../input/densenetmulti"))
INPUT_FOLDER = '../input/aptos2019-blindness-detection/'<choose_model_class> | train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape | Titanic - Machine Learning from Disaster |
9,725,565 | def dataGenerator(jitter=0.1):
    # augmentation strength scales with `jitter`; flips switch off when jitter is tiny
    datagen = image.ImageDataGenerator(rescale=1. / 255,
                                       horizontal_flip=True and (jitter > 0.01),
                                       vertical_flip=True and (jitter > 0.01),
                                       zoom_range=[max(0.8, 1 - 5 * jitter), 1],
                                       rotation_range=int(600 * jitter),
                                       brightness_range=[1 - jitter / 3, 1 + jitter / 3],
                                       fill_mode="reflect",  # Keras has no "mirror" mode; "reflect" is the mirrored fill
                                       channel_shift_range=int(30 * jitter),
                                       )
    return datagen
<choose_model_class> | for dataset in combine:
    dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
9,725,565 | def load_network(network_name):
    weights = f"../input/densenetmulti/{network_name}.h5"
    if network_name == "normal":
        weights = f"../input/densenetmulti/dense-0.800.h5"
    model = Sequential()
    model.add(DenseNet121(weights=None, include_top=False, input_shape=(IMG_DIM, IMG_DIM, CHANNELS)))
    model.add(GlobalAveragePooling2D())
    model.add(Dropout(0.5))
    model.add(Dense(NUM_CLASSES, activation='sigmoid'))
    model.load_weights(weights)
    model.compile(optimizer=Adam(lr=0.00005), loss='binary_crossentropy', metrics=['accuracy'])
    return model
<categorify> | guess_ages = np.zeros((2, 3))
guess_ages | Titanic - Machine Learning from Disaster |
9,725,565 | def prediction_convert_highest(predictions, thresholds):
    # Per-class thresholds, then the highest grade that fired wins
    # (ordinal reading of the diagnosis, scanning 4 -> 0).
    thresholded = np.zeros(predictions.shape)
    for i in range(NUM_CLASSES):
        thresholded[:, i] = predictions[:, i] > thresholds[i]
    y_val = np.zeros((predictions.shape[0]), dtype=int)
    for i in range(predictions.shape[0]):
        for j in range(4, -1, -1):
            if thresholded[i][j]:
                y_val[i] = j
                break
    return y_val
def make_predictions(d_set, models):
    images_dir = f"{INPUT_FOLDER}{d_set}_images/"
    df = pd.read_csv(f"{INPUT_FOLDER}{d_set}.csv")
    df.id_code = df.id_code.apply(lambda x: x + ".png")
    block_size = 512
    total = df.index.size
    jitter_amounts = [0, 0.02, 0.02, 0.02, 0.03, 0.03, 0.03, 0.1]
    ensemble_predictions = np.zeros((df.index.size, len(jitter_amounts) * len(models), NUM_CLASSES))
    for m, model in enumerate(models):
        print(f"Making predictions with the {model} model on the {d_set} dataset.")
        neural_net = load_network(model)
        for start in range(0, total, block_size):
            end = start + block_size
            if end > total:
                end = total
            img_block = np.empty((end - start, IMG_DIM, IMG_DIM, CHANNELS))
            for i, filename in enumerate(df[start:end].id_code):
                try:
                    bgr = cv2.imread(images_dir + filename)
                    img_block[i, :, :, :] = process(bgr, model)
                except Exception:
                    print("Error opening or manipulating image")
                    img_block[i, :, :, :] = 128.
            for i, jit in enumerate(jitter_amounts):
                datagen = dataGenerator(jit).flow(img_block, shuffle=False)
                # index each (model, jitter) pair into its own slot; the source cell
                # used len(models)*m here, which collides for 2 models x 8 jitters
                ensemble_predictions[start:end, i + len(jitter_amounts) * m] = neural_net.predict_generator(
                    generator=datagen, steps=len(datagen), workers=4, verbose=1)
            print(f"{start} - {end} finished")
            gc.collect()
    return np.median(ensemble_predictions, axis=1)<load_from_csv> | for dataset in combine:
    for i in range(0, 2):
        for j in range(0, 3):
            guess_df = dataset[(dataset['Sex'] == i) &
                               (dataset['Pclass'] == j + 1)]['Age'].dropna()
            age_guess = guess_df.median()
            guess_ages[i, j] = int(age_guess / 0.5 + 0.5) * 0.5  # round to the nearest 0.5
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i) & (dataset.Pclass == j + 1),
                        'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
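A toy run of prediction_convert_highest from the APTOS kernel above (a hedged sketch; NUM_CLASSES = 5):

import numpy as np

probs = np.array([[0.9, 0.6, 0.5, 0.1, 0.1],   # classes 0-2 fire -> label 2
                  [0.9, 0.2, 0.1, 0.1, 0.1]])  # only class 0 fires -> label 0
print(prediction_convert_highest(probs, [0.5, 0.5, 0.4, 0.4, 0.3]))  # [2 0]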
9,725,565 |
<data_type_conversions> | train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean().sort_values(by='AgeBand', ascending=True ) | Titanic - Machine Learning from Disaster |
9,725,565 |
<save_to_csv> | for dataset in combine:
    dataset.loc[dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[dataset['Age'] > 64, 'Age'] = 4  # the source cell dropped this "= 4"; without it the top band is never assigned
train_df.head() | Titanic - Machine Learning from Disaster |
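The five .loc assignments above reproduce the AgeBand edges from pd.cut; a hedged vectorized equivalent:

import pandas as pd

age = pd.Series([5, 22, 40, 60, 70])
print(pd.cut(age, bins=[-1, 16, 32, 48, 64, 200], labels=[0, 1, 2, 3, 4]).tolist())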
9,725,565 | thresholds = [0.5, 0.5, 0.4, 0.4, 0.3]
predictions = make_predictions("test", ["normal", "weird"])
as_classes = prediction_convert_highest(predictions, thresholds)
print(as_classes[:10])
test_df = pd.read_csv(INPUT_FOLDER + 'test.csv')
test_df['diagnosis'] = as_classes
test_df.to_csv('submission.csv', index=False)<set_options> | train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
9,725,565 |
!pip install ../input/efficientnet/efficientnet-master/efficientnet-master
set_random_seed(2)
np.random.seed(0)
<define_variables> | for dataset in combine:
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
9,725,565 |
IMG_SIZE = 300
BATCH_SIZE = 16<choose_model_class> | for dataset in combine:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean() | Titanic - Machine Learning from Disaster |
9,725,565 |
def output_relu(x):
    # clipped ReLU keeps the single regression output inside the grade range [0, 4]
    return K.relu(x, max_value=4)
base_model = EfficientNetB3(weights=None, include_top=False, input_shape=(IMG_SIZE, IMG_SIZE, 3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.4)(x)
x = Dense(1, activation=output_relu, kernel_initializer='he_normal')(x)
model = Model(inputs=base_model.input, outputs=x)<load_from_csv> | train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head() | Titanic - Machine Learning from Disaster |
9,725,565 |
train_csv = pd.read_csv('../input/aptos2019-blindness-detection/train.csv')
train_id_codes = train_csv['id_code']
train_labels = train_csv['diagnosis']
for i in range(len(train_id_codes)):
    train_id_codes[i] = '../input/aptos2019-blindness-detection/train_images/{}.png'.format(train_id_codes[i])
test_csv = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
test_id_codes = test_csv['id_code']
test_pseudo_labels = np.empty(len(test_id_codes), dtype='float32')
for i in range(len(test_id_codes)):
    test_id_codes[i] = '../input/aptos2019-blindness-detection/test_images/{}.png'.format(test_id_codes[i])
    img = cv2.imread(test_id_codes[i])
    img = load_ben_color(img)
    X = np.array([img])
    pred = model.predict(X)
    test_pseudo_labels[i] = pred<data_type_conversions> | for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(10 ) | Titanic - Machine Learning from Disaster |
9,725,565 |
d = {}
d['id_code'] = np.concatenate((train_id_codes, test_id_codes), axis=0)
d['diagnosis'] = np.concatenate((train_labels, test_pseudo_labels), axis=0).astype('str')
df = pd.DataFrame(data=d)<data_type_conversions> | freq_port = train_df.Embarked.dropna().mode()[0]
freq_port | Titanic - Machine Learning from Disaster |
9,725,565 |
fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(20, 4))
it = 0
for x, y in pseudo_datagen:
    ax[it].imshow((x[0] * 255.).astype('uint8'))
    ax[it].axis('off')
    it += 1
    if it == 4:
        break<save_to_csv> | for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False) | Titanic - Machine Learning from Disaster |
9,725,565 |
prediction = predict(test_prediction).astype('uint8')
test_csv['diagnosis'] = prediction
test_csv.to_csv("submission.csv", index=False)
unique, counts = np.unique(prediction, return_counts=True)
tmp = dict(zip(unique, counts))
print(tmp)
print('Done!')<set_options> | for dataset in combine:
    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
train_df.head() | Titanic - Machine Learning from Disaster |
9,725,565 | %reload_ext autoreload
%autoreload 2
%matplotlib inline
<load_pretrained> | test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head() | Titanic - Machine Learning from Disaster |
9,725,565 | md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1)<load_from_csv> | train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False ).mean().sort_values(by='FareBand', ascending=True ) | Titanic - Machine Learning from Disaster |
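The 7.91 / 14.454 / 31 cut points used in the Fare-binning cell further down are just the quartile edges pd.qcut reports; a hedged sketch (assumes the raw Fare column as loaded above):

import pandas as pd

print(pd.qcut(train_df['Fare'], 4).cat.categories)  # intervals with edges near 7.91, 14.454, 31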
9,725,565 | def get_df():
    base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
    train_dir = os.path.join(base_image_dir, 'train_images/')
    df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
    df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
    df = df.drop(columns=['id_code'])
    df = df.sample(frac=1).reset_index(drop=True)  # shuffle rows
    test_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
    return df, test_df
df, test_df = get_df()<feature_engineering> | for dataset in combine:
    dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head(10) | Titanic - Machine Learning from Disaster |
9,725,565 | bs = 128
sz = 256
tfms = get_transforms(do_flip=True, flip_vert=True)<compute_test_metric> | X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df.drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape | Titanic - Machine Learning from Disaster |
9,725,565 | def qk(y_pred, y):
    return torch.tensor(cohen_kappa_score(torch.round(y_pred), y, weights='quadratic'), device='cuda:0')<load_pretrained> | logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train)* 100, 2)
acc_log | Titanic - Machine Learning from Disaster |
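The training accuracy above says little about generalization; a hedged companion glance at the fitted coefficients (reusing `logreg` and `X_train` from this row):

import pandas as pd

coeffs = pd.DataFrame({'feature': X_train.columns, 'coef': logreg.coef_[0]})
print(coeffs.sort_values('coef', ascending=False))  # Sex typically dominates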