kernel_id   int64          24.2k  23.3M
prompt      stringlengths  8      1.85M
completion  stringlengths  1      182k
comp_name   stringlengths  5      57
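For orientation, a minimal sketch of loading and sanity-checking a dump with this schema, assuming the rows below are exported as a flat CSV with one record per (kernel_id, prompt, completion, comp_name) tuple; the file name kernels.csv is hypothetical.

import pandas as pd

# Hypothetical export of the rows shown below.
df = pd.read_csv("kernels.csv")

# Check the advertised dtypes and string-length ranges.
print(df.dtypes)  # kernel_id should be int64
for col in ["prompt", "completion", "comp_name"]:
    lengths = df[col].str.len()
    print(col, lengths.min(), lengths.max())
print(df["comp_name"].nunique(), "distinct competitions")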
22,184,906
learn = cnn_learner(data, MODEL, metrics=accuracy, model_dir=PATH )<train_model>
test.Cabin.isnull().sum()
Titanic - Machine Learning from Disaster
22,184,906
learn.fit_one_cycle(5, slice(5e-3))<compute_test_metric>
def detect_outliers(df, n, features):
    outlier_indices = []
    for col in features:
        Q1 = np.percentile(df[col], 25)
        Q3 = np.percentile(df[col], 75)
        IQR = Q3 - Q1
        outlier_step = 1.5 * IQR
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(key for key, value in outlier_indices.items() if value > n)
    return multiple_outliers
Titanic - Machine Learning from Disaster
22,184,906
accuracy(*learn.get_preds() )<save_model>
outliers_to_drop = detect_outliers(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
print("The {} indices for the outliers to drop are: ".format(len(outliers_to_drop)), outliers_to_drop)
Titanic - Machine Learning from Disaster
22,184,906
learn.save('stage-1-resnet50' )<train_model>
train.loc[outliers_to_drop, :]
Titanic - Machine Learning from Disaster
22,184,906
learn.fit_one_cycle(5, max_lr=slice(1e-7, 1e-5))
learn.recorder.plot()
learn.recorder.plot_losses()
learn.recorder.plot_metrics()<compute_test_metric>
print("Before: {} rows".format(len(train)))
train = train.drop(outliers_to_drop, axis=0).reset_index(drop=True)
print("After: {} rows".format(len(train)))
Titanic - Machine Learning from Disaster
22,184,906
accuracy(*learn.get_preds() )<save_model>
outliers_to_drop_to_test = detect_outliers(test, 2, ['Age', 'SibSp', 'Parch', 'Fare'] )
Titanic - Machine Learning from Disaster
22,184,906
learn.save('stage-2-resnet50', return_path=True )<choose_model_class>
test.loc[outliers_to_drop_to_test, :]
Titanic - Machine Learning from Disaster
22,184,906
learn = cnn_learner(data, MODEL, metrics=accuracy, model_dir=PATH)
learn.unfreeze()<train_model>
train['SibSp'].value_counts(dropna = False )
Titanic - Machine Learning from Disaster
22,184,906
learn.fit_one_cycle(30, max_lr=1e-4)
learn.recorder.plot()
learn.recorder.plot_losses()
learn.recorder.plot_metrics()<compute_test_metric>
train[['SibSp', 'Survived']].groupby('SibSp', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
accuracy(*learn.get_preds() )<save_model>
train['Parch'].value_counts(dropna = False )
Titanic - Machine Learning from Disaster
22,184,906
learn.freeze()
learn.save('stage-3-resnet50', return_path=True)<load_from_csv>
train[['Parch', 'Survived']].groupby('Parch', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
submission = pd.read_csv('../input/sample.csv')
submission.shape<prepare_x_and_y>
train['Age'].isnull().sum()
Titanic - Machine Learning from Disaster
22,184,906
data.train_ds.x<prepare_x_and_y>
train['Fare'].isnull().sum()
Titanic - Machine Learning from Disaster
22,184,906
data.test_ds.x<predict_on_test>
train['Pclass'].value_counts(dropna = False )
Titanic - Machine Learning from Disaster
22,184,906
preds_test, _ = learn.get_preds(DatasetType.Test )<save_to_csv>
train[['Pclass', 'Survived']].groupby('Pclass', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
preds_test_s = pd.DataFrame(preds_test)
preds_test_s.to_csv('preds_sub.csv', index=False)
preds_test_s.shape<prepare_output>
train['Sex'].value_counts(dropna = False )
Titanic - Machine Learning from Disaster
22,184,906
preds_test = np.argmax(preds_test, axis=1)
preds_test = preds_test.numpy()<create_dataframe>
train[['Sex', 'Survived']].groupby('Sex', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
fnames = [f.name for f in learn.data.test_ds.items]
submission = pd.DataFrame({'Id': fnames, 'Category': preds_test}, columns=['Id', 'Category'])<save_to_csv>
train['Embarked'].value_counts(dropna = False )
Titanic - Machine Learning from Disaster
22,184,906
submission.to_csv('submission_v23.csv', index=False )<feature_engineering>
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
preds_test_tta, _ = learn.TTA(ds_type=DatasetType.Test) <save_to_csv>
train = train.drop(['Ticket'], axis=1)
test = test.drop(['Ticket'], axis=1)
Titanic - Machine Learning from Disaster
22,184,906
preds_test_tta_s = pd.DataFrame(preds_test_tta)
preds_test_tta_s.to_csv('preds_tta_sub.csv', index=False)
preds_test_tta_s.shape<prepare_output>
train.isnull().sum().sort_values(ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
preds_test_tta = np.argmax(preds_test_tta, axis=1)
preds_test_tta = preds_test_tta.numpy()<create_dataframe>
mode = train['Embarked'].dropna().mode()[0]
mode
Titanic - Machine Learning from Disaster
22,184,906
fnames = [f.name for f in learn.data.test_ds.items]
submission = pd.DataFrame({'Id': fnames, 'Category': preds_test_tta}, columns=['Id', 'Category'])<save_to_csv>
train['Embarked'].fillna(mode, inplace = True )
Titanic - Machine Learning from Disaster
22,184,906
submission['Category'] = preds_test_tta
submission.to_csv('tta_submission_v23.csv', index=False)<set_options>
test.isnull().sum().sort_values(ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
shutil.rmtree(PATH )<install_modules>
median = test['Fare'].median()
test['Fare'].fillna(median, inplace=True)
Titanic - Machine Learning from Disaster
22,184,906
!pip show fastai<set_options>
combine = pd.concat([train, test], axis=0).reset_index(drop=True)
combine.head()
Titanic - Machine Learning from Disaster
22,184,906
%matplotlib inline <define_variables>
combine.isnull().sum().sort_values(ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
MODEL = resnet50
BATCH = 64
SIZE = 320
DATA_PATH = '../input/'
PATH = "../car-classification/"<compute_test_metric>
combine['Sex'] = combine['Sex'].map({'male': 0, 'female': 1} )
Titanic - Machine Learning from Disaster
22,184,906
def accuracy(preds, targs):
    preds = torch.max(preds, dim=1)[1]
    return (preds == targs).float().mean()<load_from_csv>
age_nan_indices = list(combine[combine['Age'].isnull()].index)
len(age_nan_indices)
Titanic - Machine Learning from Disaster
22,184,906
train_df = pd.read_csv(".. /input/train.csv") test_df = pd.read_csv(".. /input/sample.csv") train_df.head()<count_values>
for index in age_nan_indices: median_age = combine['Age'].median() predict_age = combine['Age'][(combine['SibSp'] == combine.iloc[index]['SibSp']) &(combine['Parch'] == combine.iloc[index]['Parch']) &(combine['Pclass'] == combine.iloc[index]["Pclass"])].median() if np.isnan(predict_age): combine['Age'].iloc[index] = median_age else: combine['Age'].iloc[index] = predict_age
Titanic - Machine Learning from Disaster
22,184,906
train_df.Category.value_counts()<train_model>
combine['Age'].isnull().sum()
Titanic - Machine Learning from Disaster
22,184,906
image = Image.open(DATA_PATH + '/train/train/0/100380.jpg')
imgplot = plt.imshow(image)
plt.show()
image.size<feature_engineering>
train['Fare'] = train['Fare'].map(lambda x: np.log(x) if x > 0 else 0)
Titanic - Machine Learning from Disaster
22,184,906
tfms = get_transforms(max_zoom=1., max_warp=0.2, max_lighting=0.3, xtra_tfms=[cutout(n_holes=(1,20)) ] )<define_variables>
combine['Title'] = [name.split(',')[1].split('.')[0].strip() for name in combine['Name']]
combine[['Name', 'Title']].head()
Titanic - Machine Learning from Disaster
22,184,906
data = ImageDataBunch.from_folder(DATA_PATH, train=PATH+'train/train/', test='test/test_upload/',
                                  ds_tfms=tfms, padding_mode='zeros', valid_pct=0.1, size=SIZE,
                                  classes=['0','1','2','3','4','5','6','7','8','9'],
                                  bs=BATCH, num_workers=0).normalize(imagenet_stats)
data.path = pathlib.Path(PATH)<define_variables>
combine['Title'].value_counts()
Titanic - Machine Learning from Disaster
22,184,906
data.show_batch(rows=4, figsize=(12,9))<choose_model_class>
combine['Title'].nunique()
Titanic - Machine Learning from Disaster
22,184,906
learn = cnn_learner(data, MODEL, metrics=accuracy, model_dir=PATH )<train_model>
combine['Title'] = combine['Title'].replace(['Dr', 'Rev', 'Col', 'Major', 'Capt'], 'Officer')
combine['Title'] = combine['Title'].replace(['Lady', 'Jonkheer', 'Don', 'the Countess', 'Sir', 'Dona'], 'Royalty')
combine['Title'] = combine['Title'].replace(['Mlle', 'Miss'], 'Miss')
combine['Title'] = combine['Title'].replace(['Mme', 'Mrs', 'Ms'], 'Mrs')
combine['Title'] = combine['Title'].replace('Mr', 'Mr')
combine['Title'] = combine['Title'].replace('Master', 'Master')
Titanic - Machine Learning from Disaster
22,184,906
learn.fit_one_cycle(5, slice(5e-3))<compute_test_metric>
combine[['Title', 'Survived']].groupby(['Title'], as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
accuracy(*learn.get_preds() )<save_model>
combine = combine.drop('Name', axis=1)
combine.head()
Titanic - Machine Learning from Disaster
22,184,906
learn.save('stage-1-resnet50' )<train_model>
combine['Family_Size'] = combine['SibSp'] + combine['Parch'] + 1
combine[['SibSp', 'Parch', 'Family_Size']].head(10)
Titanic - Machine Learning from Disaster
22,184,906
learn.fit_one_cycle(5, max_lr=slice(1e-7, 1e-5))
learn.recorder.plot()
learn.recorder.plot_losses()
learn.recorder.plot_metrics()<compute_test_metric>
combine[['Family_Size', 'Survived']].groupby('Family_Size', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
accuracy(*learn.get_preds() )<save_model>
combine['Alone'] = 0
combine.loc[combine['Family_Size'] == 1, 'Alone'] = 1
Titanic - Machine Learning from Disaster
22,184,906
learn.save('stage-2-resnet50', return_path=True )<choose_model_class>
combine[['Alone', 'Survived']].groupby('Alone', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
learn = cnn_learner(data, MODEL, metrics=accuracy, model_dir=PATH)
learn.unfreeze()<train_model>
combine = combine.drop(['SibSp', 'Parch', 'Family_Size'], axis=1)
combine.head()
Titanic - Machine Learning from Disaster
22,184,906
learn.fit_one_cycle(30, max_lr=1e-4)
learn.recorder.plot()
learn.recorder.plot_losses()
learn.recorder.plot_metrics()<compute_test_metric>
combine['Minor'] = combine['Age'] <= 17
combine['Major'] = 1 - combine['Minor']
Titanic - Machine Learning from Disaster
22,184,906
accuracy(*learn.get_preds() )<save_model>
combine[['Major', 'Survived']].groupby('Major', as_index = False ).mean().sort_values(by = 'Survived', ascending = False )
Titanic - Machine Learning from Disaster
22,184,906
learn.freeze()
learn.save('stage-3-resnet50', return_path=True)<load_from_csv>
combine.loc[(combine['Age'] <= 17), 'Major'] = 0
combine.loc[(combine['Age'] > 17), 'Major'] = 1
Titanic - Machine Learning from Disaster
22,184,906
submission = pd.read_csv('../input/sample.csv')
submission.shape<prepare_x_and_y>
combine = combine.drop(['Age', 'Minor'], axis=1)
combine.head()
Titanic - Machine Learning from Disaster
22,184,906
data.train_ds.x<prepare_x_and_y>
combine = pd.get_dummies(combine, columns=['Title'])
combine = pd.get_dummies(combine, columns=['Embarked'], prefix='Em')
combine.head()
Titanic - Machine Learning from Disaster
22,184,906
data.test_ds.x<predict_on_test>
combine.loc[combine['Fare'] <= 1.56, 'Fare'] = 0
combine.loc[(combine['Fare'] > 1.56) & (combine['Fare'] <= 3.119), 'Fare'] = 1
combine.loc[(combine['Fare'] > 3.119) & (combine['Fare'] <= 4.679), 'Fare'] = 2
combine.loc[combine['Fare'] > 4.679, 'Fare'] = 3
Titanic - Machine Learning from Disaster
22,184,906
preds_test, _ = learn.get_preds(DatasetType.Test )<save_to_csv>
combine['Fare'] = combine['Fare'].astype('int' )
Titanic - Machine Learning from Disaster
22,184,906
preds_test_s = pd.DataFrame(preds_test)
preds_test_s.to_csv('preds_sub.csv', index=False)
preds_test_s.shape<prepare_output>
combine = combine.drop('Fare_Band', axis = 1 )
Titanic - Machine Learning from Disaster
22,184,906
preds_test = np.argmax(preds_test, axis=1)
preds_test = preds_test.numpy()<create_dataframe>
train = combine[:len(train)]
test = combine[len(train):]
Titanic - Machine Learning from Disaster
22,184,906
fnames = [f.name for f in learn.data.test_ds.items]
submission = pd.DataFrame({'Id': fnames, 'Category': preds_test}, columns=['Id', 'Category'])<save_to_csv>
train = train.drop('PassengerId', axis=1)
train.head()
Titanic - Machine Learning from Disaster
22,184,906
submission.to_csv('submission_v23.csv', index=False )<feature_engineering>
train['Survived'] = train['Survived'].astype('int')
train.head()
Titanic - Machine Learning from Disaster
22,184,906
preds_test_tta, _ = learn.TTA(ds_type=DatasetType.Test) <save_to_csv>
test = test.drop('Survived', axis=1)
test.head()
Titanic - Machine Learning from Disaster
22,184,906
preds_test_tta_s = pd.DataFrame(preds_test_tta)
preds_test_tta_s.to_csv('preds_tta_sub.csv', index=False)
preds_test_tta_s.shape<prepare_output>
train['Cabin'] = train['Cabin'].astype('int' )
Titanic - Machine Learning from Disaster
22,184,906
preds_test_tta = np.argmax(preds_test_tta, axis=1)
preds_test_tta = preds_test_tta.numpy()<create_dataframe>
test['Cabin'] = test['Cabin'].astype('int' )
Titanic - Machine Learning from Disaster
22,184,906
fnames = [f.name for f in learn.data.test_ds.items]
submission = pd.DataFrame({'Id': fnames, 'Category': preds_test_tta}, columns=['Id', 'Category'])<save_to_csv>
X_train = train.drop('Survived', axis=1)
Y_train = train['Survived']
X_test = test.drop('PassengerId', axis=1).copy()
print("X_train shape: ", X_train.shape)
print("Y_train shape: ", Y_train.shape)
print("X_test shape: ", X_test.shape)
Titanic - Machine Learning from Disaster
22,184,906
submission['Category'] = preds_test_tta
submission.to_csv('tta_submission_v23.csv', index=False)<set_options>
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
acc_log
Titanic - Machine Learning from Disaster
22,184,906
shutil.rmtree(PATH )<set_options>
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
acc_svc
Titanic - Machine Learning from Disaster
22,184,906
%matplotlib inline
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
tf.set_random_seed(RANDOM_SEED)<load_from_csv>
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train) * 100, 2)
acc_knn
Titanic - Machine Learning from Disaster
22,184,906
<prepare_x_and_y>
gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2)
acc_gaussian
Titanic - Machine Learning from Disaster
22,184,906
') Time taken: %i hours %i minutes and %s seconds.' % <define_variables>
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2)
acc_perceptron
Titanic - Machine Learning from Disaster
22,184,906
batch_size = 16
num_classes = 10
epochs = 100
patience = 15
data_augmentation = False
num_predictions = 20
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, (2, 2), padding='valid', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.15))
model.add(Conv2D(64, (2, 2), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
callbacks = [ReduceLROnPlateau(monitor='val_acc', factor=0.15, patience=7, verbose=2, mode='auto'),
             ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=2, factor=0.5, min_lr=0.00001),
             EarlyStopping(monitor='val_acc', patience=patience, mode='max', verbose=0),
             ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', save_best_only=True, verbose=0)]
if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks, verbose=2)
else:
    print('Using real-time data augmentation.')
    datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False,
                                 featurewise_std_normalization=True, samplewise_std_normalization=False,
                                 zca_whitening=False, zca_epsilon=1e-06, rotation_range=0,
                                 width_shift_range=0.1, height_shift_range=0.1, shear_range=0.15,
                                 zoom_range=0.2, channel_shift_range=0., fill_mode='nearest', cval=0.,
                                 horizontal_flip=True, vertical_flip=True, rescale=0.2,
                                 preprocessing_function=None, data_format=None, validation_split=0.0)
    datagen.fit(x_train)
    model = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                                steps_per_epoch=x_train.shape[0] * 2, epochs=epochs,
                                validation_data=(x_test, y_test), workers=-1, verbose=1,
                                callbacks=callbacks)
del model
model = load_model('best_model.h5')
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)
model.save(model_path)
print('Saved trained model at %s ' % model_path)
scores = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])<save_to_csv>
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
acc_linear_svc
Titanic - Machine Learning from Disaster
22,184,906
res = pd.DataFrame({'loss': scores[0], 'score': scores[1]}, index=list('0'))
res.to_csv('result.csv')
res.head()<train_on_grid>
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2)
acc_sgd
Titanic - Machine Learning from Disaster
22,184,906
print('Fold %d - Run %d' % ((i + 1), (run + 1)))<compute_train_metric>
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, Y_train)
Y_pred = decision_tree.predict(X_test)
acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2)
acc_decision_tree
Titanic - Machine Learning from Disaster
22,184,906
print('Fold %d Run %d Log-loss: %.5f' % ((i + 1), (run + 1), LL_run))
print('Fold %d Log-loss: %.5f' % ((i + 1), LL))<compute_test_metric>
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2)
acc_random_forest
Titanic - Machine Learning from Disaster
22,184,906
print('Average Log-loss: %.5f' % (cv_LL / folds))
print('Average AUC: %.5f' % (cv_AUC / folds))
print('Average normalized gini: %.5f' % (cv_gini / folds))<compute_test_metric>
catboost = CatBoostClassifier()
catboost.fit(X_train, Y_train)
Y_pred = catboost.predict(X_test)
acc_catboost = round(catboost.score(X_train, Y_train) * 100, 2)
Titanic - Machine Learning from Disaster
22,184,906
class SigmoidNeuron:
    def __init__(self):
        self.w = None
        self.b = None

    def perceptron(self, x):
        return np.dot(x, self.w.T) + self.b

    def sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def grad_w_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred) * x

    def grad_b_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred)

    def grad_w_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred * x
        elif y == 1:
            return -1 * (1 - y_pred) * x
        else:
            raise ValueError("y should be 0 or 1")

    def grad_b_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred
        elif y == 1:
            return -1 * (1 - y_pred)
        else:
            raise ValueError("y should be 0 or 1")

    def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
        if initialise:
            self.w = np.random.randn(1, X.shape[1])
            self.b = 0
        if display_loss:
            loss = {}
        for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            dw = 0
            db = 0
            for x, y in zip(X, Y):
                if loss_fn == "mse":
                    dw += self.grad_w_mse(x, y)
                    db += self.grad_b_mse(x, y)
                elif loss_fn == "ce":
                    dw += self.grad_w_ce(x, y)
                    db += self.grad_b_ce(x, y)
            self.w -= learning_rate * dw
            self.b -= learning_rate * db
            if display_loss:
                Y_pred = self.sigmoid(self.perceptron(X))
                if loss_fn == "mse":
                    loss[i] = mean_squared_error(Y, Y_pred)
                elif loss_fn == "ce":
                    loss[i] = log_loss(Y, Y_pred)
        if display_loss:
            plt.plot(loss.values())
            plt.xlabel('Epochs')
            if loss_fn == "mse":
                plt.ylabel('Mean Squared Error')
            elif loss_fn == "ce":
                plt.ylabel('Log Loss')
            plt.show()

    def predict(self, X):
        Y_pred = []
        for x in X:
            y_pred = self.sigmoid(self.perceptron(x))
            Y_pred.append(y_pred)
        return np.array(Y_pred)<load_pretrained>
models = pd.DataFrame({'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest',
                                 'Naive Bayes', 'Perceptron', 'Stochastic Gradient Descent', 'Linear SVC',
                                 'Decision Tree', 'CatBoost'],
                       'Score': [acc_svc, acc_knn, acc_log, acc_random_forest, acc_gaussian,
                                 acc_perceptron, acc_sgd, acc_linear_svc, acc_decision_tree, acc_catboost]})
models.sort_values(by='Score', ascending=False, ignore_index=True)
Titanic - Machine Learning from Disaster
22,184,906
languages = ['ta', 'hi', 'en']
images_train = read_all("../input/level_4a_train/"+LEVEL+"/"+"background", key_prefix='bgr_')
for language in languages:
    images_train.update(read_all("../input/level_4a_train/"+LEVEL+"/"+language, key_prefix=language+"_"))
print(len(images_train))
images_test = read_all("../input/level_4a_test/kaggle_"+LEVEL, key_prefix='')
print(len(images_test))<normalization>
classifiers = []
classifiers.append(LogisticRegression())
classifiers.append(SVC())
classifiers.append(KNeighborsClassifier(n_neighbors=5))
classifiers.append(GaussianNB())
classifiers.append(Perceptron())
classifiers.append(LinearSVC())
classifiers.append(SGDClassifier())
classifiers.append(DecisionTreeClassifier())
classifiers.append(RandomForestClassifier())
classifiers.append(CatBoostClassifier())
len(classifiers)
Titanic - Machine Learning from Disaster
22,184,906
scaler = StandardScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test)<compute_test_metric>
cv_results = []
for classifier in classifiers:
    cv_results.append(cross_val_score(classifier, X_train, Y_train, scoring='accuracy', cv=10))
Titanic - Machine Learning from Disaster
22,184,906
class SigmoidNeuronMy:
    def __init__(self):
        self.w = None
        self.b = None

    def perceptron(self, x):
        return np.dot(x, self.w.T) + self.b

    def sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def grad_w_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred) * x

    def grad_b_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred)

    def grad_w_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred * x
        elif y == 1:
            return -1 * (1 - y_pred) * x
        else:
            raise ValueError("y should be 0 or 1")

    def grad_b_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred
        elif y == 1:
            return -1 * (1 - y_pred)
        else:
            raise ValueError("y should be 0 or 1")

    def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse",
            display_loss=False, setting=False):
        if initialise:
            self.w = np.random.randn(1, X.shape[1])
            self.b = 0
        loss = {}
        if display_loss:
            loss = {}
        for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            dw = 0
            db = 0
            for x, y in zip(X, Y):
                if loss_fn == "mse":
                    dw += self.grad_w_mse(x, y)
                    db += self.grad_b_mse(x, y)
                elif loss_fn == "ce":
                    dw += self.grad_w_ce(x, y)
                    db += self.grad_b_ce(x, y)
            self.w -= learning_rate * dw
            self.b -= learning_rate * db
            if display_loss:
                Y_pred = self.sigmoid(self.perceptron(X))
                if loss_fn == "mse":
                    loss[i] = mean_squared_error(Y, Y_pred)
                elif loss_fn == "ce":
                    loss[i] = log_loss(Y, Y_pred)
            if setting:
                Y_pred = self.sigmoid(self.perceptron(X))
                if loss_fn == "mse":
                    loss[i] = mean_squared_error(Y, Y_pred)
                elif loss_fn == "ce":
                    loss[i] = log_loss(Y, Y_pred)
        if setting:
            return list(loss.values())
        if display_loss:
            plt.plot(loss.values())
            plt.xlabel('Epochs')
            if loss_fn == "mse":
                plt.ylabel('Mean Squared Error')
            elif loss_fn == "ce":
                plt.ylabel('Log Loss')
            plt.show()

    def optimumValue(self, X, Y, loss_fn="mse"):
        lr = np.linspace(0, 0.05, 6)
        epochs = np.linspace(0, 100, 11)
        acc = {}
        min_loss = 1000
        lr_fin = 0
        epoch_fin = 0
        std = 0
        fin = []
        for j in tqdm_notebook(range(len(lr)), total=len(lr), unit="lr"):
            for i in tqdm_notebook(range(len(epochs)), total=len(epochs), unit="epoch"):
                for k in range(4):
                    X1_train, X1_test, y1_train, y1_test = train_test_split(X, Y, test_size=0.2,
                                                                            random_state=k**2)
                    self.fit(X1_train, y1_train, epochs=epochs[i].astype("int")+1,
                             learning_rate=lr[j], loss_fn="ce", display_loss=False)
                    Y_pred_train = self.predict(X1_test)
                    Y_pred_binarised_train = (Y_pred_train >= 0.5).astype("int").ravel()
                    acc[k] = accuracy_score(Y_pred_binarised_train, y1_test)
                std = statistics.stdev(list(acc.values()))
                accuracy = max(list(acc.values()))
                fin.append([accuracy, std, epochs[i].astype("int")+1, lr[j]])
        print(fin)

    def predict(self, X):
        Y_pred = []
        for x in X:
            y_pred = self.sigmoid(self.perceptron(x))
            Y_pred.append(y_pred)
        return np.array(Y_pred)<train_model>
cv_res = pd.DataFrame({'Cross Validation Mean': cv_mean,
                       'Cross Validation Std': cv_std,
                       'Algorithm': ['Logistic Regression', 'Support Vector Machines', 'KNN',
                                     'Gaussian Naive Bayes', 'Perceptron', 'Linear SVC',
                                     'Stochastic Gradient Descent', 'Decision Tree',
                                     'Random Forest', 'CatBoost']})
cv_res.sort_values(by='Cross Validation Mean', ascending=False, ignore_index=True)
Titanic - Machine Learning from Disaster
22,184,906
<train_model>
param_grid = {'learning_rate': [0.03, 0.1], 'depth': [4, 6, 10], 'l2_leaf_reg': [1, 3, 5, 7, 9]}
grid = GridSearchCV(CatBoostClassifier(), param_grid=param_grid, cv=3, refit=True, verbose=True)
grid.fit(X_train, Y_train)
Titanic - Machine Learning from Disaster
22,184,906
sn_ce = SigmoidNeuronMy()
sn_ce.fit(X_scaled_train, Y_train, epochs=800, learning_rate=0.00005, loss_fn="ce", display_loss=True)<predict_on_test>
print("Best parameters: ", grid.best_params_)
print("Best estimator: ", grid.best_estimator_)
Titanic - Machine Learning from Disaster
22,184,906
def print_accuracy(sn):
    Y_pred_train = sn.predict(X_scaled_train)
    Y_pred_binarised_train = (Y_pred_train >= 0.5).astype("int").ravel()
    accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
    print("Train Accuracy : ", accuracy_train)
    print("-"*50)<compute_test_metric>
catboost = CatBoostClassifier(depth=10, l2_leaf_reg=5, learning_rate=0.1)
catboost.fit(X_train, Y_train)
Y_pred = catboost.predict(X_test)
acc_catboost = round(catboost.score(X_train, Y_train) * 100, 2)
Titanic - Machine Learning from Disaster
22,184,906
print_accuracy(sn_ce )<save_to_csv>
cross_val_score(catboost, X_train, Y_train, scoring = 'accuracy', cv = 10 ).mean()
Titanic - Machine Learning from Disaster
22,184,906
Y_pred_test = sn_ce.predict(X_scaled_test)
Y_pred_binarised_test = (Y_pred_test >= 0.5).astype("int").ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission.to_csv("submission.csv", index=False)<set_options>
output = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': Y_pred} )
Titanic - Machine Learning from Disaster
22,184,906
np.random.seed(100)
LEVEL = 'level_4a'
warnings.filterwarnings("ignore")<compute_test_metric>
output.to_csv("submission_titanic.csv", index = False )
Titanic - Machine Learning from Disaster
22,184,906
<import_modules><EOS>
print("Your submission was successfully saved!" )
Titanic - Machine Learning from Disaster
21,908,637
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<load_pretrained>
pd.set_option('max_columns', 90)
PALETTE = ['
BACKCOLOR = '
sns.set_palette(PALETTE)
warnings.filterwarnings('ignore')
mpl.rcParams['figure.dpi'] = 120
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.spines.right'] = False
Titanic - Machine Learning from Disaster
21,908,637
languages = ['ta', 'hi', 'en']
images_train = read_all("../input/level_4a_train/"+LEVEL+"/"+"background", key_prefix='bgr_')
for language in languages:
    images_train.update(read_all("../input/level_4a_train/"+LEVEL+"/"+language, key_prefix=language+"_"))
print(len(images_train))
images_test = read_all("../input/level_4a_test/kaggle_"+LEVEL, key_prefix='')
print(len(images_test))<normalization>
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = pd.concat((train, test)).reset_index(drop=True)
Titanic - Machine Learning from Disaster
21,908,637
scaler = StandardScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test)<choose_model_class>
def multi_table(table_list):
    return HTML(
        f"<table><tr> {''.join(['<td>' + table._repr_html_() + '</td>' for table in table_list])} </tr></table>")
Titanic - Machine Learning from Disaster
21,908,637
kfold = KFold(n_splits=5, shuffle=True, random_state=10) <find_best_model_class>
multi_table([pd.DataFrame(all_data[i].value_counts())for i in all_data.columns] )
Titanic - Machine Learning from Disaster
21,908,637
sn_ce = SigmoidNeuron()
for train, test in kfold.split(X_scaled_train, Y_train):
    sn_ce.fit(X_scaled_train[train], Y_train[train], epochs=500, learning_rate=0.00002,
              loss_fn="ce", display_loss=True, initialise=False)
    Y_pred_train = sn_ce.predict(X_scaled_train[test])
    Y_pred_binarised_train = (Y_pred_train >= 0.5).astype("int").ravel()
    accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train[test])
    print("Train Accuracy : ", accuracy_train)
    print("-"*50)<predict_on_test>
numerical_vars = ['Age', 'SibSp', 'Parch', 'Fare']
ordinal_vars = ['Pclass']
nominal_vars = ['Survived', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']
Titanic - Machine Learning from Disaster
21,908,637
def print_accuracy(sn):
    Y_pred_train = sn.predict(X_scaled_train)
    Y_pred_binarised_train = (Y_pred_train >= 0.5).astype("int").ravel()
    accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
    print("Train Accuracy : ", accuracy_train)
    print("-"*50)<compute_test_metric>
train0 = train[train.Survived == 0]
train1 = train[train.Survived == 1]
cnt = 0
detail_desc = []
for c in train.columns:
    if c == 'PassengerId':
        continue
    if train[c].dtypes != 'object':
        desc = pd.DataFrame(columns=['feature', 'data', 'type', 'count', 'mean', 'median',
                                     'std', 'min', 'max', 'skew', 'null'])
        desc.loc[0] = [c, 'Train', train[c].dtype.name, train[c].count(), train[c].mean(),
                       train[c].median(), train[c].std(), train[c].min(), train[c].max(),
                       train[c].skew(), train[c].isnull().sum()]
        desc.loc[1] = [c, 'All', train[c].dtype.name, all_data[c].count(), all_data[c].mean(),
                       all_data[c].median(), all_data[c].std(), all_data[c].min(), all_data[c].max(),
                       all_data[c].skew(), all_data[c].isnull().sum()]
        desc.loc[2] = [c, 'Target=0', train0[c].dtype.name, train0[c].count(), train0[c].mean(),
                       train0[c].median(), train0[c].std(), train0[c].min(), train0[c].max(),
                       train0[c].skew(), train0[c].isnull().sum()]
        desc.loc[3] = [c, 'Target=1', train1[c].dtype.name, train1[c].count(), train1[c].mean(),
                       train1[c].median(), train1[c].std(), train1[c].min(), train1[c].max(),
                       train1[c].skew(), train1[c].isnull().sum()]
        desc = desc.set_index(['feature', 'data'], drop=True)
        detail_desc.append(desc.style.background_gradient())
Titanic - Machine Learning from Disaster
21,908,637
print_accuracy(sn_ce )<save_to_csv>
train0 = train[train.Survived == 0]
train1 = train[train.Survived == 1]
cnt = 0
detail_desc = []
for c in train.columns:
    if c == 'PassengerId':
        continue
    if train[c].dtypes == 'object':
        desc = pd.DataFrame(columns=['feature', 'data', 'type', 'count', 'null', 'mode', 'value_count'])
        desc.loc[0] = [c, 'Train', train[c].dtype.name, train[c].count(), train[c].isnull().sum(),
                       train[c].mode(), train[c].value_counts()]
        desc = desc.set_index(['feature', 'data'], drop=True)
        detail_desc.append(desc.style.background_gradient())
Titanic - Machine Learning from Disaster
21,908,637
Y_pred_test = sn_ce.predict(X_scaled_test)
Y_pred_binarised_test = (Y_pred_test >= 0.5).astype("int").ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission.to_csv("submission.csv", index=False)<define_variables>
multi_table(detail_desc )
Titanic - Machine Learning from Disaster
21,908,637
np.random.seed(100)
LEVEL = 'level_4a' <compute_test_metric>
tmp_train = copy.deepcopy(train)
tmp_train['AgeBin'] = 6
for i in range(6):
    tmp_train.loc[(tmp_train.Age >= 10*i) & (tmp_train.Age < 10*(i + 1)), 'AgeBin'] = i
tmp_train.head(3)
Titanic - Machine Learning from Disaster
21,908,637
class SigmoidNeuron:
    def __init__(self):
        self.w = None
        self.b = None

    def perceptron(self, x):
        return np.dot(x, self.w.T) + self.b

    def sigmoid(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def grad_w_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred) * x

    def grad_b_mse(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        return (y_pred - y) * y_pred * (1 - y_pred)

    def grad_w_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred * x
        elif y == 1:
            return -1 * (1 - y_pred) * x
        else:
            raise ValueError("y should be 0 or 1")

    def grad_b_ce(self, x, y):
        y_pred = self.sigmoid(self.perceptron(x))
        if y == 0:
            return y_pred
        elif y == 1:
            return -1 * (1 - y_pred)
        else:
            raise ValueError("y should be 0 or 1")

    def fit(self, X, Y, epochs=1, learning_rate=1, initialise=True, loss_fn="mse", display_loss=False):
        if initialise:
            self.w = np.random.randn(1, X.shape[1])
            self.b = 0
        if display_loss:
            loss = {}
        for i in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
            dw = 0
            db = 0
            for x, y in zip(X, Y):
                if loss_fn == "mse":
                    dw += self.grad_w_mse(x, y)
                    db += self.grad_b_mse(x, y)
                elif loss_fn == "ce":
                    dw += self.grad_w_ce(x, y)
                    db += self.grad_b_ce(x, y)
            self.w -= learning_rate * dw
            self.b -= learning_rate * db
            if display_loss:
                Y_pred = self.sigmoid(self.perceptron(X))
                if loss_fn == "mse":
                    loss[i] = mean_squared_error(Y, Y_pred)
                elif loss_fn == "ce":
                    loss[i] = log_loss(Y, Y_pred)
        if display_loss:
            plt.plot(loss.values())
            plt.xlabel('Epochs')
            if loss_fn == "mse":
                plt.ylabel('Mean Squared Error')
            elif loss_fn == "ce":
                plt.ylabel('Log Loss')
            plt.show()

    def predict(self, X):
        Y_pred = []
        for x in X:
            y_pred = self.sigmoid(self.perceptron(x))
            Y_pred.append(y_pred)
        return np.array(Y_pred)<load_pretrained>
cat_dist(tmp_train, var='AgeBin', hue='Survived', msg_show=False )
Titanic - Machine Learning from Disaster
21,908,637
languages = ['ta', 'hi', 'en']
images_train = read_all("../input/level_4a_train/"+LEVEL+"/"+"background", key_prefix='bgr_')
for language in languages:
    images_train.update(read_all("../input/level_4a_train/"+LEVEL+"/"+language, key_prefix=language+"_"))
print(len(images_train))
images_test = read_all("../input/level_4a_test/kaggle_"+LEVEL, key_prefix='')
print(len(images_test))<define_variables>
tmp_train = copy.deepcopy(train)
tmp_train['FareBin'] = pd.cut(tmp_train.Fare, 10)
tmp_train['FareBin'] = LabelEncoder().fit_transform(tmp_train.FareBin)
tmp_train.head(3)
Titanic - Machine Learning from Disaster
21,908,637
X_train = []
Y_train = []
for key, value in images_train.items():
    X_train.append(value)
    if key[:4] == "bgr_":
        Y_train.append(0)
    else:
        Y_train.append(1)
ID_test = []
X_test = []
for key, value in images_test.items():
    ID_test.append(int(key))
    X_test.append(value)
X_train = np.array(X_train)
Y_train = np.array(Y_train)
X_test = np.array(X_test)
print(X_train.shape, Y_train.shape)
print(X_test.shape)<normalization>
cat_dist(tmp_train, var='FareBin', hue='Survived', msg_show=False )
Titanic - Machine Learning from Disaster
21,908,637
scaler = MinMaxScaler()
X_scaled_train = scaler.fit_transform(X_train)
X_scaled_test = scaler.transform(X_test) <train_model>
cat_dist(train, var='Embarked', hue='Survived' )
Titanic - Machine Learning from Disaster
21,908,637
sn_ce = SigmoidNeuron()
sn_ce.fit(X_scaled_train, Y_train, epochs=200, learning_rate=0.015, loss_fn="ce", display_loss=True)<predict_on_test>
tmp_all_data = copy.deepcopy(all_data)
t0 = pd.DataFrame(tmp_all_data.Name)
t1 = pd.DataFrame(tmp_all_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip()).value_counts())
multi_table([t0, t1])
Titanic - Machine Learning from Disaster
21,908,637
def print_accuracy(sn):
    Y_pred_train = sn.predict(X_scaled_train)
    Y_pred_binarised_train = (Y_pred_train >= 0.5).astype("int").ravel()
    accuracy_train = accuracy_score(Y_pred_binarised_train, Y_train)
    print("Train Accuracy : ", accuracy_train)
    print("-"*50)<compute_test_metric>
tmp_all_data['Title'] = tmp_all_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
Titanic - Machine Learning from Disaster
21,908,637
print_accuracy(sn_ce )<prepare_output>
tmp_train.Cabin.value_counts()
Titanic - Machine Learning from Disaster
21,908,637
Y_pred_test = sn_ce.predict(X_scaled_test)
Y_pred_binarised_test = (Y_pred_test >= 0.5).astype("int").ravel()
submission = {}
submission['ImageId'] = ID_test
submission['Class'] = Y_pred_binarised_test
submission = pd.DataFrame(submission)
submission = submission[['ImageId', 'Class']]
submission = submission.sort_values(['ImageId'])
submission
submission.to_csv("submission_6.csv", index=False) <groupby>
tmp_train['CabinCnt'] = tmp_train.Cabin.apply(lambda x: 0 if pd.isna(x) else len(x.split(' ')))
tmp_train['CabinClass'] = tmp_train.Cabin.apply(lambda x: str(x)[0])
Titanic - Machine Learning from Disaster
21,908,637
submission.groupby('Class' ).count()<set_options>
t0 = pd.DataFrame(tmp_train.CabinCnt.value_counts())
t1 = pd.DataFrame(tmp_train.CabinClass.value_counts())
multi_table([t0, t1])
Titanic - Machine Learning from Disaster
21,908,637
warnings.filterwarnings('ignore')
%matplotlib inline <load_from_csv>
cat_dist(tmp_train, var='CabinCnt', hue='Survived', msg_show=False )
Titanic - Machine Learning from Disaster
21,908,637
sputnik_data = pd.read_csv("../input/sputnik/train.csv", sep=",")
sputnik_data<prepare_x_and_y>
cat_dist(tmp_train, var='CabinClass', hue='Survived', msg_show=False )
Titanic - Machine Learning from Disaster
21,908,637
sputnik_train = sputnik_data.dropna(inplace=False)
sputnik_train['error'] = np.linalg.norm(sputnik_train[['x', 'y', 'z']].values
                                        - sputnik_train[['x_sim', 'y_sim', 'z_sim']].values, axis=1)
sputnik_train.drop(['x', 'y', 'z', 'x_sim', 'y_sim', 'z_sim'], axis=1, inplace=True)
sputnik_train['epoch'] = pd.to_datetime(sputnik_train.epoch, format='%Y-%m-%d %H:%M:%S')
sputnik_train.index = sputnik_train.epoch
sputnik_train.drop('epoch', axis=1, inplace=True)<predict_on_test>
tmp_train['IsNumericTicket'] = tmp_train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
tmp_train['TicketType'] = tmp_train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1]).replace('.', '').replace('/', '').lower()
                                                 if len(x.split(' ')[:-1]) > 0 else 0)
Titanic - Machine Learning from Disaster