column        type           min .. max
kernel_id     int64          24.2k .. 23.3M
prompt        stringlengths  8 .. 1.85M
completion    stringlengths  1 .. 182k
comp_name     stringlengths  5 .. 57
9,725,565
learn = Learner(data, md_ef, metrics=[qk], model_dir="models").to_fp16() learn.data.add_test(ImageList.from_df(test_df, '../input/aptos2019-blindness-detection', folder='test_images', suffix='.png'))<load_pretrained>
svc = SVC() svc.fit(X_train, Y_train) Y_pred = svc.predict(X_test) acc_svc = round(svc.score(X_train, Y_train) * 100, 2) acc_svc
Titanic - Machine Learning from Disaster
9,725,565
learn.load('abcdef');<compute_test_metric>
knn = KNeighborsClassifier(n_neighbors=3) knn.fit(X_train, Y_train) Y_pred = knn.predict(X_test) acc_knn = round(knn.score(X_train, Y_train) * 100, 2) acc_knn
Titanic - Machine Learning from Disaster
9,725,565
class OptimizedRounder(object): def __init__(self): self.coef_ = 0 def _kappa_loss(self, coef, X, y): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic') return -ll def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5, 3.5] self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead') print(-loss_partial(self.coef_['x'])) def predict(self, X, coef): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 return X_p def coefficients(self): return self.coef_['x']<save_to_csv>
gaussian = GaussianNB() gaussian.fit(X_train, Y_train) Y_pred = gaussian.predict(X_test) acc_gaussian = round(gaussian.score(X_train, Y_train) * 100, 2) acc_gaussian
Titanic - Machine Learning from Disaster
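The OptimizedRounder prompt above searches for the four cutoffs that turn continuous regression scores into the five APTOS grades by minimizing negative quadratic weighted kappa. A minimal usage sketch, assuming the class as defined above; val_preds and val_labels are illustrative synthetic arrays, not values from the kernel:

import numpy as np
import scipy as sp
from functools import partial
from sklearn import metrics

# Illustrative continuous scores and true grades in {0,...,4}.
val_preds = np.array([0.1, 0.7, 1.9, 2.4, 3.6, 0.2, 4.2])
val_labels = np.array([0, 1, 2, 2, 4, 0, 4])

opt = OptimizedRounder()
opt.fit(val_preds, val_labels)             # Nelder-Mead search over the four thresholds
coef = opt.coefficients()                  # optimized cutoffs, seeded from [0.5, 1.5, 2.5, 3.5]
hard_preds = opt.predict(val_preds, coef)  # integer grades after thresholding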
9,725,565
def run_subm(learn=learn, coefficients=[0.5, 1.5, 2.5, 3.5]): opt = OptimizedRounder() preds, y = learn.get_preds(DatasetType.Test) tst_pred = opt.predict(preds, coefficients) test_df.diagnosis = tst_pred.astype(int) test_df.to_csv('submission.csv', index=False) print('done')<define_variables>
perceptron = Perceptron() perceptron.fit(X_train, Y_train) Y_pred = perceptron.predict(X_test) acc_perceptron = round(perceptron.score(X_train, Y_train) * 100, 2) acc_perceptron
Titanic - Machine Learning from Disaster
9,725,565
TTA = False<set_options>
linear_svc = LinearSVC() linear_svc.fit(X_train, Y_train) Y_pred = linear_svc.predict(X_test) acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2) acc_linear_svc
Titanic - Machine Learning from Disaster
9,725,565
%reload_ext autoreload %autoreload 2 %matplotlib inline <set_options>
sgd = SGDClassifier() sgd.fit(X_train, Y_train) Y_pred = sgd.predict(X_test) acc_sgd = round(sgd.score(X_train, Y_train) * 100, 2) acc_sgd
Titanic - Machine Learning from Disaster
9,725,565
def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(42)<load_from_csv>
decision_tree = DecisionTreeClassifier() decision_tree.fit(X_train, Y_train) Y_pred = decision_tree.predict(X_test) acc_decision_tree = round(decision_tree.score(X_train, Y_train) * 100, 2) acc_decision_tree
Titanic - Machine Learning from Disaster
9,725,565
def get_df(): base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir, 'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1).reset_index(drop=True) test_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv') return df, test_df df, test_df = get_df()<compute_test_metric>
random_forest = RandomForestClassifier(n_estimators=100) random_forest.fit(X_train, Y_train) Y_pred = random_forest.predict(X_test) random_forest.score(X_train, Y_train) acc_random_forest = round(random_forest.score(X_train, Y_train) * 100, 2) acc_random_forest
Titanic - Machine Learning from Disaster
9,725,565
def qk(y_pred, y): return torch.tensor(cohen_kappa_score(torch.round(y_pred), y, weights='quadratic'), device='cuda:0')<compute_test_metric>
models = pd.DataFrame({ 'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', 'Random Forest', 'Naive Bayes', 'Perceptron', 'Stochastic Gradient Descent', 'Linear SVC', 'Decision Tree'], 'Score': [acc_svc, acc_knn, acc_log, acc_random_forest, acc_gaussian, acc_perceptron, acc_sgd, acc_linear_svc, acc_decision_tree]}) sorted_model = models.sort_values(by='Score', ascending=False) sorted_model
Titanic - Machine Learning from Disaster
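The qk function above adapts sklearn's quadratic weighted kappa as a fastai metric by rounding the regression output before scoring. A quick standalone check of that scoring rule on toy tensors (values chosen only for illustration):

import torch
from sklearn.metrics import cohen_kappa_score

y_pred = torch.tensor([0.2, 1.1, 2.6, 3.9])   # continuous model outputs
y_true = torch.tensor([0.0, 1.0, 3.0, 4.0])   # integer grades stored as floats
score = cohen_kappa_score(torch.round(y_pred).numpy(), y_true.numpy(), weights='quadratic')
print(score)  # 1.0 here, since every rounded prediction matches its label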
9,725,565
<save_to_csv><EOS>
submission = pd.DataFrame({ "PassengerId": test_df["PassengerId"], "Survived": Y_pred }) submission.to_csv('submission2.csv', index=False)
Titanic - Machine Learning from Disaster
8,325,212
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class>
%matplotlib inline
Titanic - Machine Learning from Disaster
8,325,212
md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1) learn = Learner(data, md_ef, metrics=[qk], callback_fns=[BnFreeze, partial(SaveModelCallback, monitor='quad_kappa', name='bestmodel')], model_dir="models") if TTA: learn = learn.to_fp32() else: learn = learn.to_fp16() learn.data.add_test(ImageList.from_df(test_df, '../input/aptos2019-blindness-detection', folder='test_images', suffix='.png'))<load_pretrained>
df = pd.read_csv("../input/titanic/train.csv") df.head()
Titanic - Machine Learning from Disaster
8,325,212
!mkdir models !cp '../input/kaggle-public/abcdef.pth' 'models' learn.load('abcdef');<load_pretrained>
df.drop(['PassengerId','Cabin','Name','Ticket'], axis=1, inplace=True) df.head()
Titanic - Machine Learning from Disaster
8,325,212
learn.load('bestmodel')<train_on_grid>
df.isnull().sum()
Titanic - Machine Learning from Disaster
8,325,212
rounder = OptimizedRounder() rounder.fit(valid_preds[0], valid_preds[1]) rounder_coefficients = rounder.coefficients() print(rounder_coefficients)<count_values>
df = df[df["Embarked"].notna()] df.isnull().sum()
Titanic - Machine Learning from Disaster
8,325,212
test_df_tta['diagnosis'].value_counts()<import_modules>
df["Age"] = df[["Age","Pclass"]].apply(find_age,axis=1) df.head()
Titanic - Machine Learning from Disaster
8,325,212
import os import sys import cv2 import time import scipy as sp import numpy as np import pandas as pd from tqdm import tqdm from PIL import Image from functools import partial import matplotlib.pyplot as plt import tensorflow as tf from tensorflow import set_random_seed import keras from keras import initializers from keras import regularizers from keras import constraints from keras import backend as K from keras.activations import elu from keras.optimizers import Adam from keras.models import Sequential from keras.engine import Layer, InputSpec from keras.utils.generic_utils import get_custom_objects from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau from keras.layers import Dense, Conv2D, Flatten, GlobalAveragePooling2D, Dropout from keras.preprocessing.image import ImageDataGenerator from sklearn.metrics import cohen_kappa_score from keras.applications.resnet50 import ResNet50<define_variables>
df.isnull().sum()
Titanic - Machine Learning from Disaster
8,325,212
SEED = 7 np.random.seed(SEED) set_random_seed(SEED) INPUT_PATH = '../input/aptos2019-blindness-detection/' DIM = 224 BATCH_SIZE = 4 CHANNEL_SIZE = 3 NUM_EPOCHS = 30 LR = 1e-3 CLASS = {0: "No DR", 1: "Mild", 2: "Moderate", 3: "Severe", 4: "Proliferative DR"} NUM_CLASSES = len(CLASS.keys()) SAVED_MODEL_NAME = 'model.h5'<load_from_csv>
df["Embarked"].value_counts()
Titanic - Machine Learning from Disaster
8,325,212
train = pd.read_csv(INPUT_PATH + 'train.csv') test = pd.read_csv(INPUT_PATH + 'test.csv')<feature_engineering>
le = LabelEncoder() le.fit(["S","C","Q"]) df["Embarked"] = le.transform(df["Embarked"])
Titanic - Machine Learning from Disaster
8,325,212
train['images'] = train['id_code'].apply(lambda x: INPUT_PATH + "train_images/" + str(x) + ".png") test['images'] = test['id_code'].apply(lambda x: INPUT_PATH + "test_images/" + str(x) + ".png") train.drop(['id_code'], axis=1, inplace=True) train = train[['images','diagnosis']]<predict_on_test>
from sklearn.preprocessing import StandardScaler
Titanic - Machine Learning from Disaster
8,325,212
def get_preds_and_labels(model, generator): preds = [] labels = [] for _ in range(int(np.ceil(generator.samples / BATCH_SIZE))): x, y = next(generator) preds.append(model.predict(x)) labels.append(y) return np.concatenate(preds).ravel(), np.concatenate(labels).ravel()<train_model>
scaler = StandardScaler()
Titanic - Machine Learning from Disaster
8,325,212
class Metrics(Callback): def on_train_begin(self, logs={}): self.val_kappas = [] def on_epoch_end(self, epoch, logs={}): y_pred, labels = get_preds_and_labels(model, val_generator) y_pred = np.rint(y_pred).astype(np.uint8).clip(0, 4) _val_kappa = cohen_kappa_score(labels, y_pred, weights='quadratic') self.val_kappas.append(_val_kappa) print(f"val_kappa: {round(_val_kappa, 4)}") if _val_kappa == max(self.val_kappas): print("Validation Kappa has improved. Saving model.") self.model.save(SAVED_MODEL_NAME) return<create_dataframe>
s = scaler.fit(df[["Fare"]])
Titanic - Machine Learning from Disaster
8,325,212
train_datagen = ImageDataGenerator(rotation_range=360, horizontal_flip=True, vertical_flip=True, validation_split=0.15, preprocessing_function=preprocess_image, rescale=1 / 128.) train_generator = train_datagen.flow_from_dataframe(train, x_col='images', y_col='diagnosis', target_size=(DIM, DIM), batch_size=BATCH_SIZE, class_mode='other', subset='training') val_generator = train_datagen.flow_from_dataframe(train, x_col='images', y_col='diagnosis', target_size=(DIM, DIM), batch_size=BATCH_SIZE, class_mode='other', subset='validation')<choose_model_class>
df["Fare"] = s.transform(df[["Fare"]]) df.head()
Titanic - Machine Learning from Disaster
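The generators above pass a preprocess_image function that is never defined in this cell. Other kernels in this dump sharpen fundus images by subtracting a heavy Gaussian blur via cv2.addWeighted, so a plausible definition would look like the sketch below; this is an assumption for illustration, not the kernel's actual code:

import cv2
import numpy as np

def preprocess_image(image):
    # Assumed Ben Graham-style enhancement, mirroring the
    # cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), 30), -4, 128)
    # call that appears later in this dataset.
    image = image.astype(np.float32)
    blurred = cv2.GaussianBlur(image, (0, 0), 30)
    return cv2.addWeighted(image, 4, blurred, -4, 128)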
8,325,212
class RAdam(keras.optimizers.Optimizer): def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0., weight_decay=0., amsgrad=False, total_steps=0, warmup_proportion=0.1, min_lr=0., **kwargs): super(RAdam, self).__init__(**kwargs) with K.name_scope(self.__class__.__name__): self.iterations = K.variable(0, dtype='int64', name='iterations') self.lr = K.variable(lr, name='lr') self.beta_1 = K.variable(beta_1, name='beta_1') self.beta_2 = K.variable(beta_2, name='beta_2') self.decay = K.variable(decay, name='decay') self.weight_decay = K.variable(weight_decay, name='weight_decay') self.total_steps = K.variable(total_steps, name='total_steps') self.warmup_proportion = K.variable(warmup_proportion, name='warmup_proportion') self.min_lr = K.variable(min_lr, name='min_lr') if epsilon is None: epsilon = K.epsilon() self.epsilon = epsilon self.initial_decay = decay self.initial_weight_decay = weight_decay self.initial_total_steps = total_steps self.amsgrad = amsgrad def get_updates(self, loss, params): grads = self.get_gradients(loss, params) self.updates = [K.update_add(self.iterations, 1)] lr = self.lr if self.initial_decay > 0: lr = lr * (1. / (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay)))) t = K.cast(self.iterations, K.floatx()) + 1 if self.initial_total_steps > 0: warmup_steps = self.total_steps * self.warmup_proportion decay_steps = self.total_steps - warmup_steps lr = K.switch(t <= warmup_steps, lr * (t / warmup_steps), lr * (1.0 - K.minimum(t, decay_steps) / decay_steps)) ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='m_' + str(i)) for (i, p) in enumerate(params)] vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='v_' + str(i)) for (i, p) in enumerate(params)] if self.amsgrad: vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p), name='vhat_' + str(i)) for (i, p) in enumerate(params)] else: vhats = [K.zeros(1, name='vhat_' + str(i)) for i in range(len(params))] self.weights = [self.iterations] + ms + vs + vhats beta_1_t = K.pow(self.beta_1, t) beta_2_t = K.pow(self.beta_2, t) sma_inf = 2.0 / (1.0 - self.beta_2) - 1.0 sma_t = sma_inf - 2.0 * t * beta_2_t / (1.0 - beta_2_t) for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats): m_t = (self.beta_1 * m) + (1. - self.beta_1) * g v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g) m_corr_t = m_t / (1.0 - beta_1_t) if self.amsgrad: vhat_t = K.maximum(vhat, v_t) v_corr_t = K.sqrt(vhat_t / (1.0 - beta_2_t) + self.epsilon) self.updates.append(K.update(vhat, vhat_t)) else: v_corr_t = K.sqrt(v_t / (1.0 - beta_2_t) + self.epsilon) r_t = K.sqrt((sma_t - 4.0) / (sma_inf - 4.0) * (sma_t - 2.0) / (sma_inf - 2.0) * sma_inf / sma_t) p_t = K.switch(sma_t > 5, r_t * m_corr_t / v_corr_t, m_corr_t) if self.initial_weight_decay > 0: p_t += self.weight_decay * p p_t = p - lr * p_t self.updates.append(K.update(m, m_t)) self.updates.append(K.update(v, v_t)) new_p = p_t if getattr(p, 'constraint', None) is not None: new_p = p.constraint(new_p) self.updates.append(K.update(p, new_p)) return self.updates def get_config(self): config = {'lr': float(K.get_value(self.lr)), 'beta_1': float(K.get_value(self.beta_1)), 'beta_2': float(K.get_value(self.beta_2)), 'decay': float(K.get_value(self.decay)), 'weight_decay': float(K.get_value(self.weight_decay)), 'epsilon': self.epsilon, 'amsgrad': self.amsgrad, 'total_steps': float(K.get_value(self.total_steps)), 'warmup_proportion': float(K.get_value(self.warmup_proportion)), 'min_lr': float(K.get_value(self.min_lr))} base_config = super(RAdam, self).get_config() return dict(list(base_config.items()) + list(config.items()))<choose_model_class>
df["Male"] = pd.get_dummies(df["Sex"],drop_first=True )
Titanic - Machine Learning from Disaster
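The K.switch(sma_t > 5, ...) in the RAdam prompt above applies the paper's variance rectification only once the approximated simple-moving-average length is long enough; before that it falls back to a momentum-only update. Recomputing the rectification factor in plain numpy (the helper name is illustrative):

import numpy as np

def rectification_term(t, beta_2=0.999):
    sma_inf = 2.0 / (1.0 - beta_2) - 1.0                       # maximum SMA length
    sma_t = sma_inf - 2.0 * t * beta_2**t / (1.0 - beta_2**t)  # SMA length at step t
    if sma_t <= 5:
        return None  # un-rectified branch: update uses bias-corrected momentum only
    return np.sqrt((sma_t - 4.0) / (sma_inf - 4.0)
                   * (sma_t - 2.0) / (sma_inf - 2.0)
                   * sma_inf / sma_t)

print(rectification_term(10), rectification_term(10000))  # small early on, approaches 1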
8,325,212
class GroupNormalization(Layer): def __init__(self, groups=32, axis=-1, epsilon=1e-5, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, **kwargs): super(GroupNormalization, self ).__init__(**kwargs) self.supports_masking = True self.groups = groups self.axis = axis self.epsilon = epsilon self.center = center self.scale = scale self.beta_initializer = initializers.get(beta_initializer) self.gamma_initializer = initializers.get(gamma_initializer) self.beta_regularizer = regularizers.get(beta_regularizer) self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_constraint = constraints.get(beta_constraint) self.gamma_constraint = constraints.get(gamma_constraint) def build(self, input_shape): dim = input_shape[self.axis] if dim is None: raise ValueError('Axis ' + str(self.axis)+ ' of ' 'input tensor should have a defined dimension ' 'but the layer received an input with shape ' + str(input_shape)+ '.') if dim < self.groups: raise ValueError('Number of groups(' + str(self.groups)+ ')cannot be ' 'more than the number of channels(' + str(dim)+ ' ).') if dim % self.groups != 0: raise ValueError('Number of groups(' + str(self.groups)+ ')must be a ' 'multiple of the number of channels(' + str(dim)+ ' ).') self.input_spec = InputSpec(ndim=len(input_shape), axes={self.axis: dim}) shape =(dim,) if self.scale: self.gamma = self.add_weight(shape=shape, name='gamma', initializer=self.gamma_initializer, regularizer=self.gamma_regularizer, constraint=self.gamma_constraint) else: self.gamma = None if self.center: self.beta = self.add_weight(shape=shape, name='beta', initializer=self.beta_initializer, regularizer=self.beta_regularizer, constraint=self.beta_constraint) else: self.beta = None self.built = True def call(self, inputs, **kwargs): input_shape = K.int_shape(inputs) tensor_input_shape = K.shape(inputs) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] // self.groups broadcast_shape.insert(1, self.groups) reshape_group_shape = K.shape(inputs) group_axes = [reshape_group_shape[i] for i in range(len(input_shape)) ] group_axes[self.axis] = input_shape[self.axis] // self.groups group_axes.insert(1, self.groups) group_shape = [group_axes[0], self.groups] + group_axes[2:] group_shape = K.stack(group_shape) inputs = K.reshape(inputs, group_shape) group_reduction_axes = list(range(len(group_axes))) group_reduction_axes = group_reduction_axes[2:] mean = K.mean(inputs, axis=group_reduction_axes, keepdims=True) variance = K.var(inputs, axis=group_reduction_axes, keepdims=True) inputs =(inputs - mean)/(K.sqrt(variance + self.epsilon)) inputs = K.reshape(inputs, group_shape) outputs = inputs if self.scale: broadcast_gamma = K.reshape(self.gamma, broadcast_shape) outputs = outputs * broadcast_gamma if self.center: broadcast_beta = K.reshape(self.beta, broadcast_shape) outputs = outputs + broadcast_beta outputs = K.reshape(outputs, tensor_input_shape) return outputs def get_config(self): config = { 'groups': self.groups, 'axis': self.axis, 'epsilon': self.epsilon, 'center': self.center, 'scale': self.scale, 'beta_initializer': initializers.serialize(self.beta_initializer), 'gamma_initializer': initializers.serialize(self.gamma_initializer), 'beta_regularizer': regularizers.serialize(self.beta_regularizer), 'gamma_regularizer': 
regularizers.serialize(self.gamma_regularizer), 'beta_constraint': constraints.serialize(self.beta_constraint), 'gamma_constraint': constraints.serialize(self.gamma_constraint) } base_config = super(GroupNormalization, self ).get_config() return dict(list(base_config.items())+ list(config.items())) def compute_output_shape(self, input_shape): return input_shape<load_pretrained>
df.drop("Sex",inplace=True,axis=1 )
Titanic - Machine Learning from Disaster
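GroupNormalization above reshapes the channel axis into groups and normalizes each group with its own mean and variance, so the statistics do not depend on batch size. The same computation for a single channels-last tensor, as a compact numpy sketch (shapes are arbitrary examples; the learned gamma/beta scaling is omitted):

import numpy as np

def group_norm(x, groups=4, eps=1e-5):
    # x: (N, H, W, C) with C divisible by groups.
    n, h, w, c = x.shape
    g = x.reshape(n, h, w, groups, c // groups)
    mean = g.mean(axis=(1, 2, 4), keepdims=True)   # per-sample, per-group statistics
    var = g.var(axis=(1, 2, 4), keepdims=True)
    return ((g - mean) / np.sqrt(var + eps)).reshape(n, h, w, c)

print(group_norm(np.random.randn(2, 8, 8, 16)).shape)  # (2, 8, 8, 16)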
8,325,212
resnet = ResNet50(weights=None, include_top=False, input_shape=(DIM, DIM, CHANNEL_SIZE)) resnet.load_weights('../input/resnet50-weights-file/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')<choose_model_class>
x = df[['Male','Agegroup','SibSp','Pclass', 'Parch', 'Fare', 'Embarked']].values y = df["Survived"].values
Titanic - Machine Learning from Disaster
8,325,212
for i, layer in enumerate(resnet.layers): if "batch_normalization" in layer.name: resnet.layers[i] = GroupNormalization(groups=32, axis=-1, epsilon=0.00001)<choose_model_class>
from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
8,325,212
def build_model(): model = Sequential() model.add(resnet) model.add(GlobalAveragePooling2D()) model.add(Dropout(0.5)) model.add(Dense(5, activation=elu)) model.add(Dense(1, activation="linear")) model.compile(loss='mse', optimizer=RAdam(lr=0.00005), metrics=['mse', 'acc']) print(model.summary()) return model model = build_model()<train_on_grid>
from sklearn.model_selection import train_test_split
Titanic - Machine Learning from Disaster
8,325,212
kappa_metrics = Metrics() es = EarlyStopping(monitor='val_loss', mode='auto', verbose=1, patience=12) rlr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, verbose=1, mode='auto', epsilon=0.0001) with tf.device('/gpu:0'): history = model.fit_generator(train_generator, steps_per_epoch=train_generator.samples // BATCH_SIZE, epochs=35, validation_data=val_generator, validation_steps=val_generator.samples // BATCH_SIZE, callbacks=[kappa_metrics, es, rlr], verbose=1)<load_pretrained>
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.35)
Titanic - Machine Learning from Disaster
8,325,212
model.load_weights(SAVED_MODEL_NAME)<compute_test_metric>
from sklearn.tree import DecisionTreeClassifier
Titanic - Machine Learning from Disaster
8,325,212
y_train_preds, train_labels = get_preds_and_labels(model, train_generator) y_train_preds = np.rint(y_train_preds).astype(np.uint8).clip(0, 4) train_score = cohen_kappa_score(train_labels, y_train_preds, weights="quadratic") y_val_preds, val_labels = get_preds_and_labels(model, val_generator) y_val_preds = np.rint(y_val_preds).astype(np.uint8).clip(0, 4) val_score = cohen_kappa_score(val_labels, y_val_preds, weights="quadratic")<compute_test_metric>
tree = DecisionTreeClassifier(max_depth=4, random_state=10)
Titanic - Machine Learning from Disaster
8,325,212
print(f"The Training Cohen Kappa Score is: {round(train_score, 5)}") print(f"The Validation Cohen Kappa Score is: {round(val_score, 5)}" )<compute_train_metric>
tree.fit(x_train, y_train)
Titanic - Machine Learning from Disaster
8,325,212
class OptimizedRounder(object): def __init__(self): self.coef_ = 0 def _kappa_loss(self, coef, X, y): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 ll = cohen_kappa_score(y, X_p, weights='quadratic') return -ll def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5, 3.5] self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead') def predict(self, X, coef): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 return X_p def coefficients(self): return self.coef_['x']<compute_train_metric>
predict = tree.predict(x_test)
Titanic - Machine Learning from Disaster
8,325,212
y_val_preds, val_labels = get_preds_and_labels(model, val_generator) optR = OptimizedRounder() optR.fit(y_val_preds, val_labels) coefficients = optR.coefficients() opt_val_predictions = optR.predict(y_val_preds, coefficients) new_val_score = cohen_kappa_score(val_labels, opt_val_predictions, weights="quadratic")<compute_test_metric>
from sklearn.metrics import accuracy_score
Titanic - Machine Learning from Disaster
8,325,212
print(f"Optimized Thresholds: {coefficients} ") print(f"The Validation Quadratic Weighted Kappa(QWK) \ with optimized rounding thresholds is: {round(new_val_score, 5)} ") print(f"This is an improvement of {round(new_val_score - val_score, 5)} \ over the unoptimized rounding" )<feature_engineering>
accuracy_score(y_test, predict)
Titanic - Machine Learning from Disaster
8,325,212
test['diagnosis'] = np.zeros(test.shape[0]) test_generator = ImageDataGenerator(preprocessing_function=preprocess_image, rescale=1 / 128.).flow_from_dataframe(test, x_col='images', y_col='diagnosis', target_size=(DIM, DIM), batch_size=BATCH_SIZE, class_mode='other', shuffle=False)<save_to_csv>
test = pd.read_csv("../input/titanic/test.csv") test.head()
Titanic - Machine Learning from Disaster
8,325,212
y_test, _ = get_preds_and_labels(model, test_generator) y_test = optR.predict(y_test, coefficients).astype(np.uint8) test['diagnosis'] = y_test test.drop(['images'], axis=1, inplace=True) test.to_csv('submission.csv', index=False)<import_modules>
test.drop(['Cabin','Name','Ticket'], axis=1, inplace=True)
Titanic - Machine Learning from Disaster
8,325,212
import cv2 import matplotlib.pyplot as plt from os.path import isfile import torch.nn.init as init import torch import torch.nn as nn from PIL import Image, ImageFilter from sklearn.model_selection import train_test_split, StratifiedKFold from torch.utils.data import Dataset from torchvision import transforms from torch.optim import Adam, SGD, RMSprop import time from torch.autograd import Variable import torch.functional as F from tqdm import tqdm from sklearn import metrics import urllib import pickle import torch.nn.functional as F from torchvision import models import seaborn as sns import random import sys<set_options>
test.isnull().sum()
Titanic - Machine Learning from Disaster
8,325,212
package_path = '.. /input/efficientnet/efficientnet-pytorch/EfficientNet-PyTorch/' sys.path.append(package_path) def seed_everything(seed): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = True seed_everything(1234) TTA = 5 num_classes = 1 IMG_SIZE = 256 test = '.. /input/aptos2019-blindness-detection/test_images/' def expand_path(p): p = str(p) if isfile(test + p + ".png"): return test +(p + ".png") return p def p_show(imgs, label_name=None, per_row=3): n = len(imgs) rows =(n + per_row - 1)//per_row cols = min(per_row, n) fig, axes = plt.subplots(rows,cols, figsize=(15,15)) for ax in axes.flatten() : ax.axis('off') for i,(p, ax)in enumerate(zip(imgs, axes.flatten())) : img = Image.open(expand_path(p)) ax.imshow(img) ax.set_title(train_df[train_df.id_code == p].diagnosis.values) def crop_image1(img,tol=7): mask = img>tol return img[np.ix_(mask.any(1),mask.any(0)) ] def crop_image_from_gray(img,tol=7): if img.ndim ==2: mask = img>tol return img[np.ix_(mask.any(1),mask.any(0)) ] elif img.ndim==3: gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) mask = gray_img>tol check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0)) ].shape[0] if(check_shape == 0): return img else: img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0)) ] img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0)) ] img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0)) ] img = np.stack([img1,img2,img3],axis=-1) return img class MyDataset(Dataset): def __init__(self, dataframe, transform=None): self.df = dataframe self.transform = transform def __len__(self): return len(self.df) def __getitem__(self, idx): label = self.df.diagnosis.values[idx] label = np.expand_dims(label, -1) p = self.df.id_code.values[idx] p_path = expand_path(p) image = cv2.imread(p_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) image = crop_image_from_gray(image) image = cv2.resize(image,(IMG_SIZE, IMG_SIZE)) image = cv2.addWeighted(image,4, cv2.GaussianBlur(image ,(0,0), 30),-4 ,128) image = transforms.ToPILImage()(image) if self.transform: image = self.transform(image) return image, label test_transform = transforms.Compose([ transforms.RandomHorizontalFlip() , transforms.RandomRotation(( -120, 120)) , transforms.ToTensor() , transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]) testset = MyDataset(pd.read_csv('.. /input/aptos2019-blindness-detection/sample_submission.csv'), transform=test_transform) test_loader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=False) model = EfficientNet.from_name('efficientnet-b0') in_features = model._fc.in_features model._fc = nn.Linear(in_features, num_classes) model.load_state_dict(torch.load('.. /input/enet-test/weight_best(3 ).pt')) model.cuda() for param in model.parameters() : param.requires_grad = False sample = pd.read_csv('.. 
/input/aptos2019-blindness-detection/sample_submission.csv') test_pred = np.zeros(( len(sample), 1)) model.eval() for _ in range(TTA): with torch.no_grad() : for i, data in tqdm(enumerate(test_loader)) : images, _ = data images = images.cuda() pred = model(images) test_pred[i * 16:(i + 1)* 16] += pred.detach().cpu().squeeze().numpy().reshape(-1, 1) output = test_pred / TTA preds1 = output.copy() coef = [0.57, 1.37, 2.57, 3.57] for i, pred in enumerate(output): if pred < coef[0]: output[i] = 0 elif pred >= coef[0] and pred < coef[1]: output[i] = 1 elif pred >= coef[1] and pred < coef[2]: output[i] = 2 elif pred >= coef[2] and pred < coef[3]: output[i] = 3 else: output[i] = 4 submission1 = pd.DataFrame({'id_code':pd.read_csv('.. /input/aptos2019-blindness-detection/sample_submission.csv' ).id_code.values, 'diagnosis':np.squeeze(output ).astype(int)} )<set_options>
test.isnull().sum()
Titanic - Machine Learning from Disaster
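The inference prompt above is test-time augmentation: the transform applies random flips and rotations, so each of the TTA passes over test_loader sees different views of the same images, and the accumulated logits are divided by TTA at the end. The averaging step in isolation, as a minimal sketch with a stubbed stochastic predictor (names and values are illustrative):

import numpy as np

def tta_predict(predict_fn, images, n_aug=5):
    # predict_fn is assumed to apply random augmentation internally,
    # as the RandomHorizontalFlip/RandomRotation transforms do above.
    total = np.zeros(len(images))
    for _ in range(n_aug):
        total += predict_fn(images)
    return total / n_aug

noisy = lambda x: x.mean(axis=1) + np.random.normal(0, 0.1, len(x))
print(tta_predict(noisy, np.random.randn(8, 4)))  # averaged over 5 noisy passes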
8,325,212
package_dir = ".. /input/pretrained-models/pretrained-models/pretrained-models.pytorch-master/" sys.path.insert(0, package_dir) device = torch.device("cuda:0") ImageFile.LOAD_TRUNCATED_IMAGES = True<load_from_csv>
test["Age"] = test[["Age","Pclass"]].apply(find_age,axis=1) df.head()
Titanic - Machine Learning from Disaster
8,325,212
class RetinopathyDatasetTest(Dataset): def __init__(self, csv_file, transform): self.data = pd.read_csv(csv_file) self.transform = transform def __len__(self): return len(self.data) def __getitem__(self, idx): img_name = os.path.join('.. /input/aptos2019-blindness-detection/test_images', self.data.loc[idx, 'id_code'] + '.png') image = Image.open(img_name) image = self.transform(image) return {'image': image} model = pretrainedmodels.__dict__['resnet101'](pretrained=None) model.avg_pool = nn.AdaptiveAvgPool2d(1) model.last_linear = nn.Sequential( nn.BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True), nn.Dropout(p=0.25), nn.Linear(in_features=2048, out_features=2048, bias=True), nn.ReLU() , nn.BatchNorm1d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True), nn.Dropout(p=0.5), nn.Linear(in_features=2048, out_features=1, bias=True), ) model.load_state_dict(torch.load(".. /input/mmmodel/model.bin")) model = model.to(device) for param in model.parameters() : param.requires_grad = False model.eval() test_transform = transforms.Compose([ transforms.Resize(( 224, 224)) , transforms.RandomHorizontalFlip() , transforms.ToTensor() , transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]) test_dataset = RetinopathyDatasetTest(csv_file='.. /input/aptos2019-blindness-detection/sample_submission.csv', transform=test_transform) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds1 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds1[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds2 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds2[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds3 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds3[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds4 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds4[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds5 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds5[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds6 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds6[i * 32:(i 
+ 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds7 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds7[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds8 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds8[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds9 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds9[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_data_loader = torch.utils.data.DataLoader(test_dataset, batch_size=32, shuffle=False, num_workers=4) test_preds10 = np.zeros(( len(test_dataset), 1)) tk0 = tqdm(test_data_loader) for i, x_batch in enumerate(tk0): x_batch = x_batch["image"] pred = model(x_batch.to(device)) test_preds10[i * 32:(i + 1)* 32] = pred.detach().cpu().squeeze().numpy().ravel().reshape(-1, 1) test_preds =(test_preds1 + test_preds2 + test_preds3 + test_preds4 + test_preds5 + test_preds6 + test_preds7 + test_preds8 + test_preds9 + test_preds10)/ 10.0 coef = [0.5, 1.5, 2.5, 3.5] preds2 = test_preds.copy() for i, pred in enumerate(test_preds): if pred < coef[0]: test_preds[i] = 0 elif pred >= coef[0] and pred < coef[1]: test_preds[i] = 1 elif pred >= coef[1] and pred < coef[2]: test_preds[i] = 2 elif pred >= coef[2] and pred < coef[3]: test_preds[i] = 3 else: test_preds[i] = 4 submission2 = pd.read_csv(".. /input/aptos2019-blindness-detection/sample_submission.csv") submission2.diagnosis = test_preds.astype(int )<import_modules>
test = test.fillna(df.mean()) test.isnull().sum()
Titanic - Machine Learning from Disaster
8,325,212
from keras.layers import (BatchNormalization, Input, Conv2D, GlobalAveragePooling2D, concatenate, Concatenate) WORKERS = 2 CHANNEL = 3 warnings.filterwarnings("ignore") SIZE = 300 NUM_CLASSES = 5<load_from_csv>
le = LabelEncoder() le.fit(["S","C","Q"]) test["Embarked"] = le.transform(test["Embarked"])
Titanic - Machine Learning from Disaster
8,325,212
df_train = pd.read_csv('.. /input/aptos2019-blindness-detection/train.csv') df_test = pd.read_csv('.. /input/aptos2019-blindness-detection/test.csv') x = df_train['id_code'] y = df_train['diagnosis'] x, y = shuffle(x, y, random_state=8) y = to_categorical(y, num_classes=NUM_CLASSES) train_x, valid_x, train_y, valid_y = train_test_split(x, y, test_size=0.15, stratify=y, random_state=8) sometimes = lambda aug: iaa.Sometimes(0.5, aug) seq = iaa.Sequential( [ iaa.Fliplr(0.5), iaa.Flipud(0.2), sometimes(iaa.Affine( scale={"x":(0.9, 1.1), "y":(0.9, 1.1)}, translate_percent={"x":(-0.1, 0.1), "y":(-0.1, 0.1)}, rotate=(-10, 10), shear=(-5, 5), order=[0, 1], cval=(0, 255), mode=ia.ALL )) , iaa.SomeOf(( 0, 5), [ sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), iaa.OneOf([ iaa.GaussianBlur(( 0, 1.0)) , iaa.AverageBlur(k=(3, 5)) , iaa.MedianBlur(k=(3, 5)) , ]), iaa.Sharpen(alpha=(0, 1.0), lightness=(0.9, 1.1)) , iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)) , iaa.SimplexNoiseAlpha(iaa.OneOf([ iaa.EdgeDetect(alpha=(0.5, 1.0)) , iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)) , ])) , iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.01*255), per_channel=0.5), iaa.OneOf([ iaa.Dropout(( 0.01, 0.05), per_channel=0.5), iaa.CoarseDropout(( 0.01, 0.03), size_percent=(0.01, 0.02), per_channel=0.2), ]), iaa.Invert(0.01, per_channel=True), iaa.Add(( -2, 2), per_channel=0.5), iaa.AddToHueAndSaturation(( -1, 1)) , iaa.OneOf([ iaa.Multiply(( 0.9, 1.1), per_channel=0.5), iaa.FrequencyNoiseAlpha( exponent=(-1, 0), first=iaa.Multiply(( 0.9, 1.1), per_channel=True), second=iaa.ContrastNormalization(( 0.9, 1.1)) ) ]), sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) , sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))), sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1))) ], random_order=True ) ], random_order=True) class My_Generator(Sequence): def __init__(self, image_filenames, labels, batch_size, is_train=True, mix=False, augment=False): self.image_filenames, self.labels = image_filenames, labels self.batch_size = batch_size self.is_train = is_train self.is_augment = augment if(self.is_train): self.on_epoch_end() self.is_mix = mix def __len__(self): return int(np.ceil(len(self.image_filenames)/ float(self.batch_size))) def __getitem__(self, idx): batch_x = self.image_filenames[idx * self.batch_size:(idx + 1)* self.batch_size] batch_y = self.labels[idx * self.batch_size:(idx + 1)* self.batch_size] if(self.is_train): return self.train_generate(batch_x, batch_y) return self.valid_generate(batch_x, batch_y) def on_epoch_end(self): if(self.is_train): self.image_filenames, self.labels = shuffle(self.image_filenames, self.labels) else: pass def mix_up(self, x, y): lam = np.random.beta(0.2, 0.4) ori_index = np.arange(int(len(x))) index_array = np.arange(int(len(x))) np.random.shuffle(index_array) mixed_x = lam * x[ori_index] +(1 - lam)* x[index_array] mixed_y = lam * y[ori_index] +(1 - lam)* y[index_array] return mixed_x, mixed_y def train_generate(self, batch_x, batch_y): batch_images = [] for(sample, label)in zip(batch_x, batch_y): img = cv2.imread('.. 
/input/aptos2019-blindness-detection/train_images/'+sample+'.png') img = cv2.resize(img,(SIZE, SIZE)) if(self.is_augment): img = seq.augment_image(img) batch_images.append(img) batch_images = np.array(batch_images, np.float32)/ 255 batch_y = np.array(batch_y, np.float32) if(self.is_mix): batch_images, batch_y = self.mix_up(batch_images, batch_y) return batch_images, batch_y def valid_generate(self, batch_x, batch_y): batch_images = [] for(sample, label)in zip(batch_x, batch_y): img = cv2.imread('.. /input/aptos2019-blindness-detection/train_images/'+sample+'.png') img = cv2.resize(img,(SIZE, SIZE)) batch_images.append(img) batch_images = np.array(batch_images, np.float32)/ 255 batch_y = np.array(batch_y, np.float32) return batch_images, batch_y def create_model(input_shape, n_out): input_tensor = Input(shape=input_shape) base_model = DenseNet121(include_top=False, weights=None, input_tensor=input_tensor) base_model.load_weights(".. /input/densenet-keras/DenseNet-BC-121-32-no-top.h5") x = GlobalAveragePooling2D()(base_model.output) x = Dropout(0.5 )(x) x = Dense(1024, activation='relu' )(x) x = Dropout(0.5 )(x) final_output = Dense(n_out, activation='softmax', name='final_output' )(x) model = Model(input_tensor, final_output) return model EarlyStopping, ReduceLROnPlateau,CSVLogger) epochs = 30; batch_size = 32 checkpoint = ModelCheckpoint('.. /working/densenet_.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only = True) reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, verbose=1, mode='auto', epsilon=0.0001) early = EarlyStopping(monitor="val_loss", mode="min", patience=9) csv_logger = CSVLogger(filename='.. /working/training_log.csv', separator=',', append=True) train_generator = My_Generator(train_x, train_y, 128, is_train=True) train_mixup = My_Generator(train_x, train_y, batch_size, is_train=True, mix=False, augment=True) valid_generator = My_Generator(valid_x, valid_y, batch_size, is_train=False) model = create_model( input_shape=(SIZE,SIZE,3), n_out=NUM_CLASSES) def kappa_loss(y_true, y_pred, y_pow=2, eps=1e-12, N=5, bsize=32, name='kappa'): with tf.name_scope(name): y_true = tf.to_float(y_true) repeat_op = tf.to_float(tf.tile(tf.reshape(tf.range(0, N), [N, 1]), [1, N])) repeat_op_sq = tf.square(( repeat_op - tf.transpose(repeat_op))) weights = repeat_op_sq / tf.to_float(( N - 1)** 2) pred_ = y_pred ** y_pow try: pred_norm = pred_ /(eps + tf.reshape(tf.reduce_sum(pred_, 1), [-1, 1])) except Exception: pred_norm = pred_ /(eps + tf.reshape(tf.reduce_sum(pred_, 1), [bsize, 1])) hist_rater_a = tf.reduce_sum(pred_norm, 0) hist_rater_b = tf.reduce_sum(y_true, 0) conf_mat = tf.matmul(tf.transpose(pred_norm), y_true) nom = tf.reduce_sum(weights * conf_mat) denom = tf.reduce_sum(weights * tf.matmul( tf.reshape(hist_rater_a, [N, 1]), tf.reshape(hist_rater_b, [1, N])) / tf.to_float(bsize)) return nom*0.5 /(denom + eps)+ categorical_crossentropy(y_true, y_pred)*0.5 class QWKEvaluation(Callback): def __init__(self, validation_data=() , batch_size=64, interval=1): super(Callback, self ).__init__() self.interval = interval self.batch_size = batch_size self.valid_generator, self.y_val = validation_data self.history = [] def on_epoch_end(self, epoch, logs={}): if epoch % self.interval == 0: y_pred = self.model.predict_generator(generator=self.valid_generator, steps=np.ceil(float(len(self.y_val)) / float(self.batch_size)) , workers=1, use_multiprocessing=False, verbose=1) def flatten(y): return np.argmax(y, axis=1 ).reshape(-1) score = 
cohen_kappa_score(flatten(self.y_val), flatten(y_pred), labels=[0,1,2,3,4], weights='quadratic') print(" epoch: %d - QWK_score: %.6f " %(epoch+1, score)) self.history.append(score) if score >= max(self.history): print('saving checkpoint: ', score) self.model.save('.. /working/densenet_bestqwk.h5') qwk = QWKEvaluation(validation_data=(valid_generator, valid_y), batch_size=batch_size, interval=1) for layer in model.layers: layer.trainable = False for i in range(-3,0): model.layers[i].trainable = True model.compile( loss='categorical_crossentropy', optimizer=Adam(1e-3)) for layer in model.layers: layer.trainable = True callbacks_list = [checkpoint, csv_logger, reduceLROnPlat, early, qwk] model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4)) submission3 = pd.read_csv('.. /input/aptos2019-blindness-detection/sample_submission.csv') model.load_weights('.. /input/keras-base/m_aptos_vote/densenet_bestqwk.h5') predicted = [] for i, name in tqdm(enumerate(submission3['id_code'])) : path = os.path.join('.. /input/aptos2019-blindness-detection/test_images/', name+'.png') image = cv2.imread(path) image = cv2.resize(image,(SIZE, SIZE)) X = np.array(( image[np.newaxis])/255) score_predict=(( model.predict(X ).ravel() *model.predict(X[:, ::-1, :, :] ).ravel() *model.predict(X[:, ::-1, ::-1, :] ).ravel() *model.predict(X[:, :, ::-1, :] ).ravel())**0.25 ).tolist() label_predict = np.argmax(score_predict) predicted.append(label_predict) submission3['diagnosis'] = predicted<import_modules>
le.fit(["male","female"]) test["Sex"] = le.fit_transform(test["Sex"]) test["Male"] = pd.get_dummies(test["Sex"],drop_first=True) test.head()
Titanic - Machine Learning from Disaster
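The final prediction loop above combines four deterministic flip views with a geometric rather than arithmetic mean: the four softmax vectors are multiplied together and raised to the 1/4 power before argmax. That combination on its own (array values are illustrative):

import numpy as np

# Class probabilities for one sample under original, h-flip, hv-flip, v-flip views.
views = np.array([[0.7, 0.2, 0.1],
                  [0.6, 0.3, 0.1],
                  [0.8, 0.1, 0.1],
                  [0.5, 0.4, 0.1]])
geo = np.prod(views, axis=0) ** 0.25  # geometric mean penalizes views that disagree
print(np.argmax(geo))                 # class chosen after combining the views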
8,325,212
import torch import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import style import seaborn as sns from sklearn.model_selection import StratifiedKFold from joblib import load, dump from sklearn.metrics import cohen_kappa_score from sklearn.metrics import confusion_matrix from fastai import * from fastai.vision import * from fastai.callbacks import * from torchvision import models as md from torch import nn from torch.nn import functional as F import re import math import collections from functools import partial from torch.utils import model_zoo from sklearn import metrics from collections import Counter import json<categorify>
scaler = StandardScaler()
Titanic - Machine Learning from Disaster
8,325,212
GlobalParams = collections.namedtuple('GlobalParams', [ 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate', 'num_classes', 'width_coefficient', 'depth_coefficient', 'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size']) BlockArgs = collections.namedtuple('BlockArgs', [ 'kernel_size', 'num_repeat', 'input_filters', 'output_filters', 'expand_ratio', 'id_skip', 'stride', 'se_ratio']) GlobalParams.__new__.__defaults__ =(None,)* len(GlobalParams._fields) BlockArgs.__new__.__defaults__ =(None,)* len(BlockArgs._fields) def relu_fn(x): return x * torch.sigmoid(x) def round_filters(filters, global_params): multiplier = global_params.width_coefficient if not multiplier: return filters divisor = global_params.depth_divisor min_depth = global_params.min_depth filters *= multiplier min_depth = min_depth or divisor new_filters = max(min_depth, int(filters + divisor / 2)// divisor * divisor) if new_filters < 0.9 * filters: new_filters += divisor return int(new_filters) def round_repeats(repeats, global_params): multiplier = global_params.depth_coefficient if not multiplier: return repeats return int(math.ceil(multiplier * repeats)) def drop_connect(inputs, p, training): if not training: return inputs batch_size = inputs.shape[0] keep_prob = 1 - p random_tensor = keep_prob random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device) binary_tensor = torch.floor(random_tensor) output = inputs / keep_prob * binary_tensor return output def get_same_padding_conv2d(image_size=None): if image_size is None: return Conv2dDynamicSamePadding else: return partial(Conv2dStaticSamePadding, image_size=image_size) class Conv2dDynamicSamePadding(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True): super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias) self.stride = self.stride if len(self.stride)== 2 else [self.stride[0]]*2 def forward(self, x): ih, iw = x.size() [-2:] kh, kw = self.weight.size() [-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max(( oh - 1)* self.stride[0] +(kh - 1)* self.dilation[0] + 1 - ih, 0) pad_w = max(( ow - 1)* self.stride[1] +(kw - 1)* self.dilation[1] + 1 - iw, 0) if pad_h > 0 or pad_w > 0: x = F.pad(x, [pad_w//2, pad_w - pad_w//2, pad_h//2, pad_h - pad_h//2]) return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) class Conv2dStaticSamePadding(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs): super().__init__(in_channels, out_channels, kernel_size, **kwargs) self.stride = self.stride if len(self.stride)== 2 else [self.stride[0]] * 2 assert image_size is not None ih, iw = image_size if type(image_size)== list else [image_size, image_size] kh, kw = self.weight.size() [-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) pad_h = max(( oh - 1)* self.stride[0] +(kh - 1)* self.dilation[0] + 1 - ih, 0) pad_w = max(( ow - 1)* self.stride[1] +(kw - 1)* self.dilation[1] + 1 - iw, 0) if pad_h > 0 or pad_w > 0: self.static_padding = nn.ZeroPad2d(( pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)) else: self.static_padding = Identity() def forward(self, x): x = self.static_padding(x) x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) return x class Identity(nn.Module): def __init__(self,): super(Identity, self ).__init__() def forward(self, input): return 
input def efficientnet_params(model_name): params_dict = { 'efficientnet-b0':(1.0, 1.0, 224, 0.2), 'efficientnet-b1':(1.0, 1.1, 240, 0.2), 'efficientnet-b2':(1.1, 1.2, 260, 0.3), 'efficientnet-b3':(1.2, 1.4, 300, 0.3), 'efficientnet-b4':(1.4, 1.8, 380, 0.4), 'efficientnet-b5':(1.6, 2.2, 456, 0.4), 'efficientnet-b6':(1.8, 2.6, 528, 0.5), 'efficientnet-b7':(2.0, 3.1, 600, 0.5), } return params_dict[model_name] class BlockDecoder(object): @staticmethod def _decode_block_string(block_string): assert isinstance(block_string, str) ops = block_string.split('_') options = {} for op in ops: splits = re.split(r'(\d.*)', op) if len(splits)>= 2: key, value = splits[:2] options[key] = value assert(( 's' in options and len(options['s'])== 1)or (len(options['s'])== 2 and options['s'][0] == options['s'][1])) return BlockArgs( kernel_size=int(options['k']), num_repeat=int(options['r']), input_filters=int(options['i']), output_filters=int(options['o']), expand_ratio=int(options['e']), id_skip=('noskip' not in block_string), se_ratio=float(options['se'])if 'se' in options else None, stride=[int(options['s'][0])]) @staticmethod def _encode_block_string(block): args = [ 'r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' %(block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters ] if 0 < block.se_ratio <= 1: args.append('se%s' % block.se_ratio) if block.id_skip is False: args.append('noskip') return '_'.join(args) @staticmethod def decode(string_list): assert isinstance(string_list, list) blocks_args = [] for block_string in string_list: blocks_args.append(BlockDecoder._decode_block_string(block_string)) return blocks_args @staticmethod def encode(blocks_args): block_strings = [] for block in blocks_args: block_strings.append(BlockDecoder._encode_block_string(block)) return block_strings def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2, drop_connect_rate=0.2, image_size=None, num_classes=1000): blocks_args = [ 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25', 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25', 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25', 'r1_k3_s11_e6_i192_o320_se0.25', ] blocks_args = BlockDecoder.decode(blocks_args) global_params = GlobalParams( batch_norm_momentum=0.99, batch_norm_epsilon=1e-3, dropout_rate=dropout_rate, drop_connect_rate=drop_connect_rate, num_classes=num_classes, width_coefficient=width_coefficient, depth_coefficient=depth_coefficient, depth_divisor=8, min_depth=None, image_size=image_size, ) return blocks_args, global_params def get_model_params(model_name, override_params): if model_name.startswith('efficientnet'): w, d, s, p = efficientnet_params(model_name) blocks_args, global_params = efficientnet( width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s) else: raise NotImplementedError('model name is not pre-defined: %s' % model_name) if override_params: global_params = global_params._replace(**override_params) return blocks_args, global_params url_map = { 'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet-b0-08094119.pth', 'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet-b1-dbc7070a.pth', 'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet-b2-27687264.pth', 'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet-b3-c8376fa2.pth', 'efficientnet-b4': 
'http://storage.googleapis.com/public-models/efficientnet-b4-e116e8b3.pth', 'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet-b5-586e6cc6.pth', } def load_pretrained_weights(model, model_name, load_fc=True): state_dict = model_zoo.load_url(url_map[model_name]) if load_fc: model.load_state_dict(state_dict) else: state_dict.pop('_fc.weight') state_dict.pop('_fc.bias') res = model.load_state_dict(state_dict, strict=False) assert str(res.missing_keys)== str(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights' print('Loaded pretrained weights for {}'.format(model_name)) class MBConvBlock(nn.Module): def __init__(self, block_args, global_params): super().__init__() self._block_args = block_args self._bn_mom = 1 - global_params.batch_norm_momentum self._bn_eps = global_params.batch_norm_epsilon self.has_se =(self._block_args.se_ratio is not None)and(0 < self._block_args.se_ratio <= 1) self.id_skip = block_args.id_skip Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) inp = self._block_args.input_filters oup = self._block_args.input_filters * self._block_args.expand_ratio if self._block_args.expand_ratio != 1: self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) k = self._block_args.kernel_size s = self._block_args.stride self._depthwise_conv = Conv2d( in_channels=oup, out_channels=oup, groups=oup, kernel_size=k, stride=s, bias=False) self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) if self.has_se: num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio)) self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1) self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1) final_oup = self._block_args.output_filters self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False) self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) def forward(self, inputs, drop_connect_rate=None): x = inputs if self._block_args.expand_ratio != 1: x = relu_fn(self._bn0(self._expand_conv(inputs))) x = relu_fn(self._bn1(self._depthwise_conv(x))) if self.has_se: x_squeezed = F.adaptive_avg_pool2d(x, 1) x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed))) x = torch.sigmoid(x_squeezed)* x x = self._bn2(self._project_conv(x)) input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters: if drop_connect_rate: x = drop_connect(x, p=drop_connect_rate, training=self.training) x = x + inputs return x class EfficientNet(nn.Module): def __init__(self, blocks_args=None, global_params=None): super().__init__() assert isinstance(blocks_args, list), 'blocks_args should be a list' assert len(blocks_args)> 0, 'block args must be greater than 0' self._global_params = global_params self._blocks_args = blocks_args Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) bn_mom = 1 - self._global_params.batch_norm_momentum bn_eps = self._global_params.batch_norm_epsilon in_channels = 3 out_channels = round_filters(32, self._global_params) self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False) self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) 
self._blocks = nn.ModuleList([]) for block_args in self._blocks_args: block_args = block_args._replace( input_filters=round_filters(block_args.input_filters, self._global_params), output_filters=round_filters(block_args.output_filters, self._global_params), num_repeat=round_repeats(block_args.num_repeat, self._global_params) ) self._blocks.append(MBConvBlock(block_args, self._global_params)) if block_args.num_repeat > 1: block_args = block_args._replace(input_filters=block_args.output_filters, stride=1) for _ in range(block_args.num_repeat - 1): self._blocks.append(MBConvBlock(block_args, self._global_params)) in_channels = block_args.output_filters out_channels = round_filters(1280, self._global_params) self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False) self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) self._dropout = self._global_params.dropout_rate self._fc = nn.Linear(out_channels, self._global_params.num_classes) def extract_features(self, inputs): x = relu_fn(self._bn0(self._conv_stem(inputs))) for idx, block in enumerate(self._blocks): drop_connect_rate = self._global_params.drop_connect_rate if drop_connect_rate: drop_connect_rate *= float(idx)/ len(self._blocks) x = block(x, drop_connect_rate=drop_connect_rate) x = relu_fn(self._bn1(self._conv_head(x))) return x def forward(self, inputs): x = self.extract_features(inputs) x = F.adaptive_avg_pool2d(x, 1 ).squeeze(-1 ).squeeze(-1) if self._dropout: x = F.dropout(x, p=self._dropout, training=self.training) x = self._fc(x) return x @classmethod def from_name(cls, model_name, override_params=None): cls._check_model_name_is_valid(model_name) blocks_args, global_params = get_model_params(model_name, override_params) return EfficientNet(blocks_args, global_params) @classmethod def from_pretrained(cls, model_name, num_classes=1000): model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes}) return model @classmethod def get_image_size(cls, model_name): cls._check_model_name_is_valid(model_name) _, _, res, _ = efficientnet_params(model_name) return res @classmethod def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False): num_models = 4 if also_need_pretrained_weights else 8 valid_models = ['efficientnet_b'+str(i)for i in range(num_models)] if model_name.replace('-','_')not in valid_models: raise ValueError('model_name should be one of: ' + ', '.join(valid_models)) md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1) !mkdir models !cp '.. /input/kaggle-public-copy/abcdef.pth' 'models' def get_df() : base_image_dir = os.path.join('.. ', 'input/aptos2019-blindness-detection/') train_dir = os.path.join(base_image_dir,'train_images/') df = pd.read_csv(os.path.join(base_image_dir, 'train.csv')) df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir,'{}.png'.format(x))) df = df.drop(columns=['id_code']) df = df.sample(frac=1 ).reset_index(drop=True) test_df = pd.read_csv('.. 
/input/aptos2019-blindness-detection/sample_submission.csv') return df, test_df df, test_df = get_df() bs = 64 sz = 224 tfms = get_transforms(do_flip=True,flip_vert=True) data =(ImageList.from_df(df=df,path='./',cols='path') .split_by_rand_pct(0.2) .label_from_df(cols='diagnosis',label_cls=FloatList) .transform(tfms,size=sz,resize_method=ResizeMethod.SQUISH,padding_mode='zeros') .databunch(bs=bs,num_workers=4) .normalize(imagenet_stats) ) def qk(y_pred, y): return torch.tensor(cohen_kappa_score(torch.round(y_pred), y, weights='quadratic'), device='cuda:0') learn = Learner(data, md_ef, metrics = [qk], model_dir="models" ).to_fp16() learn.data.add_test(ImageList.from_df(test_df, '.. /input/aptos2019-blindness-detection', folder='test_images', suffix='.png')) learn.load('abcdef'); class OptimizedRounder(object): def __init__(self): self.coef_ = 0 def _kappa_loss(self, coef, X, y): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic') return -ll def fit(self, X, y): loss_partial = partial(self._kappa_loss, X=X, y=y) initial_coef = [0.5, 1.5, 2.5, 3.5] self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead') print(-loss_partial(self.coef_['x'])) def predict(self, X, coef): X_p = np.copy(X) for i, pred in enumerate(X_p): if pred < coef[0]: X_p[i] = 0 elif pred >= coef[0] and pred < coef[1]: X_p[i] = 1 elif pred >= coef[1] and pred < coef[2]: X_p[i] = 2 elif pred >= coef[2] and pred < coef[3]: X_p[i] = 3 else: X_p[i] = 4 return X_p def coefficients(self): return self.coef_['x'] coefficients=[0.5, 1.5, 2.5, 3.5] opt = OptimizedRounder() preds4,y = learn.get_preds(DatasetType.Test) tst_pred = opt.predict(preds4, coefficients) test_df.diagnosis = tst_pred.astype(int) submission4 = test_df.copy() submission4.to_csv('submission4.csv',index=False) print('done' )<load_from_csv>
s = scaler.fit(test[["Fare"]])
Titanic - Machine Learning from Disaster
8,325,212
img = np.loadtxt(".. /input/aptos2019-blindness-detection/train.csv", delimiter=",", skiprows=1, usecols=(0), dtype = "str" ) img label = np.loadtxt(".. /input/aptos2019-blindness-detection/train.csv", delimiter=",", skiprows=1, usecols=(1), dtype = "int" ) label img_label_trains = [] img_label_validations = [] for i in range(3): data_train, data_test, labels_train, labels_test = train_test_split(img, label, train_size=0.75,random_state=i*5,stratify=label) img_label_train = np.stack([data_train, labels_train],axis=1) img_label_validation = np.stack([data_test, labels_test],axis=1) img_label_trains.append(img_label_train) img_label_validations.append(img_label_validation) img_width, img_height = 299, 299 num_train = int(len(data_train)) num_val = int(len(data_test)) batch_size = 4 print(num_train, num_val) abs_path = ".. /input/aptos2019-blindness-detection/train_images/" def vertical_flip(image, rate=0.5): if np.random.rand() < rate: image = image[::-1, :, :] return image def horizontal_flip(image): image = image[:, ::-1, :] return image def image_translation(img): params = np.random.randint(-150, 151) if not isinstance(params, list): params = [params, params] rows, cols, ch = img.shape M = np.float32([[1, 0, params[0]], [0, 1, params[1]]]) dst = cv2.warpAffine(img, M,(cols, rows)) return dst def image_shear(img): params = np.random.randint(-5, 6)*0.1 rows, cols, ch = img.shape factor = params*(-1.0) M = np.float32([[1, factor, 0], [0, 1, 0]]) dst = cv2.warpAffine(img, M,(cols, rows)) return dst def image_rotation(img): params = np.random.randint(-30, 31) rows, cols, ch = img.shape M = cv2.getRotationMatrix2D(( cols/2, rows/2), params, 1) dst = cv2.warpAffine(img, M,(cols, rows)) return dst def image_contrast(img): params = np.random.randint(5, 20)*0.1 alpha = params new_img = cv2.multiply(img, np.array([alpha])) return new_img def image_blur(img): params = params = np.random.randint(1, 21) blur = [] if params == 1: blur = cv2.blur(img,(3, 3)) if params == 2: blur = cv2.blur(img,(4, 4)) if params == 3: blur = cv2.blur(img,(5, 5)) if params == 4: blur = cv2.GaussianBlur(img,(3, 3), 0) if params == 5: blur = cv2.GaussianBlur(img,(5, 5), 0) if params == 6: blur = cv2.GaussianBlur(img,(7, 7), 0) if params == 7: blur = cv2.medianBlur(img, 3) if params == 8: blur = cv2.medianBlur(img, 5) if params == 9: blur = cv2.blur(img,(6, 6)) if params == 10: blur = cv2.bilateralFilter(img, 9, 75, 75) if params > 10: blur = img return blur def image_brightness2(img): params = np.random.randint(-21, 22) beta = params b, g, r = cv2.split(img) b = cv2.add(b, beta) g = cv2.add(g, beta) r = cv2.add(r, beta) new_img = cv2.merge(( b, g, r)) return new_img def get_random_data(image_lines_1, abs_path, img_width, img_height): image_file = abs_path + image_lines_1[0] + ".png" label = np.eye(5)[int(image_lines_1[1])] seed_image = cv2.imread(image_file) seed_image = cv2.cvtColor(seed_image, cv2.COLOR_BGR2RGB) seed_image = cv2.resize(seed_image, dsize=(img_width, img_height)) seed_image = vertical_flip(seed_image) seed_image = horizontal_flip(seed_image) seed_image = image_shear(seed_image) seed_image = image_rotation(seed_image) seed_image = image_contrast(seed_image) seed_image = image_blur(seed_image) seed_image = image_brightness2(seed_image) seed_image = seed_image / 255 return seed_image, label def data_generator(image_lines, batch_size, abs_path, img_width, img_height): n = len(image_lines) i = 0 while True: image_data = [] label_data = [] for b in range(batch_size): if i==0: np.random.shuffle(image_lines) image, 
label = get_random_data(image_lines[i], abs_path, img_width, img_height) image_data.append(image) label_data.append(label) i =(i+1)% n image_data = np.array(image_data) label_data = np.array(label_data) yield image_data, label_data def data_generator_wrapper(image_lines, batch_size, abs_path, img_width, img_height): n = len(image_lines) if n==0 or batch_size<=0: return None return data_generator(image_lines, batch_size, abs_path, img_width, img_height) models = [] for i in range(3): input_tensor = Input(shape=(img_height, img_width, 3)) xception_model = Xception(include_top=False, weights=None, input_tensor=input_tensor) xception_model.load_weights(".. /input/keras-pretrained-models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5") x = xception_model.output x = GlobalAveragePooling2D()(x) x = Dense(1024, activation='relu' )(x) x = Dropout(0.3 )(x) outputs = Dense(5, activation='softmax' )(x) model = Model(inputs=xception_model.input, outputs=outputs) model.compile(optimizer=optimizers.SGD(lr=0.001,momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy']) model.summary() models.append(model) modelpath = '/kaggle/input/xception-0-757/' img_test = np.loadtxt(".. /input/aptos2019-blindness-detection/test.csv", delimiter=",", skiprows=1, dtype = "str" ) models[0].load_weights("/kaggle/input/xception-0-757/best_weight0.h5") models[1].load_weights("/kaggle/input/xception-0-757/best_weight1.h5") models[2].load_weights("/kaggle/input/xception-0-757/best_weight2.h5") test_abs_path = ".. /input/aptos2019-blindness-detection/test_images/" data = [] for i in range(len(img_test)) : image_file = test_abs_path + img_test[i] + ".png" seed_image = cv2.imread(image_file) seed_image = cv2.cvtColor(seed_image, cv2.COLOR_BGR2RGB) seed_image = cv2.resize(seed_image, dsize=(img_width, img_height)) seed_image = np.expand_dims(seed_image, axis=0) seed_image = seed_image / 255 predict1 = models[0].predict(seed_image) predict2 = models[1].predict(seed_image) predict3 = models[2].predict(seed_image) predict_mean =(predict1+predict2+predict3)/3 x = np.array([img_test[i], np.argmax(predict_mean)]) data.append(x) data = np.array(data) columns = ['id_code', 'diagnosis'] name = 'sample' <save_to_csv>
test["Fare"] = s.transform(test[["Fare"]]) test.head()
Titanic - Machine Learning from Disaster
8,325,212
d = pd.DataFrame(data=data, columns=columns, dtype='str')
d['diagnosis'] = d['diagnosis'].astype(int)
d.to_csv("submission_xce_.csv", index=False)<define_variables>
x = test[['Male', 'Agegroup', 'SibSp','Pclass', 'Parch', 'Fare', 'Embarked']].values
Titanic - Machine Learning from Disaster
8,325,212
wei = [0.4, 0.6]
ker = [submission3, d]<define_variables>
ypredict = tree.predict(x)
ypredict
Titanic - Machine Learning from Disaster
8,325,212
numClass = 5
subemp = np.zeros((ker[0].shape[0], numClass))<prepare_output>
submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': ypredict})
Titanic - Machine Learning from Disaster
8,325,212
for i in range(len(ker)):
    subemp[ker[i].index, ker[i].diagnosis.tolist()] += wei[i]
print(subemp)<save_to_csv>
filename = 'Titanic1.csv'
submission.to_csv(filename, index=False)
print('Saved file: ' + filename)
Titanic - Machine Learning from Disaster
7,976,123
subKER = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
subKER['diagnosis'] = subemp.argmax(1).astype(int)
subKER.to_csv('submissionKER.csv', index=False)<define_variables>
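For reference, the weighted hard-voting above reduces to a vote matrix plus argmax. A minimal self-contained sketch on toy data (the weights and predictions here are illustrative, not the notebook's):
import numpy as np

preds_a = np.array([0, 2, 4])          # classes from model A
preds_b = np.array([0, 3, 4])          # classes from model B
votes = np.zeros((3, 5))               # (n_samples, n_classes)
votes[np.arange(3), preds_a] += 0.4    # model A's weight
votes[np.arange(3), preds_b] += 0.6    # model B's weight
print(votes.argmax(1))                 # [0 3 4]; disagreements go to the heavier model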
sns.set()
pd.set_option('display.max_rows', None)
Titanic - Machine Learning from Disaster
7,976,123
score = [0.777, 0.758, 0.749, 0.783]
weight = [0.29, 0.16, 0.09, 0.06, 0.40]
subData = [submission1, submission2, d, submission3, submission4]
predsData = [preds1, preds2, preds4]<save_to_csv>
train=pd.read_csv('/kaggle/input/titanic/train.csv')
test=pd.read_csv('/kaggle/input/titanic/test.csv')
Titanic - Machine Learning from Disaster
7,976,123
numClass = 5
subTemp = np.zeros((subData[0].shape[0], numClass))
for i in range(len(subData)):
    subTemp[subData[i].index, subData[i].diagnosis.tolist()] += weight[i]
print(subTemp)
sub = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
sub['diagnosis'] = subTemp.argmax(1).astype(int)
sub.to_csv('submission.csv', index=False)<define_variables>
print('Total by Sex')
print(train.Sex.value_counts())
print('\nTotal Survived by Sex')
print(train.loc[train.Survived==1].Sex.value_counts())
Titanic - Machine Learning from Disaster
7,976,123
numClass = 5
subTemp = np.zeros((subData[0].shape[0], numClass))
for i in range(len(subData)):
    subTemp[subData[i].index, subData[i].diagnosis.tolist()] += weight[i]
print(subTemp)<save_to_csv>
train_with_age = train.query('Age!="NaN"')
print('Total Survived by Age under 10')
print(train_with_age.loc[train_with_age.Age <= 10].Survived.value_counts())
print('\nTotal Survived by Age between 10 and 20')
print(train_with_age.loc[(train_with_age.Age > 10) & (train_with_age.Age <= 20)].Survived.value_counts())
print('\nTotal Survived by Age between 20 and 30')
print(train_with_age.loc[(train_with_age.Age > 20) & (train_with_age.Age <= 30)].Survived.value_counts())
print('\nTotal Survived by Age between 30 and 45')
print(train_with_age.loc[(train_with_age.Age > 30) & (train_with_age.Age <= 45)].Survived.value_counts())
print('\nTotal Survived by Age above 45')
print(train_with_age.loc[train_with_age.Age > 45].Survived.value_counts())
Titanic - Machine Learning from Disaster
7,976,123
sub = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
sub['diagnosis'] = subTemp.argmax(1).astype(int)
sub.to_csv('submission.csv', index=False)<load_from_csv>
print('Total by Pclass')
print(train.Pclass.value_counts())
print('\nTotal Survived by Pclass')
print(train.loc[train.Survived==1].Pclass.value_counts())
Titanic - Machine Learning from Disaster
7,976,123
<set_options>
complete=pd.concat([train,test],ignore_index=True)
Titanic - Machine Learning from Disaster
7,976,123
%matplotlib inline<define_variables>
complete.drop(['Cabin','Ticket'],axis=1,inplace=True)
Titanic - Machine Learning from Disaster
7,976,123
TRAINING = True<load_from_csv>
complete['Title']=complete.Name.str.extract(r'([A-Za-z]+)\.')
Titanic - Machine Learning from Disaster
7,976,123
test_df = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print(test_df.shape)
if TRAINING:
    train_df = pd.read_csv('../input/aptos2019-blindness-detection/train.csv')
    print(train_df.shape)
    train_df.head()<load_from_csv>
q1_fare=complete.Fare.quantile(0.25)
q3_fare=complete.Fare.quantile(0.75)
IQR=q3_fare-q1_fare
min_val=q1_fare-(IQR*1.5)
max_val=q3_fare+(IQR*1.5)
print('Minimum: {}'.format(min_val))
print('Maximum: {}'.format(max_val))
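As a sketch of how these Tukey fences might be used next (an assumed follow-up, not code from the notebook), one option is to flag and cap Fare rather than drop rows:
outlier_mask = ~complete.Fare.between(min_val, max_val)
print('Fares outside [{:.2f}, {:.2f}]: {}'.format(min_val, max_val, outlier_mask.sum()))
# complete['Fare'] = complete.Fare.clip(lower=min_val, upper=max_val)  # optional winsorizing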
Titanic - Machine Learning from Disaster
7,976,123
<define_variables>
for row in range(len(complete)):
    if np.isnan(complete.loc[row,'Age']):
        complete.loc[row,'Age'] = complete.loc[(complete.Title==complete.loc[row,'Title']) & (complete.SibSp==complete.loc[row,'SibSp'])].Age.mean()
Titanic - Machine Learning from Disaster
7,976,123
IMG_SIZE = 224
NB_CHANNELS = 3<categorify>
complete.Fare.fillna(complete.Fare.mean(), inplace=True)
Titanic - Machine Learning from Disaster
7,976,123
def get_pad_width(im, new_shape, is_rgb=True):
    # symmetric padding widths that grow `im` to new_shape x new_shape
    pad_diff = new_shape - im.shape[0], new_shape - im.shape[1]
    t, b = math.floor(pad_diff[0]/2), math.ceil(pad_diff[0]/2)
    l, r = math.floor(pad_diff[1]/2), math.ceil(pad_diff[1]/2)
    if is_rgb:
        pad_width = ((t, b), (l, r), (0, 0))
    else:
        pad_width = ((t, b), (l, r))
    return pad_width

def standardize(x):
    # zero-mean / unit-variance after scaling into [0, 1]
    x = x.astype(np.float32)
    x = x / np.max(x)
    return (x - np.mean(x)) / np.std(x)

def normalize(img):
    # min-max rescale to the full uint8 range
    img = ((img - np.min(img)) / (np.max(img) - np.min(img))) * 255
    return img.astype(np.uint8)

def crop_image(img, tol=10):
    # crop away near-black borders (pixels <= tol), channel by channel
    def crop_image_1(img):
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    if img.ndim == 2:
        return crop_image_1(img)
    elif img.ndim == 3:
        try:
            img_cpy = img.copy()
            h, w, _ = img.shape
            img1 = cv2.resize(crop_image_1(img[:, :, 0]), (w, h))
            img2 = cv2.resize(crop_image_1(img[:, :, 1]), (w, h))
            img3 = cv2.resize(crop_image_1(img[:, :, 2]), (w, h))
            img[:,:,0] = img1
            img[:,:,1] = img2
            img[:,:,2] = img3
        except:
            return img_cpy
        return img

def preprocess_image(im):
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = normalize(im)
    im = crop_image(im)
    im = cv2.resize(im, (IMG_SIZE, IMG_SIZE))
    # Ben Graham-style enhancement: subtract a Gaussian-blurred copy
    im = cv2.addWeighted(im, 4, cv2.GaussianBlur(im, (0, 0), IMG_SIZE / 10), -4, 128)
    return im.astype(np.uint8)<prepare_x_and_y>
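A quick sanity check of the preprocessing pipeline above — a sketch, assuming `train_df` from the earlier cell is in scope and taking an arbitrary id from it:
sample_id = train_df['id_code'].iloc[0]
img = cv2.imread(f'../input/aptos2019-blindness-detection/train_images/{sample_id}.png')
out = preprocess_image(img)
print(out.shape, out.dtype)   # expected: (224, 224, 3) uint8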
complete.loc[complete.Age.isna()]
Titanic - Machine Learning from Disaster
7,976,123
if TRAINING:
    N = train_df.shape[0]
    x_train = np.empty((N, 224, 224, 3), dtype=np.uint8)
    for i, image_id in enumerate(tqdm(train_df['id_code'])):
        x_train[i, :, :, :] = preprocess_image(cv2.imread(
            f'../input/aptos2019-blindness-detection/train_images/{image_id}.png'
        ))<prepare_x_and_y>
for row in range(len(complete)):
    if np.isnan(complete.loc[row,'Age']):
        complete.loc[row,'Age'] = complete.loc[complete.Title==complete.loc[row,'Title']].Age.mean()
Titanic - Machine Learning from Disaster
7,976,123
N = test_df.shape[0]
x_test = np.empty((N, 224, 224, 3), dtype=np.uint8)
for i, image_id in enumerate(tqdm(test_df['id_code'])):
    x_test[i, :, :, :] = preprocess_image(cv2.imread(
        f'../input/aptos2019-blindness-detection/test_images/{image_id}.png'
    ))<categorify>
complete.drop(['Name','Embarked'],axis=1,inplace=True)
Titanic - Machine Learning from Disaster
7,976,123
if TRAINING:
    y_train = pd.get_dummies(train_df['diagnosis']).values
    print(x_train.shape)
    print(y_train.shape)
print(x_test.shape)<split>
def sex(x):
    if x == 'male':
        return 0
    else:
        return 1

complete['Sex']=complete.Sex.apply(sex)
Titanic - Machine Learning from Disaster
7,976,123
if TRAINING:
    x_train, x_val, y_train, y_val = train_test_split(
        x_train, y_train,
        test_size=0.2
    )<filter>
def age(x):
    if x <= 10:
        return 0
    elif x <= 20:
        return 1
    elif x <= 30:
        return 2
    elif x <= 45:
        return 3
    else:
        return 4

complete['Age']=complete.Age.apply(age)
Titanic - Machine Learning from Disaster
7,976,123
<data_type_conversions>
train=complete.loc[0:890]
test=complete.loc[891:]
Titanic - Machine Learning from Disaster
7,976,123
<categorify>
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
Titanic - Machine Learning from Disaster
7,976,123
<prepare_x_and_y>
def tuning_random_forest(MaxLeafNodes, MaxDepth, NEstimators):
    model = RandomForestClassifier(random_state=1, max_leaf_nodes=MaxLeafNodes, max_depth=MaxDepth, n_estimators=NEstimators)
    X = train[['Age','Fare','Pclass','Sex']]
    y = train.Survived
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
    model.fit(X_train, y_train)
    predicted = model.predict(X_test)
    accuracy = accuracy_score(y_test, predicted)
    if accuracy > 0.8:
        print('Max Leaf Nodes:{} Max Depth:{} N Estimators:{}'.format(MaxLeafNodes, MaxDepth, NEstimators))
        print('Accuracy: {}'.format(accuracy))
        print()
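The helper above only reports configurations clearing 0.8 accuracy, so it lends itself to a small grid sweep. A plausible driver loop (the grid values are assumptions, not from the notebook):
for max_leaf_nodes in [10, 20, 30, 50]:
    for max_depth in [5, 10, 15]:
        for n_estimators in [100, 200, 500]:
            tuning_random_forest(max_leaf_nodes, max_depth, n_estimators)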
Titanic - Machine Learning from Disaster
7,976,123
<data_type_conversions>
model=RandomForestClassifier(random_state=1,max_leaf_nodes=30,max_depth=10,n_estimators=200)
X=train[['Age','Fare','Pclass','Sex']]
y=train.Survived
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2,random_state=1)
model.fit(X_train,y_train)
predicted=model.predict(X_test)
accuracy_score(y_test,predicted)
Titanic - Machine Learning from Disaster
7,976,123
<randomize_order>
test2=test.loc[:,['Age','Fare','Pclass','Sex']]
X=train[['Age','Fare','Pclass','Sex']]
y=train.Survived
model.fit(X,y)
predicted=model.predict(test2)
Titanic - Machine Learning from Disaster
7,976,123
if TRAINING:
    indexes = np.random.permutation(len(x_train))
    x_train = x_train[indexes]
    y_train = y_train[indexes]<categorify>
test2=test.loc[:,['PassengerId']]
submission=pd.DataFrame({'PassengerId':test2.PassengerId,'Survived':predicted})
submission=submission.astype('int32')
submission.to_csv('submission.csv',index=False)
Titanic - Machine Learning from Disaster
5,213,367
if TRAINING:
    # cumulative "ordinal" targets: class k becomes k+1 leading ones,
    # e.g. diagnosis 3 -> [1, 1, 1, 1, 0]
    for i in range(3, -1, -1):
        y_train[:, i] = np.logical_or(y_train[:, i], y_train[:, i + 1])
        y_val[:, i] = np.logical_or(y_val[:, i], y_val[:, i + 1])
    print("Multilabel version:", y_train.sum(axis=0))<normalization>
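A standalone round trip of this cumulative encoding on toy labels (not notebook data), to make the decode rule used later (`sum(axis=1) - 1`) concrete:
import numpy as np

labels = np.array([0, 2, 4])
multi = np.eye(5, dtype=int)[labels]            # one-hot
for i in range(3, -1, -1):                      # cumulative OR from the right
    multi[:, i] = np.logical_or(multi[:, i], multi[:, i + 1])
print(multi)                                    # rows: [1 0 0 0 0], [1 1 1 0 0], [1 1 1 1 1]
print((multi.sum(axis=1) - 1).clip(0, 4))       # recovers [0 2 4]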
train_data = '../input/titanic/train.csv'
test_data = '../input/titanic/test.csv'
Titanic - Machine Learning from Disaster
5,213,367
if TRAINING:
    for i in tqdm(range(len(x_val))):
        x_val[i] = cv2.resize(x_val[i, 20:-20, 20:-20, :], (IMG_SIZE, IMG_SIZE))
    for i in tqdm(range(len(x_train))):
        x_train[i] = cv2.resize(x_train[i, 20:-20, 20:-20, :], (IMG_SIZE, IMG_SIZE))<define_variables>
train_set = pd.read_csv(train_data)
test_set = pd.read_csv(test_data)
Titanic - Machine Learning from Disaster
5,213,367
class MixupGenerator():
    def __init__(self, X_train, y_train, seq=None, batch_size=32, alpha=0.2, shuffle=True, datagen=None):
        self.X_train = X_train
        self.y_train = y_train
        self.batch_size = batch_size
        self.alpha = alpha
        self.shuffle = shuffle
        self.sample_num = len(X_train)
        self.datagen = datagen
        self.seq = seq

    def __call__(self):
        while True:
            indexes = self.__get_exploration_order()
            itr_num = int(len(indexes) // (self.batch_size * 2))
            for i in range(itr_num):
                batch_ids = indexes[i * self.batch_size * 2:(i + 1) * self.batch_size * 2]
                X, y = self.__data_generation(batch_ids)
                yield X, y

    def __get_exploration_order(self):
        indexes = np.arange(self.sample_num)
        if self.shuffle:
            np.random.shuffle(indexes)
        return indexes

    def __data_generation(self, batch_ids):
        _, h, w, c = self.X_train.shape
        l = np.random.beta(self.alpha, self.alpha, self.batch_size)
        X_l = l.reshape(self.batch_size, 1, 1, 1)
        y_l = l.reshape(self.batch_size, 1)
        # mixup: convex combination of two disjoint half-batches
        X1 = self.X_train[batch_ids[:self.batch_size]]
        X2 = self.X_train[batch_ids[self.batch_size:]]
        X = X1 * X_l + X2 * (1 - X_l)
        if self.datagen:
            for i in range(self.batch_size):
                X[i] = self.datagen.random_transform(X[i])
                X[i] = self.datagen.standardize(X[i])
        if self.seq:
            X = X.astype(np.float32)
            X /= np.max(X)
            X *= 255
            X = X.astype(np.uint8)
            X = self.seq.augment_images(X)
        if isinstance(self.y_train, list):
            y = []
            for y_train_ in self.y_train:
                y1 = y_train_[batch_ids[:self.batch_size]]
                y2 = y_train_[batch_ids[self.batch_size:]]
                y.append(y1 * y_l + y2 * (1 - y_l))
        else:
            y1 = self.y_train[batch_ids[:self.batch_size]]
            y2 = self.y_train[batch_ids[self.batch_size:]]
            y = y1 * y_l + y2 * (1 - y_l)
        return X, y

    def get_sample(self):
        # the original referenced an undefined loop index `i`; use the first batch
        i = 0
        return self.__data_generation(self.__get_exploration_order()[i * self.batch_size * 2:(i + 1) * self.batch_size * 2])<train_model>
train_set.isnull().sum(axis=0)
Titanic - Machine Learning from Disaster
5,213,367
BATCH_SIZE = 32

def create_datagen():
    return ImageDataGenerator(
        preprocessing_function=seq.augment_image
    )

if TRAINING:
    data_generator = create_datagen().flow(x_train, y_train, batch_size=BATCH_SIZE)
    mixup_generator = MixupGenerator(x_train, y_train, seq=seq, batch_size=BATCH_SIZE, alpha=0.2)()<define_variables>
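In isolation, mixup draws lam ~ Beta(alpha, alpha) and takes the same convex combination of inputs and targets. A toy-shaped sketch (all names here are illustrative):
import numpy as np

alpha = 0.2
x1, x2 = np.ones((2, 4, 4, 3)), np.zeros((2, 4, 4, 3))
y1, y2 = np.array([[1, 0], [1, 0]]), np.array([[0, 1], [0, 1]])
lam = np.random.beta(alpha, alpha, size=2)
x = lam.reshape(-1, 1, 1, 1) * x1 + (1 - lam.reshape(-1, 1, 1, 1)) * x2
y = lam.reshape(-1, 1) * y1 + (1 - lam.reshape(-1, 1)) * y2
print(lam, y)   # with small alpha, lam concentrates near 0 or 1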
%matplotlib inline
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
Titanic - Machine Learning from Disaster
5,213,367
true_labels = np.array([1, 0, 1, 1, 0, 1])
pred_labels = np.array([1, 0, 0, 0, 0, 1])<compute_test_metric>
train_set.isnull().sum()
Titanic - Machine Learning from Disaster
5,213,367
accuracy_score(true_labels, pred_labels)<compute_test_metric>
train_set['Cabin'].isnull().sum()
Titanic - Machine Learning from Disaster
5,213,367
cohen_kappa_score(true_labels, pred_labels)<predict_on_test>
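The two metrics above differ sharply on ordinal targets: quadratic weighting penalises distant misses much more than near ones. A toy comparison (values chosen for illustration):
from sklearn.metrics import cohen_kappa_score

y_true = [0, 1, 2, 3, 4, 4]
near   = [0, 1, 2, 3, 3, 4]   # one prediction off by 1
far    = [0, 1, 2, 3, 0, 4]   # one prediction off by 4
print(cohen_kappa_score(y_true, near, weights='quadratic'))  # ~0.96
print(cohen_kappa_score(y_true, far,  weights='quadratic'))  # ~0.45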
train_set['Cabin'].value_counts().head()
Titanic - Machine Learning from Disaster
5,213,367
class Metrics(Callback):
    def on_train_begin(self, logs={}):
        self.val_kappas = []

    def on_epoch_end(self, epoch, logs={}):
        X_val, y_val = self.validation_data[:2]
        y_val = (y_val.sum(axis=1) - 1).clip(0, 4)
        y_pred = self.model.predict(X_val) > 0.5
        y_pred = (y_pred.astype(int).sum(axis=1) - 1).clip(0, 4)
        _val_kappa = cohen_kappa_score(y_val, y_pred, weights='quadratic')
        self.val_kappas.append(_val_kappa)
        print(f"val_kappa: {_val_kappa:.4f}")
        if _val_kappa == max(self.val_kappas):
            print("Validation Kappa has improved. Saving model.")
            self.model.save('model.h5')
        return<load_pretrained>
merged = pd.concat([train_set,test_set], sort = False)
merged.head(3)
Titanic - Machine Learning from Disaster
5,213,367
base_model = Xception(
    weights=None,
    include_top=False,
    input_shape=(224,224,3)
)
base_model.load_weights("../input/keras-pretrained-models/xception_weights_tf_dim_ordering_tf_kernels_notop.h5")<choose_model_class>
merged['Cabin'].value_counts().head(3)
Titanic - Machine Learning from Disaster
5,213,367
def build_model():
    model = Sequential()
    model.add(base_model)
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(5, activation='sigmoid'))
    model.compile(
        loss='binary_crossentropy',
        optimizer=Adam(lr=0.00005),
        metrics=['accuracy']
    )
    return model<train_on_grid>
merged['Cabin'].fillna('X', inplace=True)
Titanic - Machine Learning from Disaster
5,213,367
if TRAINING:
    model = build_model()  # missing in the original cell; `model` was used but never constructed
    kappa_metrics = Metrics()
    rlr = callbacks.ReduceLROnPlateau(factor=0.5, patience=4, verbose=1)
    es = callbacks.EarlyStopping(patience=10, verbose=1, mode="min")
    history = model.fit_generator(
        data_generator,
        steps_per_epoch=x_train.shape[0] / BATCH_SIZE,
        epochs=200,
        validation_data=(x_val, y_val),
        callbacks=[kappa_metrics, rlr, es]
    )<save_to_csv>
merged['Title'] = merged['Name'].str.extract(r'([A-Za-z]+)\.')
merged['Title'].head()
Titanic - Machine Learning from Disaster
5,213,367
if TRAINING:
    with open('history.json', 'w') as f:
        json.dump(str(history.history), f)
    history_df = pd.DataFrame(history.history)
    history_df[['loss', 'val_loss']].plot()
    history_df[['acc', 'val_acc']].plot()<data_type_conversions>
merged['Title'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
<save_to_csv>
merged['Title'].replace(to_replace = ['Dr', 'Rev', 'Col', 'Major', 'Capt'], value = 'Officer', inplace=True)
merged['Title'].replace(to_replace = ['Dona', 'Jonkheer', 'Countess', 'Sir', 'Lady', 'Don'], value = 'Aristocrat', inplace = True)
merged['Title'].replace({'Mlle':'Miss', 'Ms':'Miss', 'Mme':'Mrs'}, inplace = True)
Titanic - Machine Learning from Disaster
5,213,367
if TRAINING:
    weights_path = 'model.h5'
else:
    weights_path = '../input/weights/model.h5'
model.load_weights(weights_path)
y_test = model.predict(x_test) > 0.5
y_test = (y_test.astype(int).sum(axis=1) - 1).clip(0, 4)
test_df['diagnosis'] = y_test
test_df.to_csv('submission.csv', index=False)<set_options>
merged['SibSp'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
%reload_ext autoreload
%autoreload 2
%matplotlib inline<define_variables>
merged['Parch'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
package_dir = '../input/efficientnet/efficientnet_pytorch'
sys.path.insert(0, package_dir)<load_pretrained>
merged['Family_size'] = merged.SibSp + merged.Parch + 1
merged['Family_size'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
md_ef = EfficientNet.from_pretrained('efficientnet-b5', num_classes=1)<load_from_csv>
merged['Family_size'].replace(to_replace = [1], value = 'single', inplace = True)
merged['Family_size'].replace(to_replace = [2,3], value = 'small', inplace = True)
merged['Family_size'].replace(to_replace = [4,5], value = 'medium', inplace = True)
merged['Family_size'].replace(to_replace = [6, 7, 8, 11], value = 'large', inplace = True)
Titanic - Machine Learning from Disaster
5,213,367
def get_df():
    base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
    train_dir = os.path.join(base_image_dir, 'train_images/')
    df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
    df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
    df = df.drop(columns=['id_code'])
    df = df.sample(frac=1).reset_index(drop=True)
    test_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
    return df, test_df

df, test_df = get_df()<feature_engineering>
merged['Family_size'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
bs = 64
sz = 224
tfms = get_transforms(do_flip=True, flip_vert=True)<compute_test_metric>
ticket = []
for x in list(merged['Ticket']):
    if x.isdigit():
        ticket.append('N')
    else:
        ticket.append(x.replace('.','').replace('/','').strip().split(' ')[0])
merged['Ticket'] = ticket
Titanic - Machine Learning from Disaster
5,213,367
def qk(y_pred, y):
    return torch.tensor(cohen_kappa_score(torch.round(y_pred), y, weights='quadratic'), device='cuda:0')<load_pretrained>
merged['Ticket'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
learn = Learner(data, md_ef, metrics=[qk], model_dir="models").to_fp16()
learn.data.add_test(ImageList.from_df(test_df, '../input/aptos2019-blindness-detection', folder='test_images', suffix='.png'))<train_model>
merged['Ticket'] = merged['Ticket'].apply(lambda x: x[0])
merged['Ticket'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
learn.fit_one_cycle(10, 1e-3)<compute_train_metric>
outliers(merged['Fare'])
Titanic - Machine Learning from Disaster
5,213,367
class OptimizedRounder(object):
    def __init__(self):
        self.coef_ = 0

    def _kappa_loss(self, coef, X, y):
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred >= coef[0] and pred < coef[1]:
                X_p[i] = 1
            elif pred >= coef[1] and pred < coef[2]:
                X_p[i] = 2
            elif pred >= coef[2] and pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        ll = metrics.cohen_kappa_score(y, X_p, weights='quadratic')
        return -ll

    def fit(self, X, y):
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
        print(-loss_partial(self.coef_['x']))

    def predict(self, X, coef):
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred >= coef[0] and pred < coef[1]:
                X_p[i] = 1
            elif pred >= coef[1] and pred < coef[2]:
                X_p[i] = 2
            elif pred >= coef[2] and pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        return X_p

    def coefficients(self):
        return self.coef_['x']<save_to_csv>
merged.isnull().sum()
Titanic - Machine Learning from Disaster
5,213,367
def run_subm(learn=learn, coefficients=[0.5, 1.5, 2.5, 3.5]):
    opt = OptimizedRounder()
    preds, y = learn.get_preds(DatasetType.Test)
    tst_pred = opt.predict(preds, coefficients)
    test_df.diagnosis = tst_pred.astype(int)
    test_df.to_csv('submission.csv', index=False)
    print('done')<set_options>
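run_subm above applies fixed cut points; OptimizedRounder can instead fit them against validation predictions. A sketch of that flow (assuming the fastai Learner and validation set defined earlier are in scope):
opt = OptimizedRounder()
val_preds, val_y = learn.get_preds(DatasetType.Valid)  # raw regression outputs
opt.fit(val_preds, val_y)                              # Nelder-Mead on negative QWK
print(opt.coefficients())                              # tuned thresholds
run_subm(learn, coefficients=opt.coefficients())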
merged['Embarked'].value_counts()
Titanic - Machine Learning from Disaster
5,213,367
%reload_ext autoreload
%autoreload 2
%matplotlib inline<set_options>
merged['Embarked'].fillna(value = 'S', inplace=True)
Titanic - Machine Learning from Disaster
5,213,367
warnings.filterwarnings('always')
warnings.filterwarnings('ignore')
%matplotlib inline
style.use('fivethirtyeight')
sns.set(style='whitegrid', color_codes=True)<set_options>
merged['Fare'].fillna(value=merged['Fare'].median(), inplace=True)
Titanic - Machine Learning from Disaster
5,213,367
def seed_everything(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True

seed_everything(42)<feature_engineering>
df = merged.loc[:, ['Sex', 'Pclass', 'Embarked', 'Title', 'Family_size', 'Parch', 'SibSp', 'Cabin', 'Ticket']]
LE = LabelEncoder()
df = df.apply(LE.fit_transform)
df.head(5)
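One caveat with `df.apply(LE.fit_transform)`: the single encoder is refit on every column, so no per-column mapping survives. A sketch that keeps each fitted encoder around, if the mappings need inspecting later:
encoders = {}
for col in df.columns:
    enc = LabelEncoder()
    df[col] = enc.fit_transform(df[col])
    encoders[col] = enc   # e.g. encoders['Sex'].classes_ shows the label order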
Titanic - Machine Learning from Disaster
5,213,367
temp = vision.data.open_image <load_from_csv>
df['Age'] = merged['Age']
df.head(2)
Titanic - Machine Learning from Disaster