Dataset schema (one row per prompt/completion cell pair):

    kernel_id     int64     24.2k .. 23.3M
    prompt        string    lengths 8 .. 1.85M
    completion    string    lengths 1 .. 182k
    comp_name     string    lengths 5 .. 57

kernel_id: 8,662,763 (Titanic - Machine Learning from Disaster)
def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift): rotation = math.pi * rotation / 180. shear = math.pi * shear / 180. c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3]) c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3]) zoom_matrix = tf.reshape(tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3]) shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3]) return K.dot(K.dot(rotation_matrix, shear_matrix), K.dot(zoom_matrix, shift_matrix)) def my_ceil(a, precision=0): return np.round(a + 0.5 * 10**(-precision), precision) def transform(image, label): DIM = IMAGE_SIZE[0] XDIM = DIM%2 rot =(my_ceil(random.uniform(0, 1), 1)* 180.0)* tf.random.normal([1], dtype='float32') shr = 5.* tf.random.normal([1],dtype='float32') h_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. w_zoom = 1.0 + tf.random.normal([1],dtype='float32')/10. h_shift = 16.* tf.random.normal([1],dtype='float32') w_shift = 16.* tf.random.normal([1],dtype='float32') m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift) x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM) y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM]) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack([x,y,z]) idx2 = K.dot(m,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]]) d = tf.gather_nd(image['inp1'],tf.transpose(idx3)) return {'inp1': tf.reshape(d,[DIM,DIM,3]), 'inp2': image['inp2']}, label def cutmix(image, label): DIM = IMAGE_SIZE[0] imgs = []; labs = [] for j in range(BATCH_SIZE): P = tf.cast(tf.random.uniform([], 0, 1)<= cutmix_rate, tf.int32) k = tf.cast(tf.random.uniform([], 0, BATCH_SIZE), tf.int32) x = tf.cast(tf.random.uniform([], 0, DIM), tf.int32) y = tf.cast(tf.random.uniform([], 0, DIM), tf.int32) b = tf.random.uniform([], 0, 1) WIDTH = tf.cast(DIM * tf.math.sqrt(1-b),tf.int32)* P ya = tf.math.maximum(0,y-WIDTH//2) yb = tf.math.minimum(DIM,y+WIDTH//2) xa = tf.math.maximum(0,x-WIDTH//2) xb = tf.math.minimum(DIM,x+WIDTH//2) one = image['inp1'][j,ya:yb,0:xa,:] two = image['inp1'][k,ya:yb,xa:xb,:] three = image['inp1'][j,ya:yb,xb:DIM,:] middle = tf.concat([one,two,three],axis=1) img = tf.concat([image['inp1'][j,0:ya,:,:],middle,image['inp1'][j,yb:DIM,:,:]],axis=0) imgs.append(img) a = tf.cast(WIDTH*WIDTH/DIM/DIM,tf.float32) lab1 = label[j,] lab2 = label[k,] labs.append(( 1-a)*lab1 + a*lab2) image2 = tf.reshape(tf.stack(imgs),(BATCH_SIZE,DIM,DIM,3)) label2 = tf.reshape(tf.stack(labs),(BATCH_SIZE, 1)) return {'inp1': image2, 'inp2': image['inp2']}, label2 def transform_gridmask(image, inv_mat, image_shape): h, w, c = image_shape cx, cy = w//2, h//2 new_xs = tf.repeat(tf.range(-cx, cx, 1), h) new_ys = tf.tile(tf.range(-cy, cy, 1), [w]) new_zs = tf.ones([h*w], dtype=tf.int32) old_coords = tf.matmul(inv_mat, tf.cast(tf.stack([new_xs, new_ys, new_zs]), tf.float32)) old_coords_x, old_coords_y = tf.round(old_coords[0, :] + w//2), tf.round(old_coords[1, :] + h//2) clip_mask_x = tf.logical_or(old_coords_x<0, old_coords_x>w-1) clip_mask_y = tf.logical_or(old_coords_y<0, old_coords_y>h-1) clip_mask = tf.logical_or(clip_mask_x, clip_mask_y) old_coords_x = 
tf.boolean_mask(old_coords_x, tf.logical_not(clip_mask)) old_coords_y = tf.boolean_mask(old_coords_y, tf.logical_not(clip_mask)) new_coords_x = tf.boolean_mask(new_xs+cx, tf.logical_not(clip_mask)) new_coords_y = tf.boolean_mask(new_ys+cy, tf.logical_not(clip_mask)) old_coords = tf.cast(tf.stack([old_coords_y, old_coords_x]), tf.int32) new_coords = tf.cast(tf.stack([new_coords_y, new_coords_x]), tf.int64) rotated_image_values = tf.gather_nd(image, tf.transpose(old_coords)) rotated_image_channel = list() for i in range(c): vals = rotated_image_values[:,i] sparse_channel = tf.SparseTensor(tf.transpose(new_coords), vals, [h, w]) rotated_image_channel.append(tf.sparse.to_dense(sparse_channel, default_value=0, validate_indices=False)) return tf.transpose(tf.stack(rotated_image_channel), [1,2,0]) def random_rotate(image, angle, image_shape): def get_rotation_mat_inv(angle): angle = math.pi * angle / 180 cos_val = tf.math.cos(angle) sin_val = tf.math.sin(angle) one = tf.constant([1], tf.float32) zero = tf.constant([0], tf.float32) rot_mat_inv = tf.concat([cos_val, sin_val, zero, -sin_val, cos_val, zero, zero, zero, one], axis=0) rot_mat_inv = tf.reshape(rot_mat_inv, [3,3]) return rot_mat_inv angle = float(angle)* tf.random.normal([1],dtype='float32') rot_mat_inv = get_rotation_mat_inv(angle) return transform_gridmask(image, rot_mat_inv, image_shape) def GridMask(image_height, image_width, d1, d2, rotate_angle=1, ratio=0.5): h, w = image_height, image_width hh = int(np.ceil(np.sqrt(h*h+w*w))) hh = hh+1 if hh%2==1 else hh d = tf.random.uniform(shape=[], minval=d1, maxval=d2, dtype=tf.int32) l = tf.cast(tf.cast(d,tf.float32)*ratio+0.5, tf.int32) st_h = tf.random.uniform(shape=[], minval=0, maxval=d, dtype=tf.int32) st_w = tf.random.uniform(shape=[], minval=0, maxval=d, dtype=tf.int32) y_ranges = tf.range(-1 * d + st_h, -1 * d + st_h + l) x_ranges = tf.range(-1 * d + st_w, -1 * d + st_w + l) for i in range(0, hh//d+1): s1 = i * d + st_h s2 = i * d + st_w y_ranges = tf.concat([y_ranges, tf.range(s1,s1+l)], axis=0) x_ranges = tf.concat([x_ranges, tf.range(s2,s2+l)], axis=0) x_clip_mask = tf.logical_or(x_ranges < 0 , x_ranges > hh-1) y_clip_mask = tf.logical_or(y_ranges < 0 , y_ranges > hh-1) clip_mask = tf.logical_or(x_clip_mask, y_clip_mask) x_ranges = tf.boolean_mask(x_ranges, tf.logical_not(clip_mask)) y_ranges = tf.boolean_mask(y_ranges, tf.logical_not(clip_mask)) hh_ranges = tf.tile(tf.range(0,hh), [tf.cast(tf.reduce_sum(tf.ones_like(x_ranges)) , tf.int32)]) x_ranges = tf.repeat(x_ranges, hh) y_ranges = tf.repeat(y_ranges, hh) y_hh_indices = tf.transpose(tf.stack([y_ranges, hh_ranges])) x_hh_indices = tf.transpose(tf.stack([hh_ranges, x_ranges])) y_mask_sparse = tf.SparseTensor(tf.cast(y_hh_indices, tf.int64), tf.zeros_like(y_ranges), [hh, hh]) y_mask = tf.sparse.to_dense(y_mask_sparse, 1, False) x_mask_sparse = tf.SparseTensor(tf.cast(x_hh_indices, tf.int64), tf.zeros_like(x_ranges), [hh, hh]) x_mask = tf.sparse.to_dense(x_mask_sparse, 1, False) mask = tf.expand_dims(tf.clip_by_value(x_mask + y_mask, 0, 1), axis=-1) mask = random_rotate(mask, rotate_angle, [hh, hh, 1]) mask = tf.image.crop_to_bounding_box(mask,(hh-h)//2,(hh-w)//2, image_height, image_width) return mask def apply_grid_mask(image, image_shape, PROBABILITY = gridmask_rate): AugParams = { 'd1' : 100, 'd2': 160, 'rotate' : 45, 'ratio' : 0.3 } mask = GridMask(image_shape[0], image_shape[1], AugParams['d1'], AugParams['d2'], AugParams['rotate'], AugParams['ratio']) if image_shape[-1] == 3: mask = tf.concat([mask, mask, mask], axis=-1) 
mask = tf.cast(mask,tf.float32) P = tf.cast(tf.random.uniform([], 0, 1)<= PROBABILITY, tf.int32) if P==1: return image*mask else: return image def gridmask(img_batch, label_batch): return {'inp1': apply_grid_mask(img_batch['inp1'],(*IMAGE_SIZE, 3)) , 'inp2': img_batch['inp2']}, label_batch def seed_everything(seed): random.seed(seed) np.random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) tf.random.set_seed(seed) def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "target": tf.io.FixedLenFeature([], tf.int64), "age_approx": tf.io.FixedLenFeature([], tf.int64), "sex": tf.io.FixedLenFeature([], tf.int64), "anatom_site_general_challenge": tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['target'], tf.float32) data = {} data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return image, label, data def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "image_name": tf.io.FixedLenFeature([], tf.string), "age_approx": tf.io.FixedLenFeature([], tf.int64), "sex": tf.io.FixedLenFeature([], tf.int64), "anatom_site_general_challenge": tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) image_name = example['image_name'] data = {} data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return image, image_name, data def load_dataset(filenames, labeled = True, ordered = False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads = AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls = AUTO) return dataset def setup_input1(image, label, data): anatom = [tf.cast(data['anatom_site_general_challenge'][i], dtype = tf.float32)for i in range(7)] tab_data = [tf.cast(data[tfeat], dtype = tf.float32)for tfeat in ['age_approx', 'sex']] tabular = tf.stack(tab_data + anatom) return {'inp1': image, 'inp2': tabular}, label def setup_input2(image, image_name, data): anatom = [tf.cast(data['anatom_site_general_challenge'][i], dtype = tf.float32)for i in range(7)] tab_data = [tf.cast(data[tfeat], dtype = tf.float32)for tfeat in ['age_approx', 'sex']] tabular = tf.stack(tab_data + anatom) return {'inp1': image, 'inp2': tabular}, image_name def setup_input3(image, image_name, target, data): anatom = [tf.cast(data['anatom_site_general_challenge'][i], dtype = tf.float32)for i in range(7)] tab_data = [tf.cast(data[tfeat], dtype = tf.float32)for tfeat in ['age_approx', 'sex']] tabular = tf.stack(tab_data + anatom) return {'inp1': image, 'inp2': tabular}, image_name, target def data_augment(data, label): data['inp1'] = tf.image.random_flip_left_right(data['inp1']) data['inp1'] = 
tf.image.random_flip_up_down(data['inp1']) data['inp1'] = tf.image.random_hue(data['inp1'], 0.01) data['inp1'] = tf.image.random_saturation(data['inp1'], 0.7, 1.3) data['inp1'] = tf.image.random_contrast(data['inp1'], 0.8, 1.2) data['inp1'] = tf.image.random_brightness(data['inp1'], 0.1) return data, label def get_training_dataset(filenames, labeled = True, ordered = False): dataset = load_dataset(filenames, labeled = labeled, ordered = ordered) dataset = dataset.map(setup_input1, num_parallel_calls = AUTO) dataset = dataset.map(data_augment, num_parallel_calls = AUTO) dataset = dataset.map(transform, num_parallel_calls = AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.map(cutmix, num_parallel_calls = AUTO) dataset = dataset.map(gridmask, num_parallel_calls = AUTO) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(filenames, labeled = True, ordered = True): dataset = load_dataset(filenames, labeled = labeled, ordered = ordered) dataset = dataset.map(setup_input1, num_parallel_calls = AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(filenames, labeled = False, ordered = True): dataset = load_dataset(filenames, labeled = labeled, ordered = ordered) dataset = dataset.map(setup_input2, num_parallel_calls = AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) def read_tfrecord_full(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "image_name": tf.io.FixedLenFeature([], tf.string), "target": tf.io.FixedLenFeature([], tf.int64), "age_approx": tf.io.FixedLenFeature([], tf.int64), "sex": tf.io.FixedLenFeature([], tf.int64), "anatom_site_general_challenge": tf.io.FixedLenFeature([], tf.int64) } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) image_name = example['image_name'] target = tf.cast(example['target'], tf.float32) data = {} data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return image, image_name, target, data def load_dataset_full(filenames): dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads = AUTO) dataset = dataset.map(read_tfrecord_full, num_parallel_calls = AUTO) return dataset def get_data_full(filenames): dataset = load_dataset_full(filenames) dataset = dataset.map(setup_input3, num_parallel_calls = AUTO) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset NUM_TRAINING_IMAGES = int(count_data_items(TRAINING_FILENAMES)* 0.8) NUM_VALIDATION_IMAGES = int(count_data_items(TRAINING_FILENAMES)* 0.2) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))<compute_train_metric>
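A quick sanity check for get_mat above (a sketch, assuming the notebook's TensorFlow 2 imports and eager execution): with neutral parameters every factor reduces to the identity, so the composed matrix should too.

zero = tf.constant([0.0])
one = tf.constant([1.0])
m = get_mat(zero, zero, one, one, zero, zero)  # no rotation, shear, zoom, or shift
print(m.numpy())  # expected: the 3x3 identity matrix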
epochs_num = 100 batch_size = 20 input_dim = len(x_train[0] )
def binary_focal_loss(gamma=2., alpha=.25): def binary_focal_loss_fixed(y_true, y_pred): pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred)) pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred)) epsilon = K.epsilon() pt_1 = K.clip(pt_1, epsilon, 1.- epsilon) pt_0 = K.clip(pt_0, epsilon, 1.- epsilon) return -K.sum(alpha * K.pow(1.- pt_1, gamma)* K.log(pt_1)) \ -K.sum(( 1 - alpha)* K.pow(pt_0, gamma)* K.log(1.- pt_0)) return binary_focal_loss_fixed def get_model() : with strategy.scope() : inp1 = tf.keras.layers.Input(shape =(*IMAGE_SIZE, 3), name='inp1') inp2 = tf.keras.layers.Input(shape =(9), name='inp2') efn1 = efn.EfficientNetB3(input_shape=(*IMAGE_SIZE,3), weights = 'imagenet', include_top = False) x1 = efn1(inp1) x1 = tf.keras.layers.GlobalAveragePooling2D()(x1) small = tf.keras.layers.AveragePooling2D(pool_size=2, strides=2 )(inp1) efn2 = efn.EfficientNetB4(input_shape=(256,256,3), weights='imagenet', include_top=False) x2 = efn2(small) x2 = tf.keras.layers.GlobalAveragePooling2D()(x2) x = tf.keras.layers.Concatenate()([x1,x2]) xtab = tf.keras.layers.Dense(50 )(inp2) xtab = tf.keras.layers.BatchNormalization()(xtab) xtab = tf.keras.layers.Activation('relu' )(xtab) concat = tf.keras.layers.concatenate([x, xtab]) output = tf.keras.layers.Dense(1, activation = 'sigmoid' )(concat) model = tf.keras.models.Model(inputs = [inp1, inp2], outputs = [output]) opt = tf.keras.optimizers.Adam(learning_rate = LR) model.compile(optimizer=opt, loss=[binary_focal_loss(gamma = 2.0, alpha = 0.80)], metrics=[tf.keras.metrics.BinaryAccuracy() , tf.keras.metrics.AUC() ]) return model def train_and_predict(SUB, folds = 3): models = [] oof_image_name = [] oof_target = [] oof_prediction = [] seed_everything(SEED) kfold = KFold(folds, shuffle = True, random_state = SEED) for fold,(trn_ind, val_ind)in enumerate(kfold.split(TRAINING_FILENAMES)) : print(' ') print('-'*50) print(f'Training fold {fold + 1}') train_dataset = get_training_dataset([TRAINING_FILENAMES[x] for x in trn_ind], labeled = True, ordered = False) val_dataset = get_validation_dataset([TRAINING_FILENAMES[x] for x in val_ind], labeled = True, ordered = True) K.clear_session() model = get_model() early_stopping = tf.keras.callbacks.EarlyStopping(monitor = 'val_auc', mode = 'max', patience = 5, verbose = 1, min_delta = 0.0001, restore_best_weights = True) cb_lr_schedule = tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_auc', factor = 0.4, patience = 2, verbose = 1, min_delta = 0.0001, mode = 'max') history = model.fit(train_dataset, steps_per_epoch = STEPS_PER_EPOCH, epochs = EPOCHS, callbacks = [early_stopping, cb_lr_schedule], validation_data = val_dataset, verbose = 2) models.append(model) number_of_files = count_data_items([TRAINING_FILENAMES[x] for x in val_ind]) dataset = get_data_full([TRAINING_FILENAMES[x] for x in val_ind]) image_name = dataset.map(lambda image, image_name, target: image_name ).unbatch() image_name = next(iter(image_name.batch(number_of_files)) ).numpy().astype('U') target = dataset.map(lambda image, image_name, target: target ).unbatch() target = next(iter(target.batch(number_of_files)) ).numpy() image = dataset.map(lambda image, image_name, target: image) probabilities = model.predict(image) oof_image_name.extend(list(image_name)) oof_target.extend(list(target)) oof_prediction.extend(list(np.concatenate(probabilities))) print(' ') print('-'*50) oof_df = pd.DataFrame({'image_name': oof_image_name, 'target': oof_target, 'predictions': oof_prediction}) 
oof_df.to_csv('EfficientNetB4_5_ensemble.csv', index = False) test_ds = get_test_dataset(TEST_FILENAMES, labeled = False, ordered = True) test_images_ds = test_ds.map(lambda image, image_name: image) print('Computing predictions...') probabilities = np.average([np.concatenate(models[i].predict(test_images_ds)) for i in range(folds)], axis = 0) print('Generating submission.csv file...') test_ids_ds = test_ds.map(lambda image, image_name: image_name ).unbatch() test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U') pred_df = pd.DataFrame({'image_name': test_ids, 'target': probabilities}) SUB.drop('target', inplace = True, axis = 1) SUB = SUB.merge(pred_df, on = 'image_name') SUB.to_csv('sub_EfficientNetB4_5_ensemble.csv', index = False) return oof_target, oof_prediction oof_target, oof_prediction = train_and_predict(SUB )<compute_test_metric>
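For reference, the quantity binary_focal_loss above computes is the summed binary focal loss, with p the predicted probability of the positive class:

$$\mathrm{FL} = -\sum_{y=1} \alpha\,(1-p)^{\gamma}\,\log p \;-\; \sum_{y=0} (1-\alpha)\,p^{\gamma}\,\log(1-p)$$

With the compile-time settings above (gamma = 2.0, alpha = 0.80), well-classified examples contribute little and positives are up-weighted.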
def get_model(input_dim): model = models.Sequential() model.add(layers.Dense(units = 7, kernel_initializer = 'lecun_uniform', activation = 'relu', input_dim = input_dim)) model.add(layers.Dense(units = 5, kernel_initializer = 'lecun_uniform', activation = 'relu')) model.add(layers.Dense(units = 1, kernel_initializer = 'lecun_uniform', activation = 'sigmoid')) model.compile(optimizer=optimizers.Adam(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy']) return model
roc_auc = metrics.roc_auc_score(oof_target, oof_prediction) print('Our out of folds roc auc score is: ', roc_auc )<set_options>
model = get_model(input_dim) history = model.fit(x_train, y_train, epochs=epochs_num, batch_size=batch_size, verbose=1)
warnings.filterwarnings('ignore') <load_from_csv>
predict = model.predict(x_test )
def seed_everything(seed):
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

SEED = 22
seed_everything(SEED)

def read_data():
    base = '/kaggle/input/siim-isic-melanoma-classification'
    train = pd.read_csv(f'{base}/train.csv')
    test = pd.read_csv(f'{base}/test.csv')
    sub = pd.read_csv(f'{base}/sample_submission.csv')

    # Out-of-fold and test predictions from 19 previously trained EfficientNet models.
    model_names = ['EfficientNetB0_256', 'EfficientNetB0_512EX', 'EfficientNetB1_256',
                   'EfficientNetB1_384', 'EfficientNetB1_512', 'EfficientNetB1_512EX',
                   'EfficientNetB2_256', 'EfficientNetB2_384', 'EfficientNetB2_512',
                   'EfficientNetB2_512EX', 'EfficientNetB3_256', 'EfficientNetB3_384',
                   'EfficientNetB3_512', 'EfficientNetB4_256', 'EfficientNetB4_384',
                   'EfficientNetB4_512', 'EfficientNetB5_256', 'EfficientNetB5_384',
                   'EfficientNetB6_384']

    def print_roc_auc(df, model):
        roc_auc = metrics.roc_auc_score(df['target'], df['predictions'])
        print(f'Our model {model} out of folds roc auc score is {roc_auc}')
        print('-' * 50)
        print(' ')

    def fix_predictions(train_df, test_df, model):
        test_df.columns = ['image_name', f'predictions_{model}']
        train_df = train_df[['image_name', 'predictions']]
        train_df.columns = ['image_name', f'predictions_{model}']
        return train_df, test_df

    for i, name in enumerate(model_names, start=1):
        oof = pd.read_csv(f'../input/melanoma-subs/{name}.csv')
        sub_i = pd.read_csv(f'../input/melanoma-subs/sub_{name}.csv')
        print_roc_auc(oof, i)
        oof, sub_i = fix_predictions(oof, sub_i, i)
        train = train.merge(oof, on='image_name')
        test = test.merge(sub_i, on='image_name')
    return train, test, sub

def feature_engineering(train, test):
    # Record each image's original JPEG width and height as tabular features.
    jpeg_dir = '/kaggle/input/siim-isic-melanoma-classification/jpeg'
    for df, split in [(train, 'train'), (test, 'test')]:
        sizes = np.zeros((df.shape[0], 2))
        for i, img_path in enumerate(tqdm(df['image_name'].values)):
            img = Image.open(os.path.join(jpeg_dir, split, f'{img_path}.jpg'))
            sizes[i] = np.array([img.size[0], img.size[1]])
        df['w'] = sizes[:, 0]
        df['h'] = sizes[:, 1]
    return train, test

def encode_categorical(train, test):
    for col in ['sex', 'anatom_site_general_challenge']:
        encoder = preprocessing.LabelEncoder()
        train[col].fillna('unknown', inplace=True)
        test[col].fillna('unknown', inplace=True)
        train[col] = encoder.fit_transform(train[col])
        test[col] = encoder.transform(test[col])
    train['patient_id'].fillna('unknown', inplace=True)
    return train, test

def train_and_evaluate_lgbm(train, test, params, verbose_eval, folds=5):
    features = [col for col in train.columns
                if col not in ['image_name', 'patient_id', 'diagnosis',
                               'benign_malignant', 'target', 'source']]
    if verbose_eval != False:
        print('Training with features: ', features)
    kf = GroupKFold(n_splits=folds)
    target = 'target'
    oof_pred = np.zeros(len(train))
    y_pred = np.zeros(len(test))
    for fold, (tr_ind, val_ind) in enumerate(kf.split(train, groups=train['patient_id'])):
        if verbose_eval != False:
            print(' ')
            print('-' * 50)
            print(f'Training fold {fold + 1}')
        x_train, x_val = train[features].iloc[tr_ind], train[features].iloc[val_ind]
        y_train, y_val = train[target][tr_ind], train[target][val_ind]
        train_set = lgb.Dataset(x_train, y_train)
        val_set = lgb.Dataset(x_val, y_val)
        model = lgb.train(params, train_set, num_boost_round=10000,
                          early_stopping_rounds=50,
                          valid_sets=[train_set, val_set],
                          verbose_eval=verbose_eval)
        oof_pred[val_ind] = model.predict(x_val)
        y_pred += model.predict(test[features]) / kf.n_splits
    rauc = metrics.roc_auc_score(train['target'], oof_pred)
    if verbose_eval != False:
        print(f'Our oof roc auc score for our lgbm model is {rauc}')
    gc.collect()
    return rauc, y_pred

train, test, sub = read_data()
train, test = feature_engineering(train, test)
train, test = encode_categorical(train, test)<choose_model_class>
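The cell that actually calls train_and_evaluate_lgbm with its tuned parameter dict is not shown; a minimal invocation sketch, with placeholder parameters rather than the notebook's values, would look like:

# Placeholder params for illustration only, not the notebook's tuned values.
params = {'objective': 'binary', 'metric': 'auc', 'learning_rate': 0.02}
rauc, y_pred = train_and_evaluate_lgbm(train, test, params, verbose_eval=100)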
my_submission = pd.DataFrame({ 'PassengerId': test.PassengerId, 'Survived': pd.Series(predict.reshape(( 1,-1)) [0] ).round().astype(int) }) my_submission.head()
<install_modules><EOS>
my_submission.to_csv('submission.csv', index=False )
kernel_id: 9,033,381 (Titanic - Machine Learning from Disaster)
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<import_modules>
%%markdown Titanic competition on Kaggle: *https://www.kaggle.com/c/titanic/*
import random import numpy as np import pandas as pd import torch import PIL.Image as pil import matplotlib.pyplot as plt from fastai.vision import * from efficientnet_pytorch import EfficientNet from sklearn.model_selection import StratifiedKFold import os<set_options>
%matplotlib inline
warnings.filterwarnings("ignore", category=UserWarning, module="torch.nn.functional" )<install_modules>
input_path = '/kaggle/input/titanic/' train_set = pd.read_csv(input_path+'train.csv') test_set = pd.read_csv(input_path+'test.csv') dataset = [train_set, test_set]
!pip install torch==1.4.0 torchvision==0.5.0<set_options>
%reload_ext autoreload %autoreload 2 %matplotlib inline<set_options>
def missing_values_df(df): missing_values = df.isnull().sum().sort_values(ascending = False) missing_values = missing_values[missing_values>0] ratio = missing_values/len(df)*100 output_df= pd.concat([missing_values, ratio], axis=1, keys=['Total missing values', 'Percentage']) return output_df print('Missing values in the columns of training dataset with percentage') display(missing_values_df(train_set)) print(' ------------------------------------------------ ') print('Missing values in the columns of test dataset with percentage') display(missing_values_df(test_set))
seed = 42 def random_seed(seed_value): random.seed(seed_value) np.random.seed(seed_value) torch.manual_seed(seed_value) os.environ['PYTHONHASHSEED'] = str(seed_value) if torch.cuda.is_available() : torch.cuda.manual_seed(seed_value) torch.cuda.manual_seed_all(seed_value) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False random_seed(seed )<define_variables>
%%markdown
1) Fill *NaN* values with the mean (*Age*) or the most frequent value (*Embarked*)
2) Extract passenger *titles* from the names, then drop the names
3) Encode categorical features into integers
4) Bin *Age* into a few age ranges
5) Normalize both datasets (training_set and test_set)
6) Split the training set into training and validation sets (80%/20%)
path = '/kaggle/input/siim-isic-melanoma-classification' path<define_variables>
for i in range(len(dataset)) : freq_port = dataset[i]['Embarked'].dropna().mode() [0] dataset[i]['Embarked'] = dataset[i]['Embarked'].fillna(freq_port) dataset[i] = dataset[i].fillna(dataset[i].mean() )
img_path = '/kaggle/input/melanoma-merged-external-data-512x512-jpeg' img_path<load_from_csv>
print("Titels of passengers by sex") dataset[0]['Title'] = dataset[0].Name.str.extract('([A-Za-z]+)\.', expand=False) display(pd.crosstab(dataset[0]['Sex'], dataset[0]['Title']))
train_df = pd.read_csv(img_path + '/folds_13062020.csv') train_df.head()<load_from_csv>
for i, data in enumerate(dataset): dataset[i]['Title'] = data.Name.str.extract('([A-Za-z]+)\.', expand=False) dataset[i]['Title'] = data['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset[i]['Title'] = data['Title'].replace(['Mlle', 'Ms'], 'Miss') dataset[i]['Title'] = data['Title'].replace('Mme', 'Mrs') print(dataset[i]['Title'].unique() )
test_df = pd.read_csv(path + '/test.csv') test_df.head()<load_from_csv>
encoder = LabelEncoder() categoricalFeatures = dataset[0].select_dtypes(include=['object'] ).columns for i, data in enumerate(dataset): data[categoricalFeatures]=data[categoricalFeatures].astype(str) encoded = data[categoricalFeatures].apply(encoder.fit_transform) for j in categoricalFeatures: dataset[i][j]=encoded[j] dataset[0].head()
sample_df = pd.read_csv(path + '/sample_submission.csv') sample_df.head()<feature_engineering>
bins = [0,18,60,80] labels = [1,2,3] for i, data in enumerate(dataset): dataset[i] = dataset[i].drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) dataset[i]['Age']=pd.cut(dataset[i]['Age'],bins=bins ,labels=labels) dataset[i]['Age']=dataset[i]['Age'].astype('int64') print('training dataset:') display(dataset[0].head()) print('testing dataset:') display(dataset[1].head() )
tfms = get_transforms(flip_vert=True, max_rotate=15, max_zoom=1.2, max_lighting=0.3, max_warp=0, p_affine=0, p_lighting=0.8 )<compute_train_metric>
X = dataset[0].iloc[:, 1:]
Y = dataset[0].iloc[:, 0]
x_test = dataset[1].iloc[:, 0:]
# Scale train and test together so both share the same per-column maxima.
normalized_data = pd.concat([X, x_test])
normalized_x_train = normalized_data.values
normalized_x_train /= np.max(np.abs(normalized_x_train), axis=0)
cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Title']
X = pd.DataFrame(normalized_x_train[:891, :], columns=cols)
print(X.head())
print(len(X))
x_test = pd.DataFrame(normalized_x_train[891:, :], columns=cols)
display(x_test.head())
print(len(x_test))
display(X.columns)
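A tiny worked example of the max-abs scaling used above: each column is divided by its largest absolute value, mapping features into [-1, 1].

a = np.array([[1.0, -4.0], [2.0, 2.0]])
print(a / np.max(np.abs(a), axis=0))  # -> [[0.5, -1.0], [1.0, 0.5]]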
class FocalLoss(nn.Module): def __init__(self, gamma=2., reduction='mean'): super().__init__() self.gamma = gamma self.reduction = reduction def forward(self, inputs, targets): CE_loss = nn.CrossEntropyLoss(reduction='none' )(inputs, targets) pt = torch.exp(-CE_loss) F_loss =(( 1 - pt)**self.gamma)* CE_loss if self.reduction == 'sum': return F_loss.sum() elif self.reduction == 'mean': return F_loss.mean()<init_hyperparams>
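A quick equivalence check for FocalLoss (a sketch, assuming torch and torch.nn are available via the imports above): with gamma = 0 the modulating factor (1 - pt)**gamma is 1, so the loss must agree with plain cross-entropy.

logits = torch.randn(4, 2)
targets = torch.tensor([0, 1, 1, 0])
print(torch.allclose(FocalLoss(gamma=0.)(logits, targets),
                     nn.CrossEntropyLoss()(logits, targets)))  # expected: True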
X_train, X_val, y_train, y_val = train_test_split(X, Y, test_size = 0.20) print("Training set shape: "+str(X_train.shape)) print("Validation set shape: "+str(X_val.shape))
submission_ver = '0002' arch = [EfficientNet.from_pretrained('efficientnet-b0', num_classes=2)] fc_size = 1280 lin_size = 1000 n_folds = 5 size = [256] bs = 32 stage_1_epochs = 3 lr1 = [1e-1] lr_eff_1 = [1e-3] is_stage_2 = False stage_2_epochs = 4 lr2 = [slice(1e-7, 1e-4)] lr_eff_2 = [slice(1e-4, 1e-3)] custom_loss = True loss_func = FocalLoss() w_decay = 0.01 use_fp16 = True num_wkrs = os.cpu_count() use_tta = True submit_after_train = False oversampling_flag = True oversampling_size = 5 preds_to_int = False smooth_preds = False smooth_alpha = 0.01<count_unique_values>
%%markdown
1) Logistic Regression
2) Decision Tree
3) Random Forest
4) XGBoost
num_classes = len(np.unique(train_df['target'])) num_classes<feature_engineering>
accuracies_list = list() accuracies = namedtuple('accuracies',('Model', 'accuracy'))
test_df['image_name'] = '512x512-test/512x512-test/' + test_df['image_name'] + '.jpg'<create_dataframe>
test_data = ImageList.from_df(test_df, img_path) test_data<prepare_output>
logreg = LogisticRegression() logreg.fit(X_train, y_train) Y_pred = logreg.predict(X_val) acc_log = round(logreg.score(X_train, y_train)* 100, 2) print('accuracy: {}'.format(acc_log)) accuracies_list.append(accuracies('Logistic Regression', acc_log))
labels_df = train_df[['image_id', 'target']].copy() labels_df.head()<categorify>
def k_fold(df, num_fld, seed = seed): for fold in range(num_fld): df.loc[df.fold == fold, f'is_valid_{fold}'] = True df.loc[df.fold != fold, f'is_valid_{fold}'] = False<concatenate>
decisiontree = DecisionTreeClassifier() decisiontree.fit(X_train, y_train) y_pred = decisiontree.predict(X_val) acc_decisiontree = round(accuracy_score(y_pred, y_val)* 100, 2) print('accuracy: {}'.format(acc_decisiontree)) accuracies_list.append(accuracies('Decision Tree', acc_decisiontree))
k_fold(train_df, n_folds, seed )<filter>
def oversample(fld, df, os_size, num_fld=5): train_df_fld = df.loc[df['fold'] != fld] valid_df_fld = df.loc[df['fold'] == fld] train_df_md = train_df_fld.loc[train_df_fld['target'] == 1] if os_size == 'auto': os_size = int(np.floor(train_df_fld.loc[train_df_fld['target'] == 0]['target'].value_counts() [0]/train_df_fld.loc[train_df_fld['target'] == 1]['target'].value_counts() [1])) train_df_md = train_df_md.append([train_df_md] *(os_size - 1)) full_df_fld = pd.concat([train_df_fld, train_df_md, valid_df_fld] ).sample(frac=1.0, random_state=seed ).reset_index(drop=True) return full_df_fld for x in range(n_folds): fold_df = train_df.copy() if oversampling_flag: fold_df = oversample(x, train_df, oversampling_size) globals() ['src_%s' %x] =(ImageList.from_df(fold_df, img_path + '/512x512-dataset-melanoma', folder='512x512-dataset-melanoma', suffix='.jpg' ).split_from_df(col =(3 + x))) <categorify>
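A toy view of the oversample step, with made-up data: minority rows are replicated (os_size - 1) extra times, so each positive appears os_size times in the fold's training part.

df = pd.DataFrame({'target': [0, 0, 0, 1]})
minority = df[df['target'] == 1]
os_size = 3
df_os = pd.concat([df] + [minority] * (os_size - 1))
print(df_os['target'].value_counts())  # -> 0: 3, 1: 3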
clf = RandomForestClassifier(max_depth=10, max_leaf_nodes =20,random_state=0) clf.fit(X_train,y_train) y_pred=clf.predict(X_val) acc_random_forest = round(accuracy_score(y_pred, y_val)* 100, 2) print('accuracy: {}'.format(acc_random_forest)) accuracies_list.append(accuracies('Random Forest', acc_random_forest))
def get_data(fold, size, bs, padding_mode='reflection'): return(globals() ['src_%s' %fold].label_from_df(cols='target') .add_test(test_data) .transform(tfms, size=size, padding_mode=padding_mode) .databunch(bs=bs, num_workers = num_wkrs ).normalize(imagenet_stats))<categorify>
def preds_smoothing(encodings , alpha): K = encodings.shape[1] y_ls =(1 - alpha)* encodings + alpha / K return y_ls<compute_test_metric>
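A quick numeric check of preds_smoothing: with alpha = 0.01 and K = 2 classes, a hard one-hot row moves 0.005 toward the uniform distribution.

enc = np.array([[0.0, 1.0]])
print(preds_smoothing(enc, alpha=0.01))  # -> [[0.005 0.995]]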
xgb = xgboost.XGBClassifier(random_state=5,learning_rate=0.01) xgb.fit(X_train, y_train) y_pred = xgb.predict(X_val) acc_xgb = round(accuracy_score(y_pred, y_val)* 100, 2) print('accuracy: {}'.format(acc_xgb)) accuracies_list.append(accuracies('XGBoost', acc_xgb))
def print_metrics(val_preds, val_labels): targs, preds = LongTensor([]), Tensor([]) val_preds = F.softmax(val_preds, dim=1)[:,-1] preds = torch.cat(( preds, val_preds.cpu())) targs = torch.cat(( targs, val_labels.cpu().long())) print('AUCROC = ' + str(auc_roc_score(preds, targs ).item()))<set_options>
%%markdown
1) Declare constants
2) Prepare the training and test sets for PyTorch
3) Define our DL model class
4) Instantiate our model, loss, and optimizer
5) Define the fit function
6) Run the training process
7) Define the predict function
8) Prepare for submission
gc.collect()<feature_engineering>
BATCH_SIZE = 1 LEARNING_RATE = 0.001 EPOCHS = 800 INPUT_NODES = 8
for model in arch: if hasattr(model, '__name__'): model_name = model.__name__ else: model_name = "EfficientNet" globals() [model_name + '___val_preds'] = [] globals() [model_name + '___val_labels'] = [] globals() [model_name + '___test_preds'] = [] print(f'/////////////////////////////////////////////////////') print(f'//////////////// MODEL: {model_name} ////////////////') print(f'///////////////////////////////////////////////////// ') for fld in range(n_folds): print(f' //////////////// FOLD {fld} //////////////// ') for sz in size: print(f'-------- Size: {sz} -------- ') print("Preparing data & applying settings... ") data = get_data(fld, sz, bs) gc.collect() if sz == size[0]: if model_name != "EfficientNet": learn = cnn_learner(data, model, metrics=[AUROC() ], bn_final=True) else: model._fc = nn.Sequential(nn.Linear(fc_size, lin_size, bias=True), nn.ReLU() , nn.Dropout(p=0.5), nn.Linear(lin_size, num_classes, bias = True)) learn = Learner(data, model, metrics=[AUROC() ]) learn = learn.split([learn.model._conv_stem,learn.model._blocks,learn.model._conv_head]) else: learn.data = data if custom_loss: learn.loss_func = loss_func if use_fp16: learn = learn.to_fp16() if model_name != "EfficientNet": lr = lr1[size.index(sz)] else: lr = lr_eff_1[size.index(sz)] print("Data is ready.Learning - Stage 1...") learn.fit_one_cycle(stage_1_epochs, slice(lr), wd=w_decay) if is_stage_2: print("Stage 1 complete.Stage 2...") if model_name != "EfficientNet": lr = lr2[size.index(sz)] else: lr = lr_eff_2[size.index(sz)] learn.unfreeze() learn.fit_one_cycle(stage_2_epochs, lr, wd=w_decay) print(f"Final learning is over for size {sz} ") val_preds, val_labels = learn.get_preds() print_metrics(val_preds, val_labels) globals() [model_name + '___val_preds'].append(val_preds) globals() [model_name + '___val_labels'].append(val_labels) if use_tta == False: print(f' Saving test results for fold {fld}...') test_preds, _ = learn.get_preds(DatasetType.Test) globals() [model_name + '___test_preds'].append(test_preds[:, 1]) else: print(f' Saving test TTA results for fold {fld}...') test_preds, _ = learn.TTA(ds_type=DatasetType.Test) globals() [model_name + '___test_preds'].append(test_preds[:, 1]) print("Done!") gc.collect() print("All folds are trained successfully ") print_metrics(torch.cat(globals() [model_name + '___val_preds']), torch.cat(globals() [model_name + '___val_labels'])) print(" Writing submission file...") test_df_output = pd.concat([test_df, pd.DataFrame(np.mean(np.stack(globals() [model_name + '___test_preds']), axis=0), columns=['target'])], axis=1) sample_df.iloc[:,1:] = test_df_output.iloc[:,5] sample_df.to_csv(f'submission_v{submission_ver}.csv', index=False) print(f'File is ready to submit ') if submit_after_train: print("Submitting to Kaggle ") !kaggle competitions submit -c siim-isic-melanoma-classification -f 'submission_v{submission_ver}.csv' -m 'Md: {model_name}, Fd: {n_folds}, Bs: {bs}, Sz: {size[0]}, Os: {oversampling_flag}, TTa: {use_tta}' print(' ') <set_options>
X_train_torch = torch.from_numpy(X_train.values ).type(torch.FloatTensor) y_train_torch = torch.from_numpy(y_train.values ).type(torch.LongTensor) X_val_torch = torch.from_numpy(X_val.values ).type(torch.FloatTensor) y_val_torch = torch.from_numpy(y_val.values ).type(torch.LongTensor) x_test_torch = torch.from_numpy(x_test.values ).type(torch.FloatTensor) train = torch.utils.data.TensorDataset(X_train_torch,y_train_torch) val = torch.utils.data.TensorDataset(X_val_torch, y_val_torch) test = torch.utils.data.TensorDataset(x_test_torch) data_loader = torch.utils.data.DataLoader(train) val_loader = torch.utils.data.DataLoader(val) test_loader = torch.utils.data.DataLoader(test )
sns.set() sns.set_style('dark') <define_variables>
%%markdown Input features --> Fully connected layer (512 nodes) --> Dropout (50%) --> Fully connected layer (256 nodes) --> Dropout (50%) --> Fully connected layer (128 nodes) --> Dropout (50%) --> Fully connected layer (1 node)
IS_LOCAL = False
USE_REDUCED = False
data_index = 2 * int(IS_LOCAL) + int(USE_REDUCED)
train_path = ('../input/santander-customer-transaction-prediction/train.csv',
              '../input/santandersmall/train_small.csv',
              'train.csv', 'train_small.csv')[data_index]
test_path = ('../input/santander-customer-transaction-prediction/test.csv',
             '../input/santandersmall/test_small_with_targets.csv',
             'test.csv', 'test_small.csv')[data_index]
train_df = pd.read_csv(train_path)
test_df = pd.read_csv(test_path)<concatenate>
class Titanic_NN(nn.Module):
    def __init__(self, INPUT_NODES):
        super(Titanic_NN, self).__init__()
        self.fc1 = nn.Linear(INPUT_NODES, 512)
        self.fc2 = nn.Linear(512, 256)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 1)

    def forward(self, x):
        x = self.dropout(F.relu(self.fc1(x)))
        x = self.dropout(F.relu(self.fc2(x)))
        x = self.dropout(F.relu(self.fc3(x)))
        x = self.fc4(x)
        return torch.sigmoid(x)  # torch.sigmoid replaces the deprecated F.sigmoid
features = [col for col in train_df.columns if col not in ['target', 'ID_code']] if not 'target' in test_df: test_df['target'] = -1 all_df = pd.concat([train_df, test_df], sort=False )<count_unique_values>
model = Titanic_NN(INPUT_NODES) try: model.load_state_dict(torch.load(input_path+'titanic_model_4layers')) except: pass error = nn.BCELoss() optimizer = torch.optim.SGD(model.parameters() , lr=LEARNING_RATE) print(model )
unique_count = np.zeros(( test_df.shape[0], len(features))) for f, feature in tqdm(enumerate(features), total=len(features)) : _, i, c = np.unique(test_df[feature], return_counts=True, return_index=True) unique_count[i[c == 1], f] += 1 real_sample_indices = np.argwhere(np.sum(unique_count, axis=1)> 0)[:, 0] synthetic_sample_indices = np.argwhere(np.sum(unique_count, axis=1)== 0)[:, 0] print('Real:', len(real_sample_indices)) print('Synthetic:', len(synthetic_sample_indices)) del unique_count<concatenate>
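A toy illustration of the real-vs-synthetic test above, with made-up values: a row counts as real if at least one of its feature values occurs exactly once in its column.

col = np.array([3, 7, 3, 9])
_, i, c = np.unique(col, return_index=True, return_counts=True)
mask = np.zeros_like(col)
mask[i[c == 1]] = 1
print(mask)  # -> [0 1 0 1]: the rows holding the unique values 7 and 9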
def fit(model, data, phase='training', batch_size=1, is_cuda=False, input_dim=8):
    if phase == 'training':
        model.train()
    elif phase == 'validation':
        model.eval()
    loss_values = 0.0
    correct_values = 0
    for _, (features, label) in enumerate(data):
        if is_cuda:
            features, label = features.cuda(), label.cuda()
        features = Variable(features.view(batch_size, 1, input_dim))
        label = Variable(label.float().view(-1, 1))
        if phase == 'training':
            optimizer.zero_grad()
        output = model(features)
        loss = error(output, label)
        loss_values += loss.data
        if output[0] > 0.5:
            predictions = torch.Tensor([1])
        else:
            predictions = torch.Tensor([0])
        correct_values += predictions.eq(label.data.view_as(predictions)).cpu().sum()
        if phase == 'training':
            loss.backward()
            optimizer.step()
    loss = loss_values / len(data.dataset)
    accuracy = 100. * correct_values / len(data.dataset)
    print(f'{phase} loss is {loss:{5}.{2}} and {phase} accuracy is {accuracy:{10}.{4}}')
    print('=============================================')
    return loss, accuracy
all_real_df = pd.concat([train_df, test_df.iloc[real_sample_indices, :]], sort=False) for feature in tqdm(features): real_series = all_real_df[feature] counts = real_series.groupby(real_series ).count() full_series = all_df[feature] all_df[f'{feature}_count'] = full_series.map(counts) del all_real_df del real_series del full_series<normalization>
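The loop above frequency-encodes each feature using counts computed on the real rows only; a tiny example of the groupby-count-then-map pattern:

s_real = pd.Series([5, 5, 7])
counts = s_real.groupby(s_real).count()  # value 5 -> 2, value 7 -> 1
s_all = pd.Series([5, 7, 9])
print(s_all.map(counts).tolist())  # -> [2.0, 1.0, nan] (9 never seen in real rows)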
train_loss_list, val_loss_list = [], [] train_accuracy_list, val_accuracy_list = [], [] for epoch in range(EPOCHS): train_epoch_loss, train_epoch_accuracy = fit(model, data_loader, batch_size=BATCH_SIZE, input_dim=INPUT_NODES) val_epoch_loss, val_epoch_accuracy = fit(model, val_loader, phase='validation', batch_size=BATCH_SIZE, input_dim=INPUT_NODES) if epoch % 50 == 0: torch.save(model.state_dict() , 'titanic_model_4layers') train_loss_list.append(train_epoch_loss) train_accuracy_list.append(train_epoch_accuracy) val_loss_list.append(val_epoch_loss) val_accuracy_list.append(val_epoch_accuracy )
for feature in tqdm(features): all_df[feature] = StandardScaler().fit_transform(all_df[feature].values.reshape(-1, 1)) all_df[f'{feature}_count'] = MinMaxScaler().fit_transform(all_df[f'{feature}_count'].values.reshape(-1, 1))<count_values>
accuracies_list.append(accuracies('Neural Network __Validation_Set__', val_accuracy_list[-1]))
for f in range(len(features)) : features.append(f'{features[f]}_count' )<split>
def predict(model, data): model.eval() test_predictions = list() for _,(feature,)in enumerate(data): feature = Variable(feature.view(1, 1, INPUT_NODES)) output = model(feature) if output[0] > 0.5: prediction = 1 else: prediction = 0 test_predictions.append(prediction) return test_predictions
train_df = all_df.iloc[:train_df.shape[0], :] test_df = all_df.iloc[train_df.shape[0]:, :] del all_df<choose_model_class>
pred_df = pd.DataFrame(np.c_[np.arange(892, len(test_set)+892)[:,None], predict(model, test_loader)], columns=['PassengerId', 'Survived']) pred_df.to_csv('titanic_submission.csv', index=False )
kernel_id: 10,543,398 (Titanic - Machine Learning from Disaster)
N_SPLITS = 5 BATCH_SIZE = 256 EPOCHS = 100 EARLY_STOPPING_PATIENCE = 15 OPTIMIZER = tf.keras.optimizers.Nadam() LOSS='binary_crossentropy' METRICS=[tf.keras.metrics.AUC() ]<choose_model_class>
data_test = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId')
data_train = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId')
data_train
def get_cnn_model_1() : model = tf.keras.models.Sequential([ tf.keras.layers.Reshape(( len(features)* 1, 1), input_shape=(len(features)* 1,)) , tf.keras.layers.Dense(64, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(1024, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Flatten() , tf.keras.layers.Dense(1, activation='sigmoid'), ]) return model def get_cnn_model() : model = tf.keras.models.Sequential([ tf.keras.layers.Reshape(( len(features)* 1, 1), input_shape=(len(features)* 1,)) , tf.keras.layers.Conv1D(32, 1, activation='elu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Conv1D(64, 1, activation='elu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Flatten() , tf.keras.layers.Dense(1024, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)) , tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.005)) , tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(1, activation='sigmoid'), ]) return model def get_nn_model_2() : model = tf.keras.models.Sequential([ tf.keras.layers.Reshape(( len(features)* 1, 1), input_shape=(len(features)* 1,)) , tf.keras.layers.Flatten() , tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.02)) , tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(16, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)) , tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(1, activation='sigmoid'), ]) return model<prepare_x_and_y>
data_train.isnull().sum()
kfold = StratifiedKFold(n_splits=N_SPLITS, shuffle=True, random_state=42) models = [] histories = [] for fold_num,(train_index, val_index)in tqdm(enumerate(kfold.split(train_df[features].values, train_df['target'].values)) , total=N_SPLITS): print(f'Fold {fold_num+1}/{N_SPLITS}:') X_train = train_df.loc[train_index, features].values y_train = train_df.loc[train_index, 'target'].values.reshape(-1, 1) X_val = train_df.loc[val_index, features].values y_val = train_df.loc[val_index, 'target'].values.reshape(-1, 1) model = get_cnn_model_1() model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS) early_stopping_callback = tf.keras.callbacks.EarlyStopping(monitor='val_auc', mode='max', patience=EARLY_STOPPING_PATIENCE, restore_best_weights=True) history = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=EPOCHS, batch_size=BATCH_SIZE, callbacks=[early_stopping_callback]) histories.append(history) val_preds = model.predict(X_val) val_auc = roc_auc_score(y_val, val_preds) print(f'Fold validation AUC: {val_auc}') print() models.append(model )<predict_on_test>
for i in data_train.columns: print(i ,': ',len(data_train[i].unique()))
train_preds = np.zeros(train_df.shape) test_preds = np.zeros(test_df.shape) for model in models: pred_train = model.predict(train_df[features].values) pred_test = model.predict(test_df[features].values) train_preds += pred_train test_preds += pred_test train_preds /= len(models) test_preds /= len(models )<split>
columnsForDrop = ['Name', 'Cabin','Ticket','SibSp','Parch'] data_train.drop(columns=columnsForDrop, inplace=True) data_test.drop(columns=columnsForDrop, inplace=True) data_train
train_preds = train_preds[:, 0] test_preds = test_preds[:, 0]<compute_test_metric>
print(data_train.Sex.value_counts()) print('----------------------------------------------') print(data_train.Embarked.value_counts() )
train_auc = roc_auc_score(train_df['target'], train_preds) print(f'Train AUC: {train_auc}' )<load_from_csv>
y = data_train.Survived X = data_train.drop(columns=['Survived'] )
test_df = pd.read_csv('test_small_with_targets.csv' )<compute_test_metric>
X_train, X_test, y_train, y_test = train_test_split(X, y)
if test_df['target'][0] != -1: test_auc = roc_auc_score(test_df['target'], test_preds) print(f'Test AUC: {test_auc}' )<save_to_csv>
my_imputer = SimpleImputer() imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train)) imputed_X_test = pd.DataFrame(my_imputer.transform(X_test)) imputed_X_train.columns = X_train.columns imputed_X_test.columns = X_test.columns
sub = pd.DataFrame({'ID_code': test_df['ID_code'], 'target': test_preds}) sub.to_csv('submission.csv', index=False )<import_modules>
from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score, classification_report, f1_score from sklearn.neighbors import KNeighborsClassifier
FileLink('submission.csv' )<load_from_csv>
parameters = {'max_depth': list(range(6, 30, 10)) , 'max_leaf_nodes': list(range(50, 500, 100)) , 'n_estimators': list(range(50, 1001, 150)) } gsearch = GridSearchCV(estimator=RandomForestClassifier() , param_grid = parameters, scoring='f1', n_jobs=4,cv=5,verbose=7) gsearch.fit(imputed_X_train, y_train )
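The cell defining final_model (used for the final predictions below) is not shown; a minimal sketch of the likely follow-up, under the assumption that the tuned model is taken from the fitted search object:

final_model = gsearch.best_estimator_  # assumption: GridSearchCV refit this on imputed_X_train
val_pred = final_model.predict(imputed_X_test)
print(f1_score(y_test, val_pred))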
train = pd.read_csv(r"../input/train.csv") test = pd.read_csv(r"../input/test.csv")<prepare_x_and_y>
print(gsearch.best_params_.get('max_leaf_nodes')) print(gsearch.best_params_.get('max_depth'))
cols = train.columns.values.tolist() [2: ] predictors = train[cols] target = train[['target']] pre_test = test[cols]<split>
data_test.Age.fillna(X.Age.mean() , inplace=True) data_test.Fare.fillna(X.Fare.mean() , inplace=True) data_test.isna().sum()
%%time sfl = StratifiedKFold(n_splits = 3, shuffle=True) pred_test_y = np.zeros(( test.shape[0])) seed = 2019 N = 0 for train_indices, test_indices in sfl.split(predictors, target): params = { 'num_leaves': 15, 'max_bin': 119, 'min_data_in_leaf': 11, 'learning_rate': 0.02, 'min_sum_hessian_in_leaf': 0.00245, 'bagging_fraction': 1.0, 'bagging_freq': 5, 'feature_fraction': 0.05, 'lambda_l1': 4.972, 'lambda_l2': 2.276, 'min_gain_to_split': 0.65, 'max_depth': 14, 'save_binary': True, 'seed': seed, 'feature_fraction_seed': 1337, 'bagging_seed': 1337, 'drop_seed': 1337, 'data_random_seed': 1337, 'objective': 'binary', 'boosting_type': 'gbdt', 'verbose': 1, 'metric': 'auc', 'is_unbalance': True, 'boost_from_average': False, } X_train, X_test = predictors.iloc[train_indices], predictors.iloc[test_indices] y_train, y_test = target.iloc[train_indices], target.iloc[test_indices] lgtrain = lgb.Dataset(X_train, label=y_train) lgval = lgb.Dataset(X_test, label=y_test) evals_result = {} model2 = lgb.train(params, lgtrain, 10000, valid_sets=[lgval], early_stopping_rounds=100, verbose_eval=20, evals_result=evals_result) pred_val = model2.predict(X_test, num_iteration=model2.best_iteration) pred_test_y += model2.predict(pre_test, num_iteration=model2.best_iteration) print("Validation score is :", roc_auc_score(y_test, pred_val)) print(N, "Iteration completed") seed+= 2000 N+=1 pred_test = pred_test_y/N<define_variables>
preds = final_model.predict(data_test) print(preds.shape) print(data_test.shape)
Titanic - Machine Learning from Disaster
10,543,398
<split><EOS>
test_out = pd.DataFrame({ 'PassengerId': data_test.index, 'Survived': preds }) test_out.to_csv('submission.csv', index=False) print('Done')
Titanic - Machine Learning from Disaster
10,537,096
<SOS> metric: categorization accuracy Kaggle data source: titanic-machine-learning-from-disaster<define_variables>
import pandas as pd import matplotlib.pyplot as plt import numpy as np
Titanic - Machine Learning from Disaster
10,537,096
predictions = pred_test*0.5 + pred_test2*0.5<save_to_csv>
data = pd.read_csv('../input/titanic/train.csv', index_col="PassengerId") test = pd.read_csv('../input/titanic/test.csv', index_col="PassengerId")
Titanic - Machine Learning from Disaster
10,537,096
predictions = pd.DataFrame(predictions, columns=['target']) sub = pd.concat([test[['ID_code']], predictions[['target']]], axis=1) sub.to_csv('submission.csv', index=False)<set_options>
indexs = test.index
Titanic - Machine Learning from Disaster
10,537,096
%reload_ext autoreload %autoreload 2 %matplotlib inline<import_modules>
X = data.iloc[:,1:] y = data.iloc[:,0]
Titanic - Machine Learning from Disaster
10,537,096
from fastai import * from fastai.vision import *<define_variables>
X['Ticket'].mode()
Titanic - Machine Learning from Disaster
10,537,096
path = Path('../input/aerial-cactus-identification/')<load_from_csv>
X = X.drop(columns=['Name'])
Titanic - Machine Learning from Disaster
10,537,096
train = pd.read_csv('../input/aerial-cactus-identification/train.csv') test = pd.read_csv('../input/aerial-cactus-identification/sample_submission.csv')<define_variables>
imputer_no = SimpleImputer(missing_values=np.nan, strategy='mean') imputer_no.fit(X[['Pclass','Age','SibSp','Fare','Parch']]) X[['Pclass','Age','SibSp','Fare','Parch']] = imputer_no.transform(X[['Pclass','Age','SibSp','Fare','Parch']])
Titanic - Machine Learning from Disaster
10,537,096
np.random.seed(50) <feature_engineering>
imputer_cat = SimpleImputer(missing_values=np.nan, strategy='most_frequent') imputer_cat.fit(X[['Sex','Cabin','Embarked','Ticket']]) X[['Sex','Cabin','Embarked','Ticket']] = imputer_cat.transform(X[['Sex','Cabin','Embarked','Ticket']])
Titanic - Machine Learning from Disaster
10,537,096
tfms = get_transforms(do_flip=True)<define_variables>
from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder
Titanic - Machine Learning from Disaster
10,537,096
data.show_batch(rows=3, figsize=(7,8))<choose_model_class>
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(handle_unknown='ignore'), [1,5,7,8])], remainder='passthrough') X = ct.fit_transform(X)
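With `remainder='passthrough'`, the one-hot columns come first in the transformed matrix and the untouched columns are appended after them. A hedged sketch (assuming the fitted `ct` above; `get_feature_names_out` needs a reasonably recent scikit-learn) for inspecting what the encoder learned:

# Sketch only: look inside the fitted ColumnTransformer
encoder = ct.named_transformers_['encoder']
print(encoder.categories_)            # learned categories for columns 1, 5, 7, 8
# print(ct.get_feature_names_out())   # full output column names, where supported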
Titanic - Machine Learning from Disaster
10,537,096
learn = cnn_learner(data, models.resnet50, metrics=error_rate)<train_model>
train_X, test_X, train_y, test_y = train_test_split(X, y)
Titanic - Machine Learning from Disaster
10,537,096
learn.fit_one_cycle(4) <create_dataframe>
for i in range(10, 300, 10): classifier = RandomForestClassifier(n_estimators=i, criterion='gini') classifier.fit(train_X, train_y) y_predict = classifier.predict(test_X) print('for {} estimators: accuracy {}'.format(i, accuracy_score(y_true=test_y, y_pred=y_predict)))
Titanic - Machine Learning from Disaster
10,537,096
test_data = ImageList.from_df(test, path=path/'test', folder='test') data.add_test(test_data)<predict_on_test>
test = test.drop(columns=['Name'])
Titanic - Machine Learning from Disaster
10,537,096
preds, _ = learn.get_preds(ds_type=DatasetType.Test) test.has_cactus = preds.numpy()[:, 0]<save_to_csv>
test[['Pclass','Age','SibSp','Fare','Parch']] = imputer_no.transform(test[['Pclass','Age','SibSp','Fare','Parch']]) test[['Sex','Cabin','Embarked','Ticket']] = imputer_cat.transform(test[['Sex','Cabin','Embarked','Ticket']]) test = ct.transform(test)
Titanic - Machine Learning from Disaster
10,537,096
test.to_csv("submit.csv", index=False )<load_from_csv>
classifier = RandomForestClassifier(n_estimators=150, criterion='gini') classifier.fit(X, y) y_predict = classifier.predict(test) pd.DataFrame(y_predict, index=indexs, columns=['Survived']).to_csv('output.csv')
Titanic - Machine Learning from Disaster
10,537,096
train_dir=".. /input/train/train" test_dir=".. /input/test/test" train = pd.read_csv('.. /input/train.csv') test = pd.read_csv(".. /input/sample_submission.csv") data_folder = Path(".. /input") <choose_model_class>
Titanic - Machine Learning from Disaster
10,537,096
learn = cnn_learner(train_img, models.densenet161, metrics=[error_rate, accuracy]) <find_best_params>
Titanic - Machine Learning from Disaster
10,513,115
learn.lr_find() <train_model>
t_data = pd.read_csv('/kaggle/input/titanic/train.csv', index_col='PassengerId') t_data.head()
Titanic - Machine Learning from Disaster
10,513,115
lr = 1e-02 learn.fit_one_cycle(10, slice(lr)) <predict_on_test>
t_data.drop(columns=['Name','Ticket','Fare','Cabin'], inplace=True)
Titanic - Machine Learning from Disaster
10,513,115
preds, _ = learn.get_preds(ds_type=DatasetType.Test)<filter>
for col in t_data.columns: print(t_data[col].value_counts())
Titanic - Machine Learning from Disaster
10,513,115
test.has_cactus = preds.numpy()[:, 0]<save_to_csv>
t_data.isna().sum()
Titanic - Machine Learning from Disaster
10,513,115
test.to_csv('submission.csv', index=False)<import_modules>
t_data.Age.value_counts().mode()
Titanic - Machine Learning from Disaster
10,513,115
import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.image as mpimg import glob import scipy import cv2 import keras<import_modules>
from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split from sklearn.metrics import accuracy_score
Titanic - Machine Learning from Disaster
10,513,115
import random<load_from_csv>
target_col="Survived" y = t_data[target_col] X = t_data[['Pclass','Sex','Age','SibSp','Parch','Embarked']] X = pd.get_dummies(X) train_X, val_X, train_y, val_y = train_test_split(X, y) val_X
Titanic - Machine Learning from Disaster
10,513,115
train_data = pd.read_csv('../input/train.csv')<count_values>
cols_with_missing = [col for col in train_X.columns if train_X[col].isnull().any()] red_X_train = train_X.drop(columns=cols_with_missing) red_X_val = val_X.drop(columns=cols_with_missing)
Titanic - Machine Learning from Disaster
10,513,115
train_data.has_cactus.value_counts()<define_search_model>
def get_accuracy(n_estimators, max_depth, train_X, val_X, train_y, val_y): model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, random_state=1) model.fit(train_X, train_y) preds = model.predict(val_X) lr_accuracy = accuracy_score(val_y, preds) return lr_accuracy
Titanic - Machine Learning from Disaster
10,513,115
def image_generator(batch_size=16, all_data=True, shuffle=True, train=True, indexes=None): while True: if indexes is None: if train: if all_data: indexes = np.arange(train_data.shape[0]) else: indexes = np.arange(train_data[:15000].shape[0]) if shuffle: np.random.shuffle(indexes) else: indexes = np.arange(train_data[15000:].shape[0]) N = int(len(indexes) / batch_size) for i in range(N): current_indexes = indexes[i*batch_size:(i+1)*batch_size] batch_input = [] batch_output = [] for index in current_indexes: img = mpimg.imread('../input/train/train/' + train_data.id[index]) batch_input += [img] batch_input += [img[::-1, :, :]] batch_input += [img[:, ::-1, :]] batch_input += [np.rot90(img)] temp_img = np.zeros_like(img) temp_img[:28, :, :] = img[4:, :, :] batch_input += [temp_img] temp_img = np.zeros_like(img) temp_img[:, :28, :] = img[:, 4:, :] batch_input += [temp_img] temp_img = np.zeros_like(img) temp_img[4:, :, :] = img[:28, :, :] batch_input += [temp_img] temp_img = np.zeros_like(img) temp_img[:, 4:, :] = img[:, :28, :] batch_input += [temp_img] batch_input += [cv2.resize(img[2:30, 2:30, :], (32, 32))] batch_input += [scipy.ndimage.interpolation.rotate(img, 10, reshape=False)] batch_input += [scipy.ndimage.interpolation.rotate(img, 5, reshape=False)] for _ in range(11): batch_output += [train_data.has_cactus[index]] batch_input = np.array(batch_input) batch_output = np.array(batch_output) yield (batch_input, batch_output.reshape(-1, 1))<choose_model_class>
accuracy = get_accuracy(200, 10, red_X_train, red_X_val, train_y, val_y) print("Validation accuracy for Random Forest Model: {}".format(accuracy))
Titanic - Machine Learning from Disaster
10,513,115
model = keras.models.Sequential() model.add(keras.layers.Conv2D(64,(5, 5), input_shape=(32, 32, 3))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Conv2D(64,(5, 5))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Conv2D(128,(5, 5))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Conv2D(128,(5, 5))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Conv2D(256,(3, 3))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Conv2D(256,(3, 3))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Conv2D(512,(3, 3))) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Flatten()) model.add(keras.layers.Dense(100)) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.LeakyReLU(alpha=0.3)) model.add(keras.layers.Dense(1, activation='sigmoid'))<choose_model_class>
my_imputer = SimpleImputer() imputed_X_train = pd.DataFrame(my_imputer.fit_transform(train_X)) imputed_X_valid = pd.DataFrame(my_imputer.transform(val_X)) imputed_X_train.columns = train_X.columns imputed_X_valid.columns = val_X.columns
Titanic - Machine Learning from Disaster
10,513,115
opt = keras.optimizers.Adam(0.0001) model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])<train_model>
accuracy = get_accuracy(1000, 10, imputed_X_train, imputed_X_valid, train_y, val_y) print("Validation accuracy for Random Forest Model: {}".format(accuracy))
Titanic - Machine Learning from Disaster
10,513,115
model.fit_generator(image_generator(), steps_per_epoch=train_data.shape[0] / 16, epochs=30)<find_best_params>
max_accur = 0.5 max_dep = 0 best_tree_size = 0 for maxDepth in range(1, 11): for i in range(10, 101, 10): accuracy = get_accuracy(i, maxDepth, imputed_X_train, imputed_X_valid, train_y, val_y) if accuracy > max_accur: max_accur = accuracy max_dep = maxDepth best_tree_size = i print("max accuracy = {} max depth = {} best tree size = {}".format(max_accur, max_dep, best_tree_size))
Titanic - Machine Learning from Disaster
10,513,115
keras.backend.eval(model.optimizer.lr.assign(0.00001))<train_model>
max_accur = 0.5 max_dep = 0 best_tree_size = 0 for maxDepth in range(1, 11): for i in range(10, 101, 10): accuracy = get_accuracy(i, maxDepth, red_X_train, red_X_val, train_y, val_y) if accuracy > max_accur: max_accur = accuracy max_dep = maxDepth best_tree_size = i print("max accuracy = {} max depth = {} best tree size = {}".format(max_accur, max_dep, best_tree_size))
Titanic - Machine Learning from Disaster
10,513,115
model.fit_generator(image_generator(), steps_per_epoch=train_data.shape[0] / 16, epochs=15)<load_pretrained>
pd.get_dummies(df, prefix=['col1', 'col2'])<load_pretrained>
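The cell above shows only the call signature and references a `df` that is not defined here; a self-contained miniature with a hypothetical two-column frame:

import pandas as pd

# Hypothetical frame to illustrate the prefix argument
df_demo = pd.DataFrame({'col1': ['a', 'b'], 'col2': ['x', 'y']})
print(pd.get_dummies(df_demo, prefix=['col1', 'col2']))
# dummy columns come out as col1_a, col1_b, col2_x, col2_y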
Titanic - Machine Learning from Disaster
10,513,115
indexes = np.arange(train_data.shape[0]) N = int(len(indexes) / 64) batch_size = 64 wrong_ind = [] for i in range(N): current_indexes = indexes[i*64:(i+1)*64] batch_input = [] batch_output = [] for index in current_indexes: img = mpimg.imread('../input/train/train/' + train_data.id[index]) batch_input += [img] batch_output.append(train_data.has_cactus[index]) batch_input = np.array(batch_input) model_pred = model.predict_classes(batch_input) for j in range(len(batch_output)): if model_pred[j] != batch_output[j]: wrong_ind.append(i*batch_size + j)<load_pretrained>
accuracy = get_accuracy(60, 4, red_X_train, red_X_val, train_y, val_y) accuracy
Titanic - Machine Learning from Disaster
10,513,115
indexes = np.arange(train_data.shape[0]) N = int(len(indexes) / 64) batch_size = 64 wrong_ind = [] for i in range(N): current_indexes = indexes[i*64:(i+1)*64] batch_input = [] batch_output = [] for index in current_indexes: img = mpimg.imread('../input/train/train/' + train_data.id[index]) batch_input += [img[::-1, :, :]] batch_output.append(train_data.has_cactus[index]) batch_input = np.array(batch_input) model_pred = model.predict_classes(batch_input) for j in range(len(batch_output)): if model_pred[j] != batch_output[j]: wrong_ind.append(i*batch_size + j)<load_pretrained>
test_data = pd.read_csv('/kaggle/input/titanic/test.csv') test_data.info()
Titanic - Machine Learning from Disaster
10,513,115
indexes = np.arange(train_data.shape[0]) N = int(len(indexes) / 64) batch_size = 64 wrong_ind = [] for i in range(N): current_indexes = indexes[i*64:(i+1)*64] batch_input = [] batch_output = [] for index in current_indexes: img = mpimg.imread('../input/train/train/' + train_data.id[index]) batch_input += [img[:, ::-1, :]] batch_output.append(train_data.has_cactus[index]) batch_input = np.array(batch_input) model_pred = model.predict_classes(batch_input) for j in range(len(batch_output)): if model_pred[j] != batch_output[j]: wrong_ind.append(i*batch_size + j)<define_variables>
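The three loops above rerun the same error analysis on the raw, vertically flipped, and horizontally flipped images, each time overwriting `wrong_ind`. If each pass instead kept its own list (hypothetical names below), the images the model gets wrong under every view are simply the intersection:

# Sketch only: assumes wrong_raw, wrong_vflip, wrong_hflip were
# collected by the three loops above instead of reusing wrong_ind
hard_cases = set(wrong_raw) & set(wrong_vflip) & set(wrong_hflip)
print(len(hard_cases), 'images misclassified under all three views')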
model = RandomForestClassifier(n_estimators=60, max_depth=10, random_state=1) model.fit(imputed_X_train, train_y) preds = model.predict(imputed_X_valid) model_accuracy = accuracy_score(val_y, preds) print("Accuracy = {}".format(model_accuracy))
Titanic - Machine Learning from Disaster
10,513,115
import os test_files = os.listdir('../input/test/test/')<predict_on_test>
test = test_data[['Pclass','Sex','Age','SibSp','Parch','Embarked']] final_X_test = pd.get_dummies(test) final_X_test.info()
Titanic - Machine Learning from Disaster
10,513,115
batch = 40 all_out = [] for i in range(int(4000/batch)): images = [] for j in range(batch): img = mpimg.imread('../input/test/test/' + test_files[i*batch + j]) images += [img] out = model.predict(np.array(images)) all_out += [out]<create_dataframe>
final_X_test = pd.DataFrame(my_imputer.transform(final_X_test))
Titanic - Machine Learning from Disaster
10,513,115
sub_file = pd.DataFrame(data={'id': test_files, 'has_cactus': np.concatenate(all_out).reshape(-1).tolist()})<save_to_csv>
predictions = model.predict(final_X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
9,770,354
sub_file.to_csv('sample_submission.csv', index=False)<set_options>
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
Titanic - Machine Learning from Disaster
9,770,354
pd.set_option('display.float_format', lambda x: '%.3f' % x) RSEED = 100 %matplotlib inline plt.style.use('fivethirtyeight') plt.rcParams['font.size'] = 18 palette = sns.color_palette('Paired', 10)<load_from_csv>
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
Titanic - Machine Learning from Disaster
9,770,354
data = pd.read_csv('../input/train.csv', nrows=5_000_000, parse_dates=['pickup_datetime']).drop(columns='key') data = data.dropna() data.head()<filter>
all_data['Embarked'].fillna(all_data['Embarked'].mode()[0], inplace=True) all_data['Fare'].fillna(all_data['Fare'].median(), inplace=True)
Titanic - Machine Learning from Disaster
9,770,354
print(f"There are {len(data[data['fare_amount'] < 0])} negative fares.") print(f"There are {len(data[data['fare_amount'] == 0])} $0 fares.") print(f"There are {len(data[data['fare_amount'] > 100])} fares greater than $100." )<filter>
all_data['Title'] = all_data.Name.str.extract(r'([A-Za-z]+)\.', expand=False) all_data['Title'].value_counts() frequent_titles = all_data['Title'].value_counts()[:5].index.tolist() frequent_titles all_data['Title'] = all_data['Title'].apply(lambda x: x if x in frequent_titles else 'Other') all_data['Title']
Titanic - Machine Learning from Disaster
9,770,354
data = data[data['fare_amount'].between(left=2.5, right=100)]<compute_test_metric>
median_ages = {} for title in frequent_titles: median_ages[title] = all_data.loc[all_data['Title'] == title]['Age'].median() median_ages['Other'] = all_data['Age'].median() all_data.loc[all_data['Age'].isnull(), 'Age'] = all_data[all_data['Age'].isnull()]['Title'].map(median_ages) all_data['Age']
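The fill above works because `Series.map` with a dict looks each title up and returns the matching median; a self-contained miniature with made-up numbers:

import pandas as pd

# Hypothetical titles and medians, mirroring the median_ages lookup above
titles = pd.Series(['Mr', 'Miss', 'Other'])
median_ages_demo = {'Mr': 30.0, 'Miss': 21.0, 'Other': 28.0}
print(titles.map(median_ages_demo))   # 30.0, 21.0, 28.0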
Titanic - Machine Learning from Disaster
9,770,354
def ecdf(x): x = np.sort(x) n = len(x) y = np.arange(1, n + 1) / n return x, y<filter>
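A hedged usage sketch for `ecdf` (assuming the cleaned `data` frame and the `plt` import from the surrounding cells), plotting the empirical CDF of the fares:

# Sketch only: ECDF of the cleaned fare_amount column
xs, ys = ecdf(data['fare_amount'])
plt.plot(xs, ys, marker='.', linestyle='none')
plt.xlabel('fare_amount')
plt.ylabel('ECDF')
plt.show()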
Cat_Features = ['Sex', 'Embarked', 'Title'] for feature in Cat_Features: label = LabelEncoder() all_data[feature] = label.fit_transform(all_data[feature]) all_data[Cat_Features]
Titanic - Machine Learning from Disaster
9,770,354
data = data.loc[data['passenger_count'] < 6]<train_model>
Cont_Features = ['Age', 'Fare'] num_bins = 5 for feature in Cont_Features: bin_feature = feature + 'Bin' all_data[bin_feature] = pd.qcut(all_data[feature], num_bins) label = LabelEncoder() all_data[bin_feature] = label.fit_transform(all_data[bin_feature]) all_data.head(10)
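`pd.qcut` returns categorical intervals with roughly equal counts per bin; the `LabelEncoder` step then replaces each interval with an ordinal code. A self-contained miniature (pandas' own `.cat.codes` gives the same ordinals as the encoder):

import pandas as pd

# Hypothetical fares; qcut into 3 quantile bins
fares = pd.Series([5.0, 8.0, 13.0, 26.0, 70.0, 512.0])
binned = pd.qcut(fares, 3)
print(binned)            # interval labels
print(binned.cat.codes)  # ordinal codes 0..2, like LabelEncoder output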
Titanic - Machine Learning from Disaster
9,770,354
print(f'Initial Observations: {data.shape[0]}')<define_variables>
all_data['Surname'] = all_data.Name.str.extract(r'([A-Za-z]+),', expand=False) all_data['TicketPrefix'] = all_data.Ticket.str.extract(r' (.*\d)', expand=False) all_data['Surname_Ticket'] = all_data['Surname'] + all_data['TicketPrefix'] all_data['IsFamily'] = all_data.Surname_Ticket.duplicated(keep=False).astype(int) all_data['Child'] = all_data.Age.map(lambda x: 1 if x <= 16 else 0) FamilyWithChild = all_data[(all_data.IsFamily==1)&(all_data.Child==1)]['Surname_Ticket'].unique() all_data['FamilyId'] = 0 for ind, identifier in enumerate(FamilyWithChild): all_data.loc[all_data.Surname_Ticket==identifier, ['FamilyId']] = ind + 1 all_data['FamilySurvival'] = 1 Survived_by_FamilyId = all_data.groupby('FamilyId').Survived.sum() for i in range(1, len(FamilyWithChild)+1): if Survived_by_FamilyId[i] >= 1: all_data.loc[all_data.FamilyId==i, ['FamilySurvival']] = 2 elif Survived_by_FamilyId[i] == 0: all_data.loc[all_data.FamilyId==i, ['FamilySurvival']] = 0 all_data.head(10)
Titanic - Machine Learning from Disaster