kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
14,376,120
modelsEffnet = [] for path in MODELS_EFFNET: state_dict = torch.load(path, map_location=torch.device('cpu')) model = HuBMAPEffNet().to(device) model.load_state_dict(state_dict) model.eval() model.to(device) modelsEffnet.append(model) del state_dict modelsResnet = [] for path in MODELS_RESNET: state_dict = torch.load(path, map_location=torch.device('cpu')) model = HuBMAPResNet().to(device) model.load_state_dict(state_dict) model.eval() model.to(device) modelsResnet.append(model) del state_dict models = modelsEffnet + modelsResnet print(len(models))<predict_on_test>
dResults = pd.DataFrame(columns = ['Model', 'MSE'] )
Tabular Playground Series - Jan 2021
14,376,120
def Make_prediction(img, tta = True): pred = None with torch.no_grad() : for model in models: p_tta = None p = model(img) p = torch.sigmoid(p ).detach() if p_tta is None: p_tta = p else: p_tta += p if tta: flips = [[-1],[-2],[-2,-1]] for f in flips: imgf = torch.flip(img, f) p = model(imgf) p = torch.flip(p, f) p_tta += torch.sigmoid(p ).detach() p_tta /=(1+len(flips)) if pred is None: pred = p_tta else: pred += p_tta pred /= len(models) return pred<categorify>
classifiers = [ DummyRegressor(strategy='median'), SGDRegressor() , BayesianRidge() , LassoLars() , ARDRegression() , PassiveAggressiveRegressor() , LinearRegression() , LGBMRegressor() , RandomForestRegressor() , XGBRegressor() ] for item in classifiers: print(item) clf = item dResults=FitAndScoreModel(dResults,item,item,X_train,y_train,X_test,y_test )
Tabular Playground Series - Jan 2021
14,376,120
names, predictions = [],[] for idx, row in tqdm(df_sample.iterrows() ,total=len(df_sample)) : imageId = row['id'] data = rasterio.open(os.path.join(DATA, imageId+'.tiff'), transform = identity, num_threads='all_cpus') preds = np.zeros(data.shape, dtype=np.uint8) dataset = HuBMAPDataset(data) dataloader = DataLoader(dataset, batch_size, num_workers=0, shuffle=False, pin_memory=True) for i,(img, vertices)in enumerate(dataloader): img = img.to(device) pred = Make_prediction(img) pred = pred.squeeze().cpu().numpy() vertices = vertices.numpy() for p, vert in zip(pred, vertices): x1, x2, y1, y2 = vert p = cv2.resize(p,(window, window)) preds[x1:x2,y1:y2] +=(p > THRESHOLD ).astype(np.uint8) preds =(preds > 0.5 ).astype(np.uint8) rle = rle_encode_less_memory(preds) names.append(imageId) predictions.append(rle) del preds, dataset, dataloader gc.collect()<train_model>
dResults.sort_values(by='MSE', ascending=True,inplace=True) dResults.set_index('MSE',inplace=True) dResults.head(dResults.shape[0] )
Tabular Playground Series - Jan 2021
14,376,120
print('replacement' )<save_to_csv>
import optuna.integration.lightgbm as lgbTune
Tabular Playground Series - Jan 2021
14,376,120
df = pd.DataFrame({'id':names,'predicted':predictions}) df['predicted'].loc[df[df.id == 'd488c759a'].index] = '' df.to_csv('submission.csv', index=False )<load_pretrained>
params={'objective': 'regression', 'metric': 'rmse', 'num_leaves': 234, 'verbosity': -1, 'boosting_type': 'gbdt', 'n_jobs': -1, 'learning_rate': 0.005, 'max_depth': 8, 'tree_learner': 'serial', 'max_bin': 255, 'feature_pre_filter': False, 'bagging_fraction': 0.4134640813947842, 'bagging_freq': 1, 'feature_fraction': 0.4, 'lambda_l1': 9.511141306606756, 'lambda_l2': 1.3196758411622028e-08, 'min_child_samples': 20, 'num_iterations': 20000, 'early_stopping_round': 400}
Tabular Playground Series - Jan 2021
14,376,120
imshow_from_file('.. /input/pics-j/small_scale.png' )<load_pretrained>
n_fold = 10 folds = KFold(n_splits=n_fold, shuffle=True, random_state=42) train_columns = train.columns.values oof = np.zeros(len(train)) LGBMpredictions = np.zeros(len(test)) feature_importance_df = pd.DataFrame() for fold_,(trn_idx, val_idx)in enumerate(folds.split(train, target.values)) : strLog = "fold {}".format(fold_) print(strLog) X_tr, X_val = train.iloc[trn_idx], train.iloc[val_idx] y_tr, y_val = target.iloc[trn_idx], target.iloc[val_idx] model = LGBMRegressor(**params, n_estimators = 20000) model.fit(X_tr, y_tr, eval_set=[(X_tr, y_tr),(X_val, y_val)], eval_metric='rmse', verbose=1000, early_stopping_rounds=400) oof[val_idx] = model.predict(X_val, num_iteration=model.best_iteration_) fold_importance_df = pd.DataFrame() fold_importance_df["Feature"] = train_columns fold_importance_df["importance"] = model.feature_importances_[:len(train_columns)] fold_importance_df["fold"] = fold_ + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) LGBMpredictions += model.predict(test, num_iteration=model.best_iteration_)/ folds.n_splits
Tabular Playground Series - Jan 2021
14,376,120
imshow_from_file('.. /input/pics-j/big_scale.png' )<define_variables>
Tabular Playground Series - Jan 2021
14,376,120
IMAGES_DIR = '.. /input/hubmap-kidney-segmentation/test' WEIGHTS_SMALL = '.. /input/weights-j/t198_best_model.ckpt' WEIGHTS_BIG = '.. /input/weights-j/t246_best_model.ckpt' OUTPUT_FILE = 'submission.csv' THRESHOLD_SMALL = 0.6 RESIZE_FACTOR_SMALL = 0.25 ROUNDING_ORIG_SMALL = 64 OVERLAP_SMALL = 448 BASE_SIZE_CROP_SMALL = 4096 NET_SCALE_SMALL = 16 N_INPUT_CHANNELS_SMALL = 3 N_CLASSES_SMALL = 1 N_FEATUREMAPS_SMALL = [N_INPUT_CHANNELS_SMALL, 4, 4, 8, 16, 24, 32, 32, 48, N_CLASSES_SMALL] KERNELS_X_SIZE_SMALL = 3 KERNELS_Y_SIZE_SMALL = 3 KERNELS_INCREASED_X_SIZE_SMALL = 7 KERNELS_INCREASED_Y_SIZE_SMALL = 7 BASE_SIZE_BIG = 900 BASE_SIZE_HALF_BIG = BASE_SIZE_BIG // 2 NET_SCALE_BIG = 8 CENTER =(BASE_SIZE_BIG - 1)/ 2 N_PHI = 384 N_PHI_TILED = 3 * N_PHI N_PHI_OUT = N_PHI // NET_SCALE_BIG N_PHI_OUT_TILLED = 3 * N_PHI_OUT N_R = 128 N_R_OUT = N_R // NET_SCALE_BIG R_MAX = BASE_SIZE_BIG / 2 r_tmp_index = np.arange(N_R ).astype(np.float32 ).reshape(N_R, 1) r_mean_max =(N_R - 1 + 0.5)/ NET_SCALE_BIG - 0.5 NET_SCALE_BIG = 8 N_INPUT_CHANNELS_BIG = 3 N_CLASSES_BIG = 1 N_FEATUREMAPS_BIG = [N_INPUT_CHANNELS_BIG, 4, 4, 8, 16, 16, 32, 32, 32, N_CLASSES_BIG] KERNELS_X_SIZE_BIG = 3 KERNELS_Y_SIZE_BIG = 3 KERNELS_INCREASED_X_SIZE_BIG = 7 KERNELS_INCREASED_Y_SIZE_BIG = 7 KERNELS_INCREASED_X_SIZE_BIG_HALF =(KERNELS_INCREASED_X_SIZE_BIG - 1)/ 2 def int_round(inp): return int(np.round(inp)) RESIZE_FACTOR_SMALL_SCALE = RESIZE_FACTOR_SMALL def bool_to_u8(inp): return 255 *(inp.astype(np.uint8)) def int_round(inp): return int(np.round(inp)) def gauss_kernel(half_width): x = np.arange(-half_width, half_width + 1, dtype=np.float32) pseudo_sigma = 0.9 * half_width y = np.exp(-(x / pseudo_sigma)**2) y = y / np.sum(y) return y def extend_contour(contour, n_extended): contour_extended = np.zeros(( contour.shape[0] + 2 * n_extended, 2), np.float32) contour_extended[n_extended: -n_extended, :] = contour contour_extended[0: n_extended, :] = contour[-n_extended:, :] contour_extended[-n_extended:, :] = 
contour[0: n_extended, :] return contour_extended def smooth_contour(contour, kernel): kernel_half_width =(kernel.size - 1)// 2 contour_extended = extend_contour(contour, kernel_half_width) contour_smoothed = np.zeros(( contour.shape[0], 2), np.float32) contour_smoothed[:, 0] = np.convolve(contour_extended[:, 0], kernel, mode='valid') contour_smoothed[:, 1] = np.convolve(contour_extended[:, 1], kernel, mode='valid') return contour_smoothed def natural_parametrization(contour, points_per_pixel): x = contour[:, 0] y = contour[:, 1] dx = np.diff(x) dy = np.diff(y) dd = np.sqrt(dx**2 + dy**2) dd = np.concatenate(( [0], dd)) d = np.cumsum(dd) x_extended = np.concatenate(( x, [x[0]])) y_extended = np.concatenate(( y, [y[0]])) dd_extention = np.sqrt(( x[0] - x[-1])**2 +(y[0] - y[-1])**2) d_extended = np.concatenate(( d, [d[-1] + dd_extention])) new_size = int_round(points_per_pixel * d_extended[-1]) d_uniform = np.linspace(0, d_extended[-1], new_size + 1)[0:-1] x_iterpolated = np.interp(d_uniform, d_extended, x_extended) y_iterpolated = np.interp(d_uniform, d_extended, y_extended) contour_interpolated = np.zeros(( x_iterpolated.size, 2), np.float32) contour_interpolated[:, 0] = x_iterpolated contour_interpolated[:, 1] = y_iterpolated return contour_interpolated def delete_files(dir_): files = glob.glob(dir_ + '/*') for file in files: os.remove(file) def prepare_dir(dir_): if os.path.exists(dir_): delete_files(dir_) else: os.makedirs(dir_) def prepare_crop_indexes(x1, x2, size, size_crop): if x1 < 0: x1_src = 0 x2_src = x2 x1_dst = size_crop - x2 x2_dst = size_crop elif x2 > size: x1_src = x1 x2_src = size x1_dst = 0 x2_dst = size_crop -(x2 - size) else: x1_src = x1 x2_src = x2 x1_dst = 0 x2_dst = size_crop return x1_src, x2_src, x1_dst, x2_dst def round_with_mult_higher(inp, mult): return(( np.ceil(inp / mult)) * mult ).astype(np.int64) def crop_with_overlap_1d(size, overlap, base_size, rounding): n_crops_non_integer =(size - overlap)/(base_size - overlap) n_crops = 
int_round(n_crops_non_integer) delimeters = np.linspace(0, size, n_crops + 1) starts_0 = np.zeros(n_crops, np.float32) starts_0[0] = 0.0 ends_0 = np.zeros(n_crops, np.float32) ends_0[-1] = size starts_0[1: ] = delimeters[1: -1] - overlap / 2 ends_0[0: -1] = delimeters[1: -1] + overlap / 2 centers =(starts_0 + ends_0)/ 2 widths_0 = ends_0 - starts_0 widths = round_with_mult_higher(widths_0, rounding) starts = np.zeros(n_crops, np.int64) starts[0] = 0 starts[1: -1] = np.round(centers[1: -1] - widths[1: -1] / 2 ).astype(np.int64) base_size_rounded = round_with_mult_higher(base_size, rounding) starts[-1] = size - base_size_rounded ends = np.zeros(n_crops, np.int64) ends[-1] = size ends[0] = base_size_rounded ends[1: -1] = starts[1: -1] + widths[1: -1] separators = np.zeros(n_crops + 1, np.float32) separators[0] = 0.0 separators[-1] = size - 1 separators[1:-1] =(ends[0: -1] - 1 + starts[1:])/ 2 return starts, ends, separators def weight_variable(name, shape): return tf.Variable(tf.initializers.GlorotUniform()(shape=shape), name=name) def weight_variable_for_skip_connection(name, shape): return tf.Variable(tf.constant_initializer(1.0 / shape[2] )(shape=shape), name=name) def bias_variable(name, shape): return tf.Variable(tf.zeros_initializer()(shape=shape), name=name) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') def avarage_pool_2x2(x): return tf.nn.avg_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') def avarage_pool_4x4(x): return tf.nn.avg_pool(x, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME') class NetModuleSmallScale(tf.Module): def __init__(self): self.W_conv1 = weight_variable("W_conv1", [KERNELS_Y_SIZE_SMALL, KERNELS_X_SIZE_SMALL, N_FEATUREMAPS_SMALL[0], N_FEATUREMAPS_SMALL[1]]) self.b_conv1 = bias_variable("b_conv1", [N_FEATUREMAPS_SMALL[1]]) block_name = "block1" n_featuremaps = N_FEATUREMAPS_SMALL[1: 3 + 1] 
self.block1_W_conv1 = weight_variable(block_name + "_" + "W_conv1", [KERNELS_Y_SIZE_SMALL, KERNELS_X_SIZE_SMALL, n_featuremaps[0], n_featuremaps[1]]) self.block1_b_conv1 = bias_variable(block_name + "_" + "b_conv1", [n_featuremaps[1]]) self.block1_W_conv2 = weight_variable(block_name + "_" + "W_conv2", [KERNELS_Y_SIZE_SMALL, KERNELS_X_SIZE_SMALL, n_featuremaps[1], n_featuremaps[2]]) self.block1_b_conv2 = bias_variable(block_name + "_" + "b_conv2", [n_featuremaps[2]]) self.block1_W_conv3 = weight_variable_for_skip_connection(block_name + "_" + "W_conv3", [1, 1, n_featuremaps[0], n_featuremaps[2]]) block_name = "block2" n_featuremaps = N_FEATUREMAPS_SMALL[3: 5 + 1] self.block2_W_conv1 = weight_variable(block_name + "_" + "W_conv1", [KERNELS_Y_SIZE_SMALL, KERNELS_X_SIZE_SMALL, n_featuremaps[0], n_featuremaps[1]]) self.block2_b_conv1 = bias_variable(block_name + "_" + "b_conv1", [n_featuremaps[1]]) self.block2_W_conv2 = weight_variable(block_name + "_" + "W_conv2", [KERNELS_Y_SIZE_SMALL, KERNELS_X_SIZE_SMALL, n_featuremaps[1], n_featuremaps[2]]) self.block2_b_conv2 = bias_variable(block_name + "_" + "b_conv2", [n_featuremaps[2]]) self.block2_W_conv3 = weight_variable_for_skip_connection(block_name + "_" + "W_conv3", [1, 1, n_featuremaps[0], n_featuremaps[2]]) block_name = "block3" n_featuremaps = N_FEATUREMAPS_SMALL[5: 10 + 1] self.block3_W_conv1 = weight_variable(block_name + "_" + "W_conv1", [KERNELS_INCREASED_Y_SIZE_SMALL, KERNELS_INCREASED_X_SIZE_SMALL, n_featuremaps[0], n_featuremaps[1]]) self.block3_b_conv1 = bias_variable(block_name + "_" + "b_conv1", [n_featuremaps[1]]) self.block3_W_conv2 = weight_variable(block_name + "_" + "W_conv2", [KERNELS_INCREASED_Y_SIZE_SMALL, KERNELS_INCREASED_X_SIZE_SMALL, n_featuremaps[1], n_featuremaps[2]]) self.block3_b_conv2 = bias_variable(block_name + "_" + "b_conv2", [n_featuremaps[2]]) self.block3_W_conv4 = weight_variable(block_name + "_" + "W_conv4", [KERNELS_INCREASED_Y_SIZE_SMALL, KERNELS_INCREASED_X_SIZE_SMALL, 
n_featuremaps[2], n_featuremaps[3]]) self.block3_b_conv4 = bias_variable(block_name + "_" + "b_conv4", [n_featuremaps[3]]) self.block3_W_conv5 = weight_variable(block_name + "_" + "W_conv5", [KERNELS_INCREASED_Y_SIZE_SMALL, KERNELS_INCREASED_X_SIZE_SMALL, n_featuremaps[3], n_featuremaps[4]]) self.block3_b_conv5 = bias_variable(block_name + "_" + "b_conv5", [n_featuremaps[4]]) self.block3_W_conv3 = weight_variable_for_skip_connection(block_name + "_" + "W_conv3", [1, 1, n_featuremaps[0], n_featuremaps[4]]) self.var_list = [self.W_conv1, self.b_conv1, self.block1_W_conv1, self.block1_b_conv1, self.block1_W_conv2, self.block1_b_conv2, self.block1_W_conv3, self.block2_W_conv1, self.block2_b_conv1, self.block2_W_conv2, self.block2_b_conv2, self.block2_W_conv3, self.block3_W_conv1, self.block3_b_conv1, self.block3_W_conv2, self.block3_b_conv2, self.block3_W_conv3, self.block3_W_conv4, self.block3_b_conv4, self.block3_W_conv5, self.block3_b_conv5] @tf.function(input_signature=[tf.TensorSpec(shape=[1, None, None, N_INPUT_CHANNELS_SMALL], dtype=tf.float32)]) def predict(self, x): h_conv1 = tf.nn.relu(conv2d(x, self.W_conv1)+ self.b_conv1) featuremaps_input = h_conv1 featuremaps_max_pool_1 = max_pool_2x2(featuremaps_input) block_h_conv1 = tf.nn.relu(conv2d(featuremaps_max_pool_1, self.block1_W_conv1)+ self.block1_b_conv1) featuremaps_max_pool_2 = max_pool_2x2(block_h_conv1) block_h_conv2 = tf.nn.relu(conv2d(featuremaps_max_pool_2, self.block1_W_conv2)+ self.block1_b_conv2) featuremaps_avarage_pool = avarage_pool_4x4(featuremaps_input) block_h_conv3 = conv2d(featuremaps_avarage_pool, self.block1_W_conv3) featuremaps_output = block_h_conv2 + block_h_conv3 featuremaps_input = featuremaps_output featuremaps_max_pool_1 = max_pool_2x2(featuremaps_input) block_h_conv1 = tf.nn.relu(conv2d(featuremaps_max_pool_1, self.block2_W_conv1)+ self.block2_b_conv1) featuremaps_max_pool_2 = max_pool_2x2(block_h_conv1) block_h_conv2 = tf.nn.relu(conv2d(featuremaps_max_pool_2, 
self.block2_W_conv2)+ self.block2_b_conv2) featuremaps_avarage_pool = avarage_pool_4x4(featuremaps_input) block_h_conv3 = conv2d(featuremaps_avarage_pool, self.block2_W_conv3) featuremaps_output = block_h_conv2 + block_h_conv3 featuremaps_input = featuremaps_output featuremaps_max_pool_1 = featuremaps_input block_h_conv1 = tf.nn.relu(conv2d(featuremaps_max_pool_1, self.block3_W_conv1)+ self.block3_b_conv1) block_h_conv2 = tf.nn.relu(conv2d(block_h_conv1, self.block3_W_conv2)+ self.block3_b_conv2) block_h_conv4 = tf.nn.relu(conv2d(block_h_conv2, self.block3_W_conv4)+ self.block3_b_conv4) block_h_conv5 = conv2d(block_h_conv4, self.block3_W_conv5)+ self.block3_b_conv5 featuremaps_avarage_pool = featuremaps_input block_h_conv3 = conv2d(featuremaps_max_pool_1, self.block3_W_conv3) output = tf.math.add(block_h_conv5, block_h_conv3, name='output') return output def load_weights(self, saved_wights_path): ckpt = tf.train.Checkpoint(var_list=self.var_list) ckpt.read(saved_wights_path) class NetModuleBigScale(tf.Module): def __init__(self): self.W_conv1 = weight_variable("W_conv1", [KERNELS_Y_SIZE_BIG, KERNELS_X_SIZE_BIG, N_FEATUREMAPS_BIG[0], N_FEATUREMAPS_BIG[1]]) self.b_conv1 = bias_variable("b_conv1", [N_FEATUREMAPS_BIG[1]]) block_name = "block1" n_featuremaps = N_FEATUREMAPS_BIG[1: 3 + 1] self.block1_W_conv1 = weight_variable(block_name + "_" + "W_conv1", [KERNELS_Y_SIZE_BIG, KERNELS_X_SIZE_BIG, n_featuremaps[0], n_featuremaps[1]]) self.block1_b_conv1 = bias_variable(block_name + "_" + "b_conv1", [n_featuremaps[1]]) self.block1_W_conv2 = weight_variable(block_name + "_" + "W_conv2", [KERNELS_Y_SIZE_BIG, KERNELS_X_SIZE_BIG, n_featuremaps[1], n_featuremaps[2]]) self.block1_b_conv2 = bias_variable(block_name + "_" + "b_conv2", [n_featuremaps[2]]) self.block1_W_conv3 = weight_variable_for_skip_connection(block_name + "_" + "W_conv3", [1, 1, n_featuremaps[0], n_featuremaps[2]]) block_name = "block2" n_featuremaps = N_FEATUREMAPS_BIG[3: 5 + 1] self.block2_W_conv1 = 
weight_variable(block_name + "_" + "W_conv1", [KERNELS_Y_SIZE_BIG, KERNELS_X_SIZE_BIG, n_featuremaps[0], n_featuremaps[1]]) self.block2_b_conv1 = bias_variable(block_name + "_" + "b_conv1", [n_featuremaps[1]]) self.block2_W_conv2 = weight_variable(block_name + "_" + "W_conv2", [KERNELS_Y_SIZE_BIG, KERNELS_X_SIZE_BIG, n_featuremaps[1], n_featuremaps[2]]) self.block2_b_conv2 = bias_variable(block_name + "_" + "b_conv2", [n_featuremaps[2]]) self.block2_W_conv3 = weight_variable_for_skip_connection(block_name + "_" + "W_conv3", [1, 1, n_featuremaps[0], n_featuremaps[2]]) block_name = "block3" n_featuremaps = N_FEATUREMAPS_BIG[5: 10 + 1] self.block3_W_conv1 = weight_variable(block_name + "_" + "W_conv1", [KERNELS_INCREASED_Y_SIZE_BIG, KERNELS_INCREASED_X_SIZE_BIG, n_featuremaps[0], n_featuremaps[1]]) self.block3_b_conv1 = bias_variable(block_name + "_" + "b_conv1", [n_featuremaps[1]]) self.block3_W_conv2 = weight_variable(block_name + "_" + "W_conv2", [N_R_OUT, KERNELS_INCREASED_X_SIZE_BIG, n_featuremaps[1], n_featuremaps[2]]) self.block3_b_conv2 = bias_variable(block_name + "_" + "b_conv2", [n_featuremaps[2]]) self.block3_W_conv4 = weight_variable(block_name + "_" + "W_conv4", [1, KERNELS_INCREASED_X_SIZE_BIG, n_featuremaps[2], n_featuremaps[3]]) self.block3_b_conv4 = bias_variable(block_name + "_" + "b_conv4", [n_featuremaps[3]]) self.block3_W_conv5 = weight_variable(block_name + "_" + "W_conv5", [1, KERNELS_INCREASED_X_SIZE_BIG, n_featuremaps[3], n_featuremaps[4]]) self.block3_b_conv5 = bias_variable(block_name + "_" + "b_conv5", [n_featuremaps[4]]) self.block3_W_conv3 = weight_variable_for_skip_connection(block_name + "_" + "W_conv3", [N_R_OUT, 1, n_featuremaps[0], n_featuremaps[4]]) self.upsample_tile_parameter_tf_const = tf.constant([1, 1, 2, 1, 2, 1], tf.int32) self.upsample_reshape_parameter_ft_const = tf.constant([1, N_R_OUT, N_PHI_OUT_TILLED, N_FEATUREMAPS_BIG[-4]], tf.int32) self.var_list = [self.W_conv1, self.b_conv1, self.block1_W_conv1, 
self.block1_b_conv1, self.block1_W_conv2, self.block1_b_conv2, self.block1_W_conv3, self.block2_W_conv1, self.block2_b_conv1, self.block2_W_conv2, self.block2_b_conv2, self.block2_W_conv3, self.block3_W_conv1, self.block3_b_conv1, self.block3_W_conv2, self.block3_b_conv2, self.block3_W_conv3, self.block3_W_conv4, self.block3_b_conv4, self.block3_W_conv5, self.block3_b_conv5] @tf.function(input_signature=[tf.TensorSpec(shape=[1, N_R, N_PHI_TILED, N_INPUT_CHANNELS_BIG], dtype=tf.float32)]) def predict(self, x): h_conv1 = tf.nn.relu(conv2d(x, self.W_conv1)+ self.b_conv1) featuremaps_input = h_conv1 featuremaps_max_pool_1 = max_pool_2x2(featuremaps_input) block_h_conv1 = tf.nn.relu(conv2d(featuremaps_max_pool_1, self.block1_W_conv1)+ self.block1_b_conv1) featuremaps_max_pool_2 = max_pool_2x2(block_h_conv1) block_h_conv2 = tf.nn.relu(conv2d(featuremaps_max_pool_2, self.block1_W_conv2)+ self.block1_b_conv2) featuremaps_avarage_pool = avarage_pool_4x4(featuremaps_input) block_h_conv3 = conv2d(featuremaps_avarage_pool, self.block1_W_conv3) featuremaps_output = block_h_conv2 + block_h_conv3 featuremaps_input = featuremaps_output featuremaps_max_pool_1 = max_pool_2x2(featuremaps_input) block_h_conv1 = tf.nn.relu(conv2d(featuremaps_max_pool_1, self.block2_W_conv1)+ self.block2_b_conv1) featuremaps_avarage_pool = avarage_pool_2x2(featuremaps_input) block_h_conv3 = conv2d(featuremaps_avarage_pool, self.block2_W_conv3) featuremaps_output = block_h_conv1 + block_h_conv3 input_for_long_skip_connection = featuremaps_output featuremaps_max_pool_2 = max_pool_2x2(block_h_conv1) block_h_conv2 = tf.nn.relu(conv2d(featuremaps_max_pool_2, self.block2_W_conv2)+ self.block2_b_conv2) featuremaps_output = block_h_conv2 featuremaps_input = featuremaps_output featuremaps_max_pool_1 = featuremaps_input block_h_conv1 = tf.nn.relu(conv2d(featuremaps_max_pool_1, self.block3_W_conv1)+ self.block3_b_conv1) upsample_tmp_1 = tf.expand_dims(block_h_conv1, 2) upsample_tmp_2 = 
tf.expand_dims(upsample_tmp_1, 4) upsample_tmp_3 = tf.tile(upsample_tmp_2, self.upsample_tile_parameter_tf_const) block_h_upsample2 = tf.reshape(upsample_tmp_3, self.upsample_reshape_parameter_ft_const) strides = [1,1,1,1] padding = [[0, 0], [0, 0], [KERNELS_INCREASED_X_SIZE_BIG_HALF, KERNELS_INCREASED_X_SIZE_BIG_HALF], [0, 0]] block_h_conv2 = tf.nn.relu(tf.nn.conv2d(block_h_upsample2, self.block3_W_conv2, strides=strides, padding=padding)+ self.block3_b_conv2) block_h_conv4 = tf.nn.relu(tf.nn.conv2d(block_h_conv2, self.block3_W_conv4, strides=strides, padding=padding)+ self.block3_b_conv4) block_h_conv5 = tf.nn.conv2d(block_h_conv4, self.block3_W_conv5, strides=strides, padding=padding)+ self.block3_b_conv5 padding = [[0, 0], [0, 0], [0, 0], [0, 0]] block_h_conv3 = tf.nn.conv2d(input_for_long_skip_connection, self.block3_W_conv3, strides=strides, padding=padding) output = tf.math.add(block_h_conv5, block_h_conv3, name='output') return output def load_weights(self, saved_wights_path): ckpt = tf.train.Checkpoint(var_list=self.var_list) ckpt.read(saved_wights_path) net_module_small_scale = NetModuleSmallScale() net_module_small_scale.load_weights(WEIGHTS_SMALL) net_module_big_scale = NetModuleBigScale() net_module_big_scale.load_weights(WEIGHTS_BIG) phi = np.linspace(0, 2*np.pi, N_PHI + 1)[0: -1] phi = phi.astype(np.float32) phi_out = phi.reshape(N_PHI // NET_SCALE_BIG, NET_SCALE_BIG) phi_out = np.mean(phi_out, axis=1) r = np.linspace(0, R_MAX, N_R + 1)[0: -1] r = r.astype(np.float32) r_indexes = np.arange(r.size ).astype(np.float32) PHI, R = np.meshgrid(phi, r) map_x = CENTER + R * np.cos(PHI) map_y = CENTER + R * np.sin(PHI) files = glob.glob(IMAGES_DIR + '/*.tiff') output_file_fid = open(OUTPUT_FILE, 'w') output_file_fid.write('id,predicted ') for file_index in range(len(files)) : file = files[file_index] file_base = os.path.basename(file) file_base_no_ext = os.path.splitext(file_base)[0] image_id = file_base_no_ext print(file_index, len(files)- 1) fid = 
rasterio.open(IMAGES_DIR + '/' + image_id + '.tiff', 'r', num_threads='all_cpus') width = fid.shape[1] height = fid.shape[0] if fid.count == 1: layers = [] for subdataset_index in range(len(fid.subdatasets)) : subdataset = fid.subdatasets[subdataset_index] layer = rasterio.open(subdataset) layers.append(layer) starts_y, ends_y, separators_y = crop_with_overlap_1d(height, OVERLAP_SMALL, BASE_SIZE_CROP_SMALL, ROUNDING_ORIG_SMALL) starts_x, ends_x, separators_x = crop_with_overlap_1d(width, OVERLAP_SMALL, BASE_SIZE_CROP_SMALL, ROUNDING_ORIG_SMALL) x_spot_all = [] y_spot_all = [] for y_index in range(starts_y.size): y1_small = starts_y[y_index] y2_small = ends_y[y_index] separator_y1 = separators_y[y_index] separator_y2 = separators_y[y_index + 1] for x_index in range(starts_x.size): x1_small = starts_x[x_index] x2_small = ends_x[x_index] separator_x1 = separators_x[x_index] separator_x2 = separators_x[x_index + 1] window = Window.from_slices(( y1_small, y2_small),(x1_small, x2_small)) height_tmp = y2_small - y1_small width_tmp = x2_small - x1_small if fid.count == 1: image_crop_small = np.zeros(( height_tmp, width_tmp, 3), np.uint8) for layer_index in range(len(layers)) : layer = layers[layer_index] image_crop_small[:, :, layer_index] = layer.read(1, window=window) else: image_crop_small = np.moveaxis(fid.read([1, 2, 3], window=window), 0, -1) image_crop_small_resized = cv2.resize(image_crop_small, None, fx=RESIZE_FACTOR_SMALL, fy=RESIZE_FACTOR_SMALL, interpolation=cv2.INTER_CUBIC) image_crop_small_resized_4d = np.expand_dims(image_crop_small_resized, 0 ).astype(np.float32)/ 255.0 mask_crop_small = net_module_small_scale.predict(image_crop_small_resized_4d) mask_crop_small = mask_crop_small.numpy() [0, :, :, 0] heatmap_thresholded = 255 *(( mask_crop_small >= THRESHOLD_SMALL ).astype(np.uint8)) n_labels, labels, stats, centroids_tmp_0 = cv2.connectedComponentsWithStats(heatmap_thresholded, connectivity=4) if n_labels > 1: for max_ind in range(1, n_labels): 
y_bigest_spot, x_bigest_spot = np.where(labels == max_ind) weights = mask_crop_small[y_bigest_spot, x_bigest_spot] weights_sum = np.sum(weights) x_spot_out = np.sum(x_bigest_spot * weights)/ weights_sum y_spot_out = np.sum(y_bigest_spot * weights)/ weights_sum x_spot =(x_spot_out + 0.5)* NET_SCALE_SMALL / RESIZE_FACTOR_SMALL - 0.5 y_spot =(y_spot_out + 0.5)* NET_SCALE_SMALL / RESIZE_FACTOR_SMALL - 0.5 x_spot += x1_small y_spot += y1_small condition = True condition = condition and separator_x1 <= x_spot if x_index ==(starts_x.size - 1): condition = condition and x_spot <= separator_x2 else: condition = condition and x_spot < separator_x2 condition = condition and separator_y1 <= y_spot if y_index ==(starts_y.size - 1): condition = condition and y_spot <= separator_y2 else: condition = condition and y_spot < separator_y2 if condition: x_spot_all.append(x_spot) y_spot_all.append(y_spot) del image_crop_small_resized_4d del image_crop_small del image_crop_small_resized del mask_crop_small gc.collect() if fid.count == 1: for layer in layers: layer.close() del layers fid.close() gc.collect() fid = rasterio.open(file, 'r', num_threads='all_cpus') if fid.count == 1: layers = [] for subdataset_index in range(len(fid.subdatasets)) : subdataset = fid.subdatasets[subdataset_index] layer = rasterio.open(subdataset) layers.append(layer) mask_prediction = np.zeros(( height, width), np.bool) for spot_index in range(len(x_spot_all)) : x_spot = int_round(x_spot_all[spot_index]) y_spot = int_round(y_spot_all[spot_index]) x1_big = x_spot - BASE_SIZE_HALF_BIG x2_big = x1_big + BASE_SIZE_BIG y1_big = y_spot - BASE_SIZE_HALF_BIG y2_big = y1_big + BASE_SIZE_BIG x1_big_src, x2_big_src, x1_big_dst, x2_big_dst = prepare_crop_indexes(x1_big, x2_big, width, BASE_SIZE_BIG) y1_big_src, y2_big_src, y1_big_dst, y2_big_dst = prepare_crop_indexes(y1_big, y2_big, height, BASE_SIZE_BIG) image_crop_big = np.zeros(( BASE_SIZE_BIG, BASE_SIZE_BIG, 3), np.uint8) window = Window.from_slices(( y1_big_src, 
y2_big_src),(x1_big_src, x2_big_src)) if fid.count == 1: for layer_index in range(len(layers)) : layer = layers[layer_index] image_crop_big[y1_big_dst: y2_big_dst, x1_big_dst: x2_big_dst, layer_index] = layer.read(1, window=window) else: image_crop_big[y1_big_dst: y2_big_dst, x1_big_dst: x2_big_dst, :] = np.moveaxis(fid.read([1, 2, 3], window=window), 0, -1) unrolled = cv2.remap(image_crop_big, map_x, map_y, cv2.INTER_CUBIC) unrolled = np.tile(unrolled,(1, 3, 1)) del image_crop_big unrolled_4d = np.expand_dims(unrolled, 0 ).astype(np.float32)/ 255.0 del unrolled big_predicted_tensor = net_module_big_scale.predict(unrolled_4d) del unrolled_4d r_mean = big_predicted_tensor.numpy() [0, 0, N_PHI_OUT: 2 * N_PHI_OUT, 0] r_mean = r_mean * r_mean_max r_mean =(r_mean + 0.5)* NET_SCALE_BIG - 0.5 r_mean = np.clip(r_mean, 0.0, N_R - 1) r_mean = np.interp(r_mean, r_indexes, r) x_contour = CENTER + r_mean * np.cos(phi_out) y_contour = CENTER + r_mean * np.sin(phi_out) contour_cv2 = np.zeros(( x_contour.size, 1, 2), np.int32) contour_cv2[:, 0, 0] = np.round(x_contour ).astype(np.int32) contour_cv2[:, 0, 1] = np.round(y_contour ).astype(np.int32) mask_center_orig_u8 = np.zeros(( BASE_SIZE_BIG, BASE_SIZE_BIG), np.uint8) mask_center_orig_u8 = cv2.drawContours(mask_center_orig_u8, [contour_cv2], -1,(255, 255, 255), -1) mask_center_orig = mask_center_orig_u8 >= 128 mask_prediction[y1_big_src: y2_big_src, x1_big_src: x2_big_src] = mask_prediction[y1_big_src: y2_big_src, x1_big_src: x2_big_src] | mask_center_orig[y1_big_dst: y2_big_dst, x1_big_dst: x2_big_dst] del mask_center_orig_u8 del mask_center_orig del r_mean gc.collect() if fid.count == 1: for layer in layers: layer.close() del layers fid.close() del fid gc.collect() y, x = np.nonzero(mask_prediction) del mask_prediction gc.collect() indexes = np.sort(x * height + y + 1) del x gc.collect() del y gc.collect() diff = np.diff(indexes) gaps = np.nonzero(diff > 1)[0] del diff gc.collect() index_min = np.min(indexes) index_max = 
np.max(indexes) starts = [] lengths = [] start_tmp = index_min for intermidieate_index in range(gaps.size): starts.append(start_tmp) lengths.append(indexes[gaps[intermidieate_index]] - start_tmp + 1) start_tmp = indexes[gaps[intermidieate_index] + 1] del indexes gc.collect() del gaps gc.collect() starts.append(start_tmp) lengths.append(index_max - start_tmp + 1) line = image_id + ',' for segment_index in range(len(starts)) : start = starts[segment_index] length = lengths[segment_index] line = line + str(start)+ ' ' + str(length) if segment_index !=(len(starts)- 1): line = line + ' ' del starts gc.collect() del lengths gc.collect() output_file_fid.write(line + ' ') del line gc.collect() output_file_fid.close() <load_from_zip>
XGparams={'colsample_bytree': 0.7, 'learning_rate': 0.01, 'max_depth': 7, 'min_child_weight': 1, 'n_estimators': 4000, 'nthread': 4, 'objective': 'reg:squarederror', 'subsample': 0.7}
Tabular Playground Series - Jan 2021
14,376,120
!mkdir -p /tmp/pip/cache/ !cp.. /input/segmentationmodelspytorch/segmentation_models/efficientnet_pytorch-0.6.3.xyz /tmp/pip/cache/efficientnet_pytorch-0.6.3.tar.gz !cp.. /input/segmentationmodelspytorch/segmentation_models/pretrainedmodels-0.7.4.xyz /tmp/pip/cache/pretrainedmodels-0.7.4.tar.gz !cp.. /input/segmentationmodelspytorch/segmentation_models/segmentation-models-pytorch-0.1.2.xyz /tmp/pip/cache/segmentation_models_pytorch-0.1.2.tar.gz !cp.. /input/segmentationmodelspytorch/segmentation_models/timm-0.1.20-py3-none-any.whl /tmp/pip/cache/ !cp.. /input/segmentationmodelspytorch/segmentation_models/timm-0.2.1-py3-none-any.whl /tmp/pip/cache/ !pip install --no-index --find-links /tmp/pip/cache/ efficientnet-pytorch !pip install --no-index --find-links /tmp/pip/cache/ segmentation-models-pytorch<set_options>
n_fold = 10 folds = KFold(n_splits=n_fold, shuffle=True, random_state=42) train_columns = train.columns.values oof = np.zeros(len(train)) XGpredictions = np.zeros(len(test)) feature_importance_df = pd.DataFrame() for fold_,(trn_idx, val_idx)in enumerate(folds.split(train, target.values)) : strLog = "fold {}".format(fold_) print(strLog) X_tr, X_val = train.iloc[trn_idx], train.iloc[val_idx] y_tr, y_val = target.iloc[trn_idx], target.iloc[val_idx] model = XGBRegressor(**XGparams) model.fit(X_tr, y_tr, eval_set=[(X_tr, y_tr),(X_val, y_val)], verbose=1000, early_stopping_rounds=400) oof[val_idx] = model.predict(X_val, ntree_limit=model.best_iteration) preds = model.predict(test, ntree_limit=model.best_iteration) fold_importance_df = pd.DataFrame() fold_importance_df["Feature"] = train_columns fold_importance_df["importance"] = model.feature_importances_[:len(train_columns)] fold_importance_df["fold"] = fold_ + 1 feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0) XGpredictions += model.predict(test, ntree_limit=model.best_iteration)/ folds.n_splits
Tabular Playground Series - Jan 2021
14,376,120
warnings.filterwarnings("ignore" )<define_variables>
submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id') submission.reset_index(inplace=True) submission = submission.rename(columns = {'index':'id'} )
Tabular Playground Series - Jan 2021
14,376,120
VERBOSE = True DATA_DIR = '.. /input/hubmap-kidney-segmentation/test' REDUCTION = 3 TILE_SZ = 512 MEAN = np.array([0.63482309,0.47376275,0.67814029]) STD = np.array([0.17405236,0.23305763,0.1585981]) MODELS_FRESH_FROZEN = [f'.. /input/ret-r101-multi3468-lf/model_{i}.pth' for i in [0,2]] + \ [f'.. /input/ens-red345/model_effb7_{i}.pth' for i in [1,2,3]] + \ [f'.. /input/ens-red345/model_effb5_{i}.pth' for i in [0,1]] MODELS_PATHS = [MODELS_FRESH_FROZEN, MODELS_FRESH_FROZEN] S_TH = 40 P_TH = 200*TILE_SZ//256 PUBLIC_ONLY = False X_OVERLAP = [0., 0.5] Y_OVERLAP = [0., 0.5] CUSTOM_REDS = [1, 1] THS = [0.3, 0.3] N_BINS = 255 BATCH_SIZE = 8 NUM_WORKERS = 4 HALF_PRECISION = False TTA_FLIPS = [[-1], [-2], [-2, -1]] ROT_TTA_FLIPS = [0] MASK_SZ = 4096<drop_column>
LGBMsubmission=submission.copy() LGBMsubmission['target'] = LGBMpredictions LGBMsubmission.to_csv('submission_LGBM.csv', header=True, index=False) LGBMsubmission.head()
Tabular Playground Series - Jan 2021
14,376,120
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if os.path.exists('tmp'): if VERBOSE: print("Removing 'tmp' directory") shutil.rmtree('tmp' )<define_search_model>
XGBoostsubmission=submission.copy() XGBoostsubmission['target'] = XGpredictions XGBoostsubmission.to_csv('submission_XGBoost.csv', header=True, index=False) XGBoostsubmission.head()
Tabular Playground Series - Jan 2021
14,376,120
class FPN(nn.Module): def __init__(self, input_channels:list, output_channels:list): super().__init__() self.convs = nn.ModuleList( [nn.Sequential(nn.Conv2d(in_ch, out_ch*2, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.BatchNorm2d(out_ch*2), nn.Conv2d(out_ch*2, out_ch, kernel_size=3, padding=1)) for in_ch, out_ch in zip(input_channels, output_channels)]) def forward(self, xs:list, last_layer): hcs = [F.interpolate(c(x),scale_factor=2**(len(self.convs)-i),mode='bilinear') for i,(c,x)in enumerate(zip(self.convs, xs)) ] hcs.append(last_layer) return torch.cat(hcs, dim=1) class UnetBlock(nn.Module): def __init__(self, up_in_c:int, x_in_c:int, nf:int=None, blur:bool=False, self_attention:bool=False, **kwargs): super().__init__() self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, **kwargs) self.bn = nn.BatchNorm2d(x_in_c) ni = up_in_c//2 + x_in_c nf = nf if nf is not None else max(up_in_c//2,32) self.conv1 = ConvLayer(ni, nf, norm_type=None, **kwargs) self.conv2 = ConvLayer(nf, nf, norm_type=None, xtra=SelfAttention(nf)if self_attention else None, **kwargs) self.relu = nn.ReLU(inplace=True) def forward(self, up_in:torch.Tensor, left_in:torch.Tensor)-> torch.Tensor: s = left_in up_out = self.shuf(up_in) cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) return self.conv2(self.conv1(cat_x)) class _ASPPModule(nn.Module): def __init__(self, inplanes, planes, kernel_size, padding, dilation, groups=1): super().__init__() self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False, groups=groups) self.bn = nn.BatchNorm2d(planes) self.relu = nn.ReLU() self._init_weight() def forward(self, x): x = self.atrous_conv(x) x = self.bn(x) return self.relu(x) def _init_weight(self): for m in self.modules() : if isinstance(m, nn.Conv2d): torch.nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() class ASPP(nn.Module): def __init__(self, 
inplanes=512, mid_c=256, dilations=[6, 12, 18, 24], out_c=None): super().__init__() self.aspps = [_ASPPModule(inplanes, mid_c, 1, padding=0, dilation=1)] + \ [_ASPPModule(inplanes, mid_c, 3, padding=d, dilation=d,groups=4)for d in dilations] self.aspps = nn.ModuleList(self.aspps) self.global_pool = nn.Sequential(nn.AdaptiveMaxPool2d(( 1, 1)) , nn.Conv2d(inplanes, mid_c, 1, stride=1, bias=False), nn.BatchNorm2d(mid_c), nn.ReLU()) out_c = out_c if out_c is not None else mid_c self.out_conv = nn.Sequential(nn.Conv2d(mid_c*(2+len(dilations)) , out_c, 1, bias=False), nn.BatchNorm2d(out_c), nn.ReLU(inplace=True)) self.conv1 = nn.Conv2d(mid_c*(2+len(dilations)) , out_c, 1, bias=False) self._init_weight() def forward(self, x): x0 = self.global_pool(x) xs = [aspp(x)for aspp in self.aspps] x0 = F.interpolate(x0, size=xs[0].size() [2:], mode='bilinear', align_corners=True) x = torch.cat([x0] + xs, dim=1) return self.out_conv(x) def _init_weight(self): for m in self.modules() : if isinstance(m, nn.Conv2d): torch.nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_()<choose_model_class>
EnsembledSubmission=submission.copy() EnsembledSubmission['target'] =(LGBMpredictions*0.72 + XGpredictions*0.28) EnsembledSubmission.to_csv('ensembled_submission.csv', header=True, index=False) EnsembledSubmission.head()
Tabular Playground Series - Jan 2021
14,377,327
class UneXt50(nn.Module): def __init__(self, stride=1, **kwargs): super().__init__() m = ResNet(Bottleneck, [3, 4, 6, 3], groups=32, width_per_group=4) self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ReLU(inplace=True)) self.enc1 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1), m.layer1) self.enc2 = m.layer2 self.enc3 = m.layer3 self.enc4 = m.layer4 self.aspp = ASPP(2048,256,out_c=512,dilations=[stride*1,stride*2,stride*3,stride*4]) self.drop_aspp = nn.Dropout2d(0.5) self.dec4 = UnetBlock(512,1024,256) self.dec3 = UnetBlock(256,512,128) self.dec2 = UnetBlock(128,256,64) self.dec1 = UnetBlock(64,64,32) self.fpn = FPN([512,256,128,64],[16]*4) self.drop = nn.Dropout2d(0.1) self.final_conv = ConvLayer(32+16*4, 1, ks=1, norm_type=None, act_cls=None) def forward(self, x): enc0 = self.enc0(x) enc1 = self.enc1(enc0) enc2 = self.enc2(enc1) enc3 = self.enc3(enc2) enc4 = self.enc4(enc3) enc5 = self.aspp(enc4) dec3 = self.dec4(self.drop_aspp(enc5),enc3) dec2 = self.dec3(dec3,enc2) dec1 = self.dec2(dec2,enc1) dec0 = self.dec1(dec1,enc0) x = self.fpn([enc5, dec3, dec2, dec1], dec0) x = self.final_conv(self.drop(x)) x = F.interpolate(x,scale_factor=2,mode='bilinear') return x class UneXt101(nn.Module): def __init__(self, stride=1, **kwargs): super().__init__() m = ResNet(Bottleneck, [3, 4, 23, 3], groups=32, width_per_group=16) self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ReLU(inplace=True)) self.enc1 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1), m.layer1) self.enc2 = m.layer2 self.enc3 = m.layer3 self.enc4 = m.layer4 self.aspp = ASPP(2048,256,out_c=512,dilations=[stride*1,stride*2,stride*3,stride*4]) self.drop_aspp = nn.Dropout2d(0.5) self.dec4 = UnetBlock(512,1024,256) self.dec3 = UnetBlock(256,512,128) self.dec2 = UnetBlock(128,256,64) self.dec1 = UnetBlock(64,64,32) self.fpn = FPN([512,256,128,64],[16]*4) self.drop = nn.Dropout2d(0.1) self.final_conv = ConvLayer(32+16*4, 1, ks=1, norm_type=None, act_cls=None) def 
forward(self, x): enc0 = self.enc0(x) enc1 = self.enc1(enc0) enc2 = self.enc2(enc1) enc3 = self.enc3(enc2) enc4 = self.enc4(enc3) enc5 = self.aspp(enc4) dec3 = self.dec4(self.drop_aspp(enc5),enc3) dec2 = self.dec3(dec3,enc2) dec1 = self.dec2(dec2,enc1) dec0 = self.dec1(dec1,enc0) x = self.fpn([enc5, dec3, dec2, dec1], dec0) x = self.final_conv(self.drop(x)) x = F.interpolate(x,scale_factor=2,mode='bilinear') return x class Unet50(nn.Module): def __init__(self, stride=1, **kwargs): super().__init__() m = ResNet(Bottleneck, [3, 4, 6, 3]) self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ReLU(inplace=True)) self.enc1 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1), m.layer1) self.enc2 = m.layer2 self.enc3 = m.layer3 self.enc4 = m.layer4 self.aspp = ASPP(2048,256,out_c=512,dilations=[stride*1,stride*2,stride*3,stride*4]) self.drop_aspp = nn.Dropout2d(0.5) self.dec4 = UnetBlock(512,1024,256) self.dec3 = UnetBlock(256,512,128) self.dec2 = UnetBlock(128,256,64) self.dec1 = UnetBlock(64,64,32) self.fpn = FPN([512,256,128,64],[16]*4) self.drop = nn.Dropout2d(0.1) self.final_conv = ConvLayer(32+16*4, 1, ks=1, norm_type=None, act_cls=None) def forward(self, x): enc0 = self.enc0(x) enc1 = self.enc1(enc0) enc2 = self.enc2(enc1) enc3 = self.enc3(enc2) enc4 = self.enc4(enc3) enc5 = self.aspp(enc4) dec3 = self.dec4(self.drop_aspp(enc5),enc3) dec2 = self.dec3(dec3,enc2) dec1 = self.dec2(dec2,enc1) dec0 = self.dec1(dec1,enc0) x = self.fpn([enc5, dec3, dec2, dec1], dec0) x = self.final_conv(self.drop(x)) x = F.interpolate(x,scale_factor=2,mode='bilinear') return x<load_pretrained>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import optuna from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split, KFold from sklearn.metrics import mean_squared_error from sklearn.base import TransformerMixin import xgboost as xgb import lightgbm as lgb
Tabular Playground Series - Jan 2021
14,377,327
MODELS = [] for models_list in MODELS_PATHS: models_i = [] for ij,path in enumerate(models_list): state_dict = torch.load(path,map_location=torch.device('cpu')) if ij < 2: model = UneXt101() elif ij < 5: model = smp.Unet(encoder_name='efficientnet-b7', classes=1, activation=None, encoder_weights=None) else: model = smp.Unet(encoder_name='efficientnet-b5', classes=1, activation=None, encoder_weights=None) model.load_state_dict(state_dict) model.float() model.eval() model.to(device) models_i.append(model) del state_dict MODELS.append(models_i )<prepare_x_and_y>
df = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv') df.head()
Tabular Playground Series - Jan 2021
14,377,327
def _tile_resize_save(img, img_id, tile_sz, reduce=1):
    """Cut `img` into tile_sz x tile_sz tiles, optionally downscale each
    tile by `reduce`, and save them as '<img_id>_<x>_<y>.png' files.

    Returns the (height, width, 3) shape the image occupies after
    reduction, reconstructed from the last (possibly partial) tile.
    """
    x = 0
    while x < img.shape[0]:
        y = 0
        while y < img.shape[1]:
            # Edge tiles may be smaller than tile_sz; numpy slicing clips.
            img_tile = img[x:x+tile_sz,y:y+tile_sz]
            if reduce > 1:
                # cv2.resize takes (width, height); INTER_AREA suits downscaling.
                new_dim =(img_tile.shape[1]//reduce,img_tile.shape[0]//reduce)
                img_tile = cv2.resize(img_tile, new_dim, interpolation = cv2.INTER_AREA)
            # File name encodes the tile's top-left corner in reduced coordinates.
            save_path = "%s_%d_%d.png" %(img_id, x//reduce, y//reduce)
            Image.fromarray(img_tile ).save(save_path)
            y += tile_sz
        x += tile_sz
    # Full tiles contribute tile_sz//reduce pixels each; the final partial
    # tile contributes its own (already reduced) size.
    final_x =(( x-tile_sz)//tile_sz)*(tile_sz//reduce)+ img_tile.shape[0]
    final_y =(( y-tile_sz)//tile_sz)*(tile_sz//reduce)+ img_tile.shape[1]
    return(final_x, final_y, 3 )
def objective_xgb(trial, data, target):
    """Optuna objective for XGBRegressor: mean 5-fold CV RMSE (minimize).

    Parameters
    ----------
    trial : optuna.Trial used to sample hyperparameters.
    data : pandas.DataFrame of features.
    target : pandas.Series of regression targets aligned with `data`.
    """
    parameters = {
        'tree_method': 'gpu_hist',
        'lambda': trial.suggest_loguniform('lambda', 1e-3, 10.0),
        'alpha': trial.suggest_loguniform('alpha', 1e-3, 10.0),
        'colsample_bytree': trial.suggest_categorical('colsample_bytree', [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]),
        'subsample': trial.suggest_categorical('subsample', [0.4, 0.5, 0.6, 0.7, 0.8, 1.0]),
        'learning_rate': trial.suggest_categorical('learning_rate', [0.008, 0.009, 0.01, 0.012, 0.014, 0.016, 0.018, 0.02]),
        'n_estimators': 1000,
        'max_depth': trial.suggest_categorical('max_depth', [5, 7, 9, 11, 13, 15, 17, 20]),
        'random_state': trial.suggest_categorical('random_state', [24, 48, 2020]),
        'min_child_weight': trial.suggest_int('min_child_weight', 1, 300),
    }
    folds = KFold(n_splits=5, random_state=1337, shuffle=True)
    rmse = []
    for train_idx, test_idx in folds.split(data, target):
        # BUG FIX: previously indexed the module-level globals `X`/`y`,
        # silently ignoring the `data`/`target` arguments being split.
        X_train, X_test = data.iloc[train_idx], data.iloc[test_idx]
        y_train, y_test = target.iloc[train_idx], target.iloc[test_idx]
        model = xgb.XGBRegressor(**parameters)
        model.fit(X_train, y_train, eval_set=[(X_test, y_test)], early_stopping_rounds=100, verbose=False)
        rmse.append(mean_squared_error(y_test, model.predict(X_test), squared=False))
    print(f'Mean RMSE for all the folds: {np.mean(rmse)}')
    return np.mean(rmse)
Tabular Playground Series - Jan 2021
14,377,327
def load_resize(idx, reduce): img = load_image(os.path.join(DATA_DIR,idx+'.tiff')) init_shape = img.shape shape = _tile_resize_save(img, idx,(MASK_SZ*REDUCTION), reduce=REDUCTION) img = _reconstruct_img(idx,(MASK_SZ*REDUCTION)//REDUCTION, shape) return img, init_shape<categorify>
xgb_parameters = { 'objective': 'reg:squarederror', 'tree_method': 'gpu_hist', 'n_estimators': 1000, 'lambda': 7.610705234008646, 'alpha': 0.0019377246932580476, 'colsample_bytree': 0.5, 'subsample': 0.7, 'learning_rate': 0.012, 'max_depth': 20, 'random_state': 24, 'min_child_weight': 229 }
Tabular Playground Series - Jan 2021
14,377,327
def _get_nored_pads(initW, initH, upW, upH, xa, xb, ya, yb):
    """Rescale reduced-resolution padding amounts to the full-resolution grid.

    (initW, initH) is the original size, (upW, upH) the upscaled padded size;
    (xa, xb, ya, yb) are the reduced-space pads whose left/right (top/bottom)
    proportions are preserved in the returned full-resolution pads.
    """
    px = xa/(xa+xb)
    py = ya/(ya+yb)
    padx = upW - initW
    pady = upH - initH
    assert padx > 0
    assert pady > 0
    xa = int(px*padx)
    xb = padx - xa
    ya = int(py*pady)
    yb = pady - ya
    return xa, xb, ya, yb

def _add_padding(img, init_sz, img_shape, p0, p1):
    """Zero-pad `img` so both dimensions divide evenly into TILE_SZ tiles.

    p0/p1 shift the padding split (as a fraction of TILE_SZ) to realize
    overlapping tilings.  Returns the padded image, the full-resolution
    crop pads (via _get_nored_pads), and the padded shape.
    """
    start = time.time()
    if VERBOSE: print(" > Adding padding to make the image dividable into tiles...")
    if VERBOSE:
        print(" > Before reduction:", img_shape)
        print(" > After reduction:", img.shape)
    # An extra TILE_SZ of padding is always added so p0/p1 offsets never
    # push the split negative.
    pad0_ = TILE_SZ - img.shape[0]%TILE_SZ
    x_pad = int(TILE_SZ*p0)
    xa =(pad0_//2 + x_pad)
    xb = pad0_+TILE_SZ-(pad0_//2 + x_pad)
    pad0_lr = [xa, xb]
    pad1_ = TILE_SZ - img.shape[1]%TILE_SZ
    y_pad = int(TILE_SZ*p1)
    ya =(pad1_//2 + y_pad)
    yb = pad1_+TILE_SZ-(pad1_//2 + y_pad)
    pad1_lr = [ya, yb]
    img = np.pad(img,[pad0_lr, pad1_lr,[0,0]],constant_values=0)
    if VERBOSE: print(" > After padding:", img.shape, "Time =", time.time() - start, "s")
    # Convert the reduced-space pads into full-resolution crop amounts.
    xa, xb, ya, yb = _get_nored_pads(img_shape[0], img_shape[1], REDUCTION*img.shape[0], REDUCTION*img.shape[1], xa, xb, ya, yb)
    return img, xa, xb, ya, yb, img.shape
def objective_lgb(trial):
    """Optuna objective for LightGBM: holdout RMSE (minimize).

    NOTE(review): unlike objective_xgb, this reads the module-level
    DataFrame `df` directly and uses a single 80/20 holdout rather than CV.
    """
    X, y = df.drop(columns=['target', 'id'] ).values, df['target'].values
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1337)
    ds_train = lgb.Dataset(X_train, label=y_train)
    ds_test = lgb.Dataset(X_test, label=y_test)
    parameters = {
        'device_type': 'gpu',
        'objective': 'regression',
        'metric': 'rmse',
        'verbosity': -1,
        'boosting_type': 'gbdt',
        'lambda_l1': trial.suggest_float('lambda_l1', 1e-8, 10.0, log=True),
        'lambda_l2': trial.suggest_float('lambda_l2', 1e-8, 10.0, log=True),
        'num_leaves': trial.suggest_int('num_leaves', 2, 256),
        'feature_fraction': trial.suggest_float('feature_fraction', 0.4, 1.0),
        'bagging_fraction': trial.suggest_float('bagging_fraction', 0.4, 1.0),
        'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
        'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
    }
    # NOTE(review): ds_test is built but not passed as a valid_set, so no
    # early stopping occurs; the holdout is only used for the final score.
    gbm = lgb.train(parameters, ds_train)
    prediction = gbm.predict(X_test)
    accuracy = mean_squared_error(y_test, prediction, squared=False)
    return accuracy
Tabular Playground Series - Jan 2021
14,377,327
def _split_image(img):
    """Split a padded RGB image into a (n_tiles, TILE_SZ, TILE_SZ, 3)
    array; tiles are ordered row-major over the tile grid.

    Both image dimensions must be exact multiples of TILE_SZ.
    """
    t0 = time.time()
    if VERBOSE:
        print(" > Splitting image into tiles...")
    rows = img.shape[0] // TILE_SZ
    cols = img.shape[1] // TILE_SZ
    assert rows * TILE_SZ == img.shape[0]
    assert cols * TILE_SZ == img.shape[1]
    # Factor each spatial axis into (block index, in-block offset), then
    # bring the two block axes together before flattening.
    tiles = img.reshape(rows, TILE_SZ, cols, TILE_SZ, 3)
    tiles = tiles.transpose(0, 2, 1, 3, 4).reshape(-1, TILE_SZ, TILE_SZ, 3)
    if VERBOSE:
        print(" > Splitting done! Time =", time.time() - t0)
    return tiles
lgb_parameters = { 'objective': 'regression', 'metric': 'rmse', 'boosting': 'gbdt', 'lambda_l1': 3.2737454713243543e-07, 'lambda_l2': 3.685676983230042e-06, 'num_leaves': 190, 'feature_fraction': 0.47291296723211934, 'bagging_fraction': 0.8846579981793894, 'bagging_freq': 3, 'min_child_samples': 58, 'verbose': 0, 'device_type': 'gpu' }
Tabular Playground Series - Jan 2021
14,377,327
def img2tensor(img, dtype: np.dtype = np.float32):
    """Convert an HWC (or 2-D HW) numpy image into a CHW torch tensor of `dtype`."""
    arr = img if img.ndim != 2 else np.expand_dims(img, 2)
    chw = np.transpose(arr, (2, 0, 1))
    return torch.from_numpy(chw.astype(dtype, copy=False))


class HuBMAPTestDataset(Dataset):
    """Serves normalized image tiles previously written to tmp/<index>.png."""

    def __init__(self, idxs):
        # Tile indices double as file names on disk.
        self.fnames = idxs

    def __len__(self):
        return len(self.fnames)

    def __getitem__(self, idx):
        im = cv2.imread("tmp/%d.png" % (self.fnames[idx],))
        # Scale to [0, 1], then standardize with the dataset channel stats.
        return img2tensor((im / 255.0 - MEAN) / STD)
class NonLinearTransformer(TransformerMixin):
    """Feature engineering step: drops the 'id' column and appends a squared
    copy '<col>^2' of every remaining feature column (skipping 'target')."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        out = X.drop(columns=['id'])
        feature_cols = [col for col in out.columns if col != 'target']
        for col in feature_cols:
            out[f'{col}^2'] = out[col] ** 2
        return out
Tabular Playground Series - Jan 2021
14,377,327
def _make_tiles_dataloader(idxs):
    """Wrap the selected tile indices in a deterministic DataLoader."""
    t0 = time.time()
    dataset = HuBMAPTestDataset(idxs)
    # Order must be preserved so predictions can be matched back to tiles.
    loader = DataLoader(dataset, BATCH_SIZE, num_workers=NUM_WORKERS,
                        shuffle=False, pin_memory=True)
    if VERBOSE:
        print(" > Tiles dataset created! Time =", time.time() - t0)
    return loader
pipe_xgb = Pipeline([ ('custom', NonLinearTransformer()), ('scaling', StandardScaler()), ('regression', xgb.XGBRegressor(**xgb_parameters)) ]) pipe_lgb = Pipeline([ ('custom', NonLinearTransformer()), ('scaling', StandardScaler()), ('regression', lgb.LGBMRegressor(**lgb_parameters)) ] )
Tabular Playground Series - Jan 2021
14,377,327
def _generate_masks(dl, idxs, n_tiles, init_sz, group):
    """Run the model ensemble for `group` over the tile dataloader and
    scatter per-tile predictions into a (n_tiles, init_sz, init_sz) uint8
    tensor.  Tiles not listed in `idxs` remain all-zero.
    """
    start = time.time()
    if VERBOSE: print(" > Generating masks...")
    red = CUSTOM_REDS[group]
    mp = Model_pred(MODELS[group], dl, red)
    mask = torch.zeros(n_tiles, init_sz, init_sz, dtype=torch.uint8)
    # Predictions are yielded in dataloader order, which matches `idxs`.
    for i, p in zip(idxs,iter(mp)) :
        mask[i] = p.squeeze(-1)
    if VERBOSE: print(" > Masks generated! Time =", time.time() - start)
    return mask
df_train = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv') df_predict = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv' )
Tabular Playground Series - Jan 2021
14,377,327
class Model_pred:
    """Iterator streaming ensemble + TTA segmentation predictions.

    For every batch from `dl`: downscale by `red`, average sigmoid outputs
    of all `models` over rotation (ROT_TTA_FLIPS) and flip (TTA_FLIPS)
    test-time augmentations, upscale back by REDUCTION*red, and yield one
    (H, W, 1) int tensor per image with values quantized to [0, N_BINS].
    """

    def __init__(self, models, dl, red, half: bool = False):
        self.models = models
        self.dl = dl
        self.half = half  # run inference in fp16 when True
        self.red = red    # extra downscale factor applied before inference

    def __iter__(self):
        with torch.no_grad():
            for x in iter(self.dl):
                x = x.to(device)
                x = F.interpolate(x, scale_factor=1/self.red, mode='bilinear')
                if self.half:
                    x = x.half()
                py = 0.
                for rot_flip in ROT_TTA_FLIPS:
                    # Rotation-only TTA pass.
                    for model in self.models:
                        xr = torch.rot90(x, rot_flip, [-2, -1])
                        p = model(xr)
                        p = torch.rot90(p, -rot_flip, [-2, -1])
                        p = torch.sigmoid(p).detach()
                        py += p
                    # Rotation + flip TTA passes; each prediction is
                    # un-flipped and un-rotated before accumulation.
                    for f in TTA_FLIPS:
                        xf = torch.rot90(x, rot_flip, [-2, -1])
                        xf = torch.flip(xf, f)
                        for model in self.models:
                            p = model(xf)
                            p = torch.flip(p, f)
                            p = torch.rot90(p, -rot_flip, [-2, -1])
                            py += torch.sigmoid(p).detach()
                # Average over all TTA variants and all ensemble members.
                py /= (1+len(TTA_FLIPS)) * len(ROT_TTA_FLIPS)
                py /= len(self.models)
                # FIX: F.upsample is deprecated; F.interpolate is its exact
                # drop-in replacement with identical behavior.
                py = F.interpolate(py, scale_factor=REDUCTION*self.red, mode="bilinear")
                py = py.permute(0, 2, 3, 1).float().cpu()
                # Quantize probabilities to integer bins for cheap storage.
                py = (N_BINS*py).int()
                batch_size = len(py)
                for i in range(batch_size):
                    yield py[i]

    def __len__(self):
        return len(self.dl.dataset)
X, y = df_train.drop(columns=['target']), df_train['target']
Tabular Playground Series - Jan 2021
14,377,327
def _reshape_depad_mask(mask, init_shape, init_sz, p0, p1, xa, xb, ya, yb):
    """Stitch per-tile masks back into one full-resolution mask and crop
    off the padding added by _add_padding.

    `mask` holds one init_sz x init_sz tile per row-major grid position of
    the padded image (`init_shape`); (xa, xb, ya, yb) are full-resolution
    crop amounts.  p0/p1 are unused here (kept for signature symmetry).
    """
    start = time.time()
    if VERBOSE: print(" > Merge tiled masks into one mask and crop padding...")
    # (grid_rows, grid_cols, tile_h, tile_w) -> interleave tile rows back
    # into image rows, then flatten to the upscaled full-image shape.
    mask = mask.view(init_shape[0]//TILE_SZ, init_shape[1]//TILE_SZ, init_sz, init_sz ).permute(0,2,1,3 ).reshape(init_shape[0]*REDUCTION, init_shape[1]*REDUCTION)
    mask = mask[xa:-xb,ya:-yb]
    if VERBOSE: print(" > Mask created! Shape =", mask.shape,"Time =", time.time() - start)
    return mask
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1337 )
Tabular Playground Series - Jan 2021
14,377,327
def _save_mask_tiles(mask, idx, p0, p1):
    """Write `mask` to disk in MASK_SZ x MASK_SZ PNG tiles.

    File names encode the tile origin and the (p0, p1) overlap offsets so
    make_predictions can later re-assemble and average the shifted runs.
    """
    start = time.time()
    if VERBOSE: print(" > Saving tiles in HDD memory...")
    x = 0
    while x < mask.shape[0]:
        y = 0
        while y < mask.shape[1]:
            # Edge tiles may be smaller than MASK_SZ; slicing clips.
            mask_tile = mask[x:x+MASK_SZ,y:y+MASK_SZ].numpy()
            save_path = "%s_%d_%d_%s_%s.png" %(idx, x, y, str(p0), str(p1))
            Image.fromarray(mask_tile ).save(save_path)
            y += MASK_SZ
        x += MASK_SZ
    if VERBOSE: print("Tiles saved! Time =", time.time() - start )
pipe_xgb.fit(X_train, y_train) pipe_lgb.fit(X_train, y_train) print(f'XGB Score: {pipe_xgb.score(X_test, y_test)}, LGB Score: {pipe_lgb.score(X_test, y_test)}') print(f'XGB RMSE: {mean_squared_error(y_test, pipe_xgb.predict(X_test), squared=False)}, LGB RMSE: {mean_squared_error(y_test, pipe_lgb.predict(X_test), squared=False)}' )
Tabular Playground Series - Jan 2021
14,377,327
def make_one_prediction(img, group, idx, img_shape, p0, p1):
    """Run one full tiling/inference pass for image `idx` at the given
    (p0, p1) overlap offsets and persist the resulting mask tiles to disk.
    """
    init_sz = TILE_SZ*REDUCTION
    # Pad so the image divides into tiles, shifted by the overlap offsets.
    img, xa, xb, ya, yb, img_shape_p = _add_padding(img, init_sz, img_shape, p0, p1)
    img = _split_image(img)
    n_tiles = img.shape[0]
    # Only tissue-bearing tiles are written to tmp/ and predicted.
    idxs = _select_tiles(img)
    dl = _make_tiles_dataloader(idxs)
    mask = _generate_masks(dl, idxs, n_tiles, init_sz, group)
    mask = _reshape_depad_mask(mask, img_shape_p, init_sz, p0, p1, xa, xb, ya, yb)
    gc.collect()
    # Drop the temporary tile images before saving the mask tiles.
    shutil.rmtree('tmp')
    _save_mask_tiles(mask, idx, p0, p1 )
def ensemble_predict(X):
    """Blend the fitted XGBoost and LightGBM pipelines 85/15 per sample."""
    xgb_out = pipe_xgb.predict(X)
    lgb_out = pipe_lgb.predict(X)
    return [0.85 * a + 0.15 * b for a, b in zip(xgb_out, lgb_out)]
Tabular Playground Series - Jan 2021
14,377,327
def get_mask_tiles(idx, p0_list, p1_list):
    """Generate mask tiles for image `idx` at every (p0, p1) overlap
    combination and return the original image shape plus the binarization
    threshold for the image's model group.
    """
    group = _get_group(os.path.join(DATA_DIR,idx+'.tiff'))
    TH = THS[group]
    img, init_shape = load_resize(idx, REDUCTION)
    for p0 in p0_list:
        for p1 in p1_list:
            make_one_prediction(img, group, idx, init_shape, p0, p1)
    return init_shape, TH
print(f'Ensemble RMSE: {mean_squared_error(y_test, ensemble_predict(X_test), squared=False)}' )
Tabular Playground Series - Jan 2021
14,377,327
def make_predictions(idx):
    """Assemble the final binary mask for image `idx` from the overlapping
    prediction runs saved on disk and return its RLE string.
    """
    init_shape, TH = get_mask_tiles(idx, X_OVERLAP, Y_OVERLAP)
    mask = torch.zeros(*init_shape[:2], dtype=torch.uint8)
    x = 0
    while x < init_shape[0]:
        y = 0
        while y < init_shape[1]:
            # Sum the quantized predictions of every overlap run for this tile.
            mask_tile = 0.
            for p0 in X_OVERLAP:
                for p1 in Y_OVERLAP:
                    tile_path = "%s_%d_%d_%s_%s.png" %(idx, x, y, str(p0), str(p1))
                    mask_tile += torch.tensor(np.asarray(Image.open(tile_path), dtype=int))
                    # Tiles are consumed once; free disk space immediately.
                    os.remove(tile_path)
            # Threshold scaled by the quantization bins and number of runs.
            NEW_TH = int(N_BINS*len(X_OVERLAP)*len(Y_OVERLAP)*TH)
            mask[x:x+MASK_SZ,y:y+MASK_SZ] = mask_tile>NEW_TH
            y += MASK_SZ
        x += MASK_SZ
    if VERBOSE: print(" > Converting to RLE...")
    rle = rle_encode_less_memory(mask.numpy())
    del mask
    return rle
pipe_xgb.fit(X, y) pipe_lgb.fit(X, y )
Tabular Playground Series - Jan 2021
14,377,327
<load_from_csv><EOS>
target = pd.DataFrame({ 'id': df_predict['id'], 'target': ensemble_predict(df_predict) }) target.to_csv('submission.csv', index=False )
Tabular Playground Series - Jan 2021
14,208,130
<SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<predict_on_test>
PATH = '/kaggle/input/tabular-playground-series-jan-2021/'
Tabular Playground Series - Jan 2021
14,208,130
for idx,row in tqdm(df_sample.iterrows() ,total=len(df_sample)) : idx = row['id'] print("Computing predictions for image", idx) rle = make_predictions(idx) names.append(idx) preds.append(rle )<save_to_csv>
train = pd.read_csv(PATH+'train.csv') test = pd.read_csv(PATH+'test.csv') submission = pd.read_csv(PATH+'sample_submission.csv' )
Tabular Playground Series - Jan 2021
14,208,130
df = pd.DataFrame({'id': names, 'predicted': preds}) df.to_csv('submission.csv',index=False )<install_modules>
!pip install pycaret
Tabular Playground Series - Jan 2021
14,208,130
!pip install --no-index --find-links=.. /input/preinstall efficientnet<import_modules>
from pycaret.regression import *
Tabular Playground Series - Jan 2021
14,208,130
import numpy as np import pandas as pd import os import glob import gc from functools import partial import json import rasterio from rasterio.windows import Window import yaml import pprint import pathlib from tqdm.notebook import tqdm import cv2 import tensorflow as tf import efficientnet as efn import efficientnet.tfkeras import time<define_variables>
reg = setup(data=train, target='target', silent=True, session_id=2021 )
Tabular Playground Series - Jan 2021
14,208,130
mod_paths = ['.. /input/hubmap-ensamble-model1/','.. /input/hubmap-ensamble-model2/'] THRESHOLD = 0.5 BATCH_SIZE = 256 CHECKSUM = False<load_pretrained>
blended = blend_models(best_3, fold=5 )
Tabular Playground Series - Jan 2021
14,208,130
identity = rasterio.Affine(1, 0, 0, 0, 1, 0) fold_models = [] for mod_path in mod_paths: with open(mod_path+'params.yaml')as file: P = yaml.load(file, Loader=yaml.FullLoader) pprint.pprint(P) with open(mod_path + 'metrics.json')as json_file: M = json.load(json_file) print('Model run datetime: '+M['datetime']) print('OOF val_dice_coef: ' + str(M['oof_dice_coef'])) for fold_model_path in glob.glob(mod_path+'*.h5'): fold_models.append(tf.keras.models.load_model(fold_model_path,compile = False)) print(len(fold_models))<define_variables>
pred_holdout = predict_model(blended )
Tabular Playground Series - Jan 2021
14,208,130
WINDOW = P['TILE'] if 'TILE' in P.keys() else P['DIM_FROM'] CROP_SIZE = WINDOW//2 INPUT_SIZE = P['INPUT_SIZE']<define_variables>
final_model = finalize_model(blended )
Tabular Playground Series - Jan 2021
14,208,130
MIN_OVERLAP = WINDOW - CROP_SIZE BOARD_CUT =(WINDOW - CROP_SIZE)//2<prepare_x_and_y>
predictions = predict_model(final_model, data=test )
Tabular Playground Series - Jan 2021
14,208,130
def rle_encode_less_memory(img):
    """Run-length encode a binary mask in column-major (Fortran) order.

    Returns the Kaggle-style 'start length start length ...' string with
    1-indexed start positions.
    """
    flat = np.concatenate([[False], img.T.flatten(), [False]])
    # Value-change indices mark run boundaries; +1 converts a boundary
    # index in the padded array into a 1-based pixel position.
    bounds = np.where(flat[1:] != flat[:-1])[0] + 1
    bounds[1::2] -= bounds[::2]  # turn run ends into run lengths
    return ' '.join(str(v) for v in bounds)

def make_grid(shape, window, min_overlap=0, board_cut=0):
    """Enumerate sliding-window coordinates covering a 2-D image.

    Windows of size `window` advance by (window - min_overlap); an extra
    window is appended per axis so coverage reaches the border.  Returns an
    (n, 4) int64 array of (x1, x2, y1, y2) boxes clipped to the image.
    """
    step = window - min_overlap
    height, width = shape

    def _starts(extent):
        limit = extent - window + board_cut
        starts = np.arange(0, max(limit, 1), step, dtype=np.int64)
        # Guarantee the final window reaches the image border.
        if limit > 0 and starts[-1] < limit:
            starts = np.concatenate([starts, [limit]])
        return starts

    xs = _starts(height)
    ys = _starts(width)
    boxes = np.zeros((len(xs), len(ys), 4), dtype=np.int64)
    for i, x0 in enumerate(xs):
        for j, y0 in enumerate(ys):
            boxes[i, j] = x0, min(x0 + window, height), y0, min(y0 + window, width)
    return boxes.reshape(len(xs) * len(ys), 4)
submission['target'] = predictions['Label']
Tabular Playground Series - Jan 2021
14,208,130
# --- TFRecord tile-reading pipeline ---
AUTO = tf.data.experimental.AUTOTUNE
# Schema of one serialized tile: raw image bytes plus its (x1, y1) origin.
image_feature = {
    'image': tf.io.FixedLenFeature([], tf.string),
    'x1': tf.io.FixedLenFeature([], tf.int64),
    'y1': tf.io.FixedLenFeature([], tf.int64)
}
def _parse_image(example_proto):
    """Decode one TFRecord example into (uint8 image, x1, y1)."""
    example = tf.io.parse_single_example(example_proto, image_feature)
    # Raw bytes were written uncompressed at P['DIM'] x P['DIM'] x 3.
    image = tf.reshape(tf.io.decode_raw(example['image'],out_type=np.dtype('uint8')) ,(P['DIM'],P['DIM'], 3))
    return image, example['x1'], example['y1']
def load_dataset(filenames, ordered=True):
    """Build a parsed tf.data dataset; ordered=False trades determinism for speed."""
    ignore_order = tf.data.Options()
    if not ordered:
        ignore_order.experimental_deterministic = False
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(_parse_image)
    return dataset
def get_dataset(FILENAME):
    """Batched, prefetched inference dataset for one TFRecord file."""
    dataset = load_dataset(FILENAME)
    dataset = dataset.batch(64)
    dataset = dataset.prefetch(AUTO)
    return dataset
submission.to_csv('submission_0116_baseline.csv', index=False )
Tabular Playground Series - Jan 2021
14,282,092
submission = pd.DataFrame.from_dict(subm, orient='index') submission.to_csv('submission.csv', index=False) submission.head()<install_modules>
import lightgbm as lgb import optuna.integration.lightgbm as oplgb from sklearn.model_selection import KFold from sklearn.metrics import mean_squared_error from tqdm.notebook import tqdm import matplotlib.pyplot as plt import seaborn as sns
Tabular Playground Series - Jan 2021
14,282,092
sys.path.append(".. /input/zarrkaggleinstall") sys.path.append(".. /input/segmentation-models-pytorch-install") !pip install -q --no-deps.. /input/deepflash2-lfs <categorify>
df_train = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv") df_test = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv") df_sample = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv" )
Tabular Playground Series - Jan 2021
14,282,092
def rle_encode_less_memory(img):
    """Column-major RLE of a binary mask without allocating a padded copy.

    The first and last flattened pixels are forced to background, so masks
    touching those corners are assumed empty (the "less memory" trade-off).
    """
    flat = img.T.flatten()
    flat[0] = 0
    flat[-1] = 0
    # +2: +1 to shift the transition index to a pixel index, +1 for 1-based RLE.
    bounds = np.where(flat[1:] != flat[:-1])[0] + 2
    bounds[1::2] -= bounds[::2]
    return ' '.join(str(v) for v in bounds)

def load_model_weights(model, file, strict=True):
    """Load a checkpoint saved as {'model': state_dict, 'stats': ...} onto CPU.

    Returns the model (mutated in place) together with the stored stats.
    """
    checkpoint = torch.load(file, map_location='cpu')
    stats = checkpoint['stats']
    model.load_state_dict(checkpoint['model'], strict=strict)
    return model, stats
train_id = df_train["id"] test_id = df_test["id"] df_train.drop("id", axis=1, inplace=True) df_test.drop("id", axis=1, inplace=True )
Tabular Playground Series - Jan 2021
14,282,092
@patch def read_img(self:BaseDataset, *args, **kwargs): image = tifffile.imread(args[0]) if len(image.shape)== 5: image = image.squeeze().transpose(1, 2, 0) elif image.shape[0] == 3: image = image.transpose(1, 2, 0) return image @patch def apply(self:DeformationField, data, offset=(0, 0), pad=(0, 0), order=1): "Apply deformation field to image using interpolation" outshape = tuple(int(s - p)for(s, p)in zip(self.shape, pad)) coords = [np.squeeze(d ).astype('float32' ).reshape(*outshape)for d in self.get(offset, pad)] sl = [] for i in range(len(coords)) : cmin, cmax = int(coords[i].min()), int(coords[i].max()) dmax = data.shape[i] if cmin<0: cmax = max(-cmin, cmax) cmin = 0 elif cmax>dmax: cmin = min(cmin, 2*dmax-cmax) cmax = dmax coords[i] -= cmin else: coords[i] -= cmin sl.append(slice(cmin, cmax)) if len(data.shape)== len(self.shape)+ 1: tile = np.empty(( *outshape, data.shape[-1])) for c in range(data.shape[-1]): tile[..., c] = cv2.remap(data[sl[0],sl[1], c]/255, coords[1],coords[0], interpolation=order, borderMode=cv2.BORDER_REFLECT) else: tile = cv2.remap(data[sl[0], sl[1]], coords[1], coords[0], interpolation=order, borderMode=cv2.BORDER_REFLECT) return tile<define_variables>
feature_cols = [c for c in df_train.columns if c != "target"]
Tabular Playground Series - Jan 2021
14,282,092
class CONFIG() : data_path = Path('.. /input/hubmap-kidney-segmentation') models_path = Path('.. /input/hubmap-efficient-sampling-deepflash2-train') models_file = np.array([x for x in models_path.iterdir() if x.name.startswith('u')]) scale = 3 tile_shape =(512, 512) padding =(100,100) encoder_name = "efficientnet-b4" encoder_weights = None in_channels = 3 classes = 2 batch_size = 16 threshold = 0.5 cfg = CONFIG()<load_from_csv>
train_x = df_train[feature_cols] train_y = df_train.target test_x = df_test
Tabular Playground Series - Jan 2021
14,282,092
print(cfg.models_file) print(len(cfg.models_file)) df_sample = pd.read_csv(cfg.data_path/'sample_submission.csv', index_col='id') names,preds = [],[] sub = None<categorify>
folds = KFold(n_splits=5, shuffle=True, random_state=2021 )
Tabular Playground Series - Jan 2021
14,282,092
names,preds = [],[] for idx, _ in df_sample.iterrows() : print(f' f = cfg.data_path/'test'/f'{idx}.tiff' ds = TileDataset([f], scale=cfg.scale, tile_shape=cfg.tile_shape, padding=cfg.padding) shape = ds.data[f.name].shape print('Shape:', shape) names.append(idx) msk = None print('Prediction') for model_path in cfg.models_file: model = smp.Unet(encoder_name=cfg.encoder_name, encoder_weights=cfg.encoder_weights, in_channels=cfg.in_channels, classes=cfg.classes) model, stats = load_model_weights(model, model_path) batch_tfms = [Normalize.from_stats(*stats)] dls = DataLoaders.from_dsets(ds, batch_size=cfg.batch_size, after_batch=batch_tfms, shuffle=False, drop_last=False) if torch.cuda.is_available() : dls.cuda() , model.cuda() learn = Learner(dls, model, loss_func='') res = learn.predict_tiles(dl=dls.train, path='/kaggle/temp/', use_tta=False, uncertainty_estimates=False) if msk is None: msk = res[0][f.name][..., 1] else: msk += res[0][f.name][..., 1] del model, stats, learn msk = msk/len(cfg.models_file) msk =(msk > cfg.threshold ).astype(np.uint8) print('Rezising') msk = cv2.resize(msk,(shape[1], shape[0])) rle = rle_encode_less_memory(msk) preds.append(rle) print('Plotting') fig, ax = plt.subplots(figsize=(15,15)) ax.imshow(cv2.resize(msk,(1024, 1024))) plt.show() _ = [shutil.rmtree(p, ignore_errors=True)for p in Path('/kaggle/temp/' ).iterdir() ] _ = [shutil.rmtree(p, ignore_errors=True)for p in Path('/tmp/' ).iterdir() if p.name.startswith('zarr')]<save_to_csv>
class FoldsAverageLGBM:
    """K-fold LightGBM trainer that keeps every fold model and averages
    their predictions at inference time.

    After fit(): `models` holds one booster per fold, `oof_preds` the
    out-of-fold predictions aligned with the training rows.
    """

    def __init__(self, folds):
        # Any sklearn-style splitter exposing .split(X).
        self.folds = folds
        self.models = []

    def fit(self, lgb_params, train_x, train_y):
        """Train one booster per fold of `train_x`/`train_y` (pandas objects)."""
        oof_preds = np.zeros_like(train_y)
        self.train_x = train_x.values
        self.train_y = train_y.values
        # BUG FIX: previously iterated the module-level global `folds`
        # instead of the splitter passed to __init__.
        for tr_idx, va_idx in tqdm(self.folds.split(train_x)):
            tr_x, va_x = self.train_x[tr_idx], self.train_x[va_idx]
            tr_y, va_y = self.train_y[tr_idx], self.train_y[va_idx]
            lgb_train_dataset = lgb.Dataset(tr_x, tr_y)
            lgb_valid_dataset = lgb.Dataset(va_x, va_y)
            model = lgb.train(lgb_params, lgb_train_dataset, valid_sets=[lgb_valid_dataset], verbose_eval=100)
            self.models.append(model)
            oof_pred = model.predict(va_x)
            oof_preds[va_idx] = oof_pred
        self.oof_preds = oof_preds

    def predict(self, test_x):
        """Return the mean prediction of all fold models for `test_x`."""
        preds = []
        for model in tqdm(self.models):
            pred = model.predict(test_x)
            preds.append(pred)
        preds = np.mean(preds, axis=0)
        return preds
Tabular Playground Series - Jan 2021
14,282,092
df = pd.DataFrame({'id':names,'predicted':preds} ).set_index('id') df_sample.loc[df.index.values] = df.values df_sample.to_csv('submission.csv' )<set_options>
best_lgb_params = { 'seed': 2021, 'objective': 'regression', 'metric': 'rmse', 'verbosity': -1, 'feature_pre_filter': False, 'lambda_l1': 6.540486456085813, 'lambda_l2': 0.01548480538099245, 'num_leaves': 256, 'feature_fraction': 0.52, 'bagging_fraction': 0.6161835249194311, 'bagging_freq': 7, 'min_child_samples': 20 } best_lgb_params["learning_rate"] = 0.001 best_lgb_params["early_stopping_round"] = 1000 best_lgb_params["num_iterations"] = 20000
Tabular Playground Series - Jan 2021
14,282,092
warnings.filterwarnings("ignore" )<define_variables>
folds_average_lgbm = FoldsAverageLGBM(folds )
Tabular Playground Series - Jan 2021
14,282,092
Threshold = 35<categorify>
folds_average_lgbm.fit(best_lgb_params, train_x, train_y )
Tabular Playground Series - Jan 2021
14,282,092
def rle_encode_less_memory(img): pixels = img.T.flatten() pixels[0] = 0 pixels[-1] = 0 runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs )<load_from_csv>
np.sqrt(mean_squared_error(df_train.target, folds_average_lgbm.oof_preds))
Tabular Playground Series - Jan 2021
14,282,092
df_sample = pd.read_csv('.. /input/hubmap-kidney-segmentation/sample_submission.csv' )<define_variables>
y_pred = folds_average_lgbm.predict(test_x )
Tabular Playground Series - Jan 2021
14,282,092
names,preds = [],[]<categorify>
sub = df_sample.copy() sub["target"] = y_pred sub.to_csv("submission_lgbm_1.csv", index=False) sub.head()
Tabular Playground Series - Jan 2021
14,207,193
for idx,row in tqdm(df_sample.iterrows() ,total=len(df_sample)) : idx = row['id'] pred1 = np.load(f"./pred_{idx}_reduce2.npz")['arr_0'].astype(np.uint8) pred2 = np.load(f"./pred_{idx}_reduce4.npz")['arr_0'].astype(np.uint8) mask =(pred1 + pred2)> 2 * Threshold rle = rle_encode_less_memory(mask) names.append(idx) preds.append(rle) del mask, pred1, pred2, rle gc.collect()<save_to_csv>
train = pd.read_csv(input_path / 'train.csv', index_col='id') display(train.head() )
Tabular Playground Series - Jan 2021
14,207,193
df = pd.DataFrame({'id':names,'predicted':preds}) df.to_csv('submission.csv',index=False )<set_options>
test = pd.read_csv(input_path / 'test.csv', index_col='id') display(test.head() )
Tabular Playground Series - Jan 2021
14,207,193
warnings.filterwarnings("ignore" )<load_from_csv>
submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id') display(submission.head() )
Tabular Playground Series - Jan 2021
14,207,193
sz = 4096 reduce = 2 TH = 0.39 DATA = '.. /input/hubmap-kidney-segmentation/test/' MODELS = [f'.. /input/pytorch-reduce2-1024-resnet101-elu/model_{i}.pth' for i in range(10)] df_sample = pd.read_csv('.. /input/hubmap-kidney-segmentation/sample_submission.csv') bs = 1 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') mean = np.array([0.64296154, 0.48859665, 0.68421074]) std = np.array([0.18233702, 0.24357342, 0.16591795] )<categorify>
!pip install pytorch-tabnet
Tabular Playground Series - Jan 2021
14,207,193
def enc2mask(encs, shape): img = np.zeros(shape[0]*shape[1], dtype=np.uint8) for m,enc in enumerate(encs): if isinstance(enc,np.float)and np.isnan(enc): continue s = enc.split() for i in range(len(s)//2): start = int(s[2*i])- 1 length = int(s[2*i+1]) img[start:start+length] = 1 + m return img.reshape(shape ).T def mask2enc(mask, n=1): pixels = mask.T.flatten() encs = [] for i in range(1,n+1): p =(pixels == i ).astype(np.int8) if p.sum() == 0: encs.append(np.nan) else: p = np.concatenate([[0], p, [0]]) runs = np.where(p[1:] != p[:-1])[0] + 1 runs[1::2] -= runs[::2] encs.append(' '.join(str(x)for x in runs)) return encs def rle_encode_less_memory(img): pixels = img.T.flatten() pixels[0] = 0 pixels[-1] = 0 runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs )<data_type_conversions>
features = train.columns[1:-1] X = train[features] y = np.log1p(train["target"]) X_test = test[features]
Tabular Playground Series - Jan 2021
14,207,193
s_th = 40 p_th = 1000*(sz//256)**2 identity = rasterio.Affine(1, 0, 0, 0, 1, 0) def img2tensor(img,dtype:np.dtype=np.float32): if img.ndim==2 : img = np.expand_dims(img,2) img = np.transpose(img,(2,0,1)) return torch.from_numpy(img.astype(dtype, copy=False)) class HuBMAPDataset(Dataset): def __init__(self, idx, sz=sz, reduce=reduce): self.data = rasterio.open(os.path.join(DATA,idx+'.tiff'), transform = identity, num_threads='all_cpus') if self.data.count != 3: subdatasets = self.data.subdatasets self.layers = [] if len(subdatasets)> 0: for i, subdataset in enumerate(subdatasets, 0): self.layers.append(rasterio.open(subdataset)) self.shape = self.data.shape self.reduce = reduce self.sz = reduce*sz self.pad0 =(self.sz - self.shape[0]%self.sz)%self.sz self.pad1 =(self.sz - self.shape[1]%self.sz)%self.sz self.n0max =(self.shape[0] + self.pad0)//self.sz self.n1max =(self.shape[1] + self.pad1)//self.sz def __len__(self): return self.n0max*self.n1max def __getitem__(self, idx): n0,n1 = idx//self.n1max, idx%self.n1max x0,y0 = -self.pad0//2 + n0*self.sz, -self.pad1//2 + n1*self.sz p00,p01 = max(0,x0), min(x0+self.sz,self.shape[0]) p10,p11 = max(0,y0), min(y0+self.sz,self.shape[1]) img = np.zeros(( self.sz,self.sz,3),np.uint8) if self.data.count == 3: img[(p00-x0):(p01-x0),(p10-y0):(p11-y0)] = np.moveaxis(self.data.read([1,2,3], window=Window.from_slices(( p00,p01),(p10,p11))), 0, -1) else: for i,layer in enumerate(self.layers): img[(p00-x0):(p01-x0),(p10-y0):(p11-y0),i] =\ layer.read(1,window=Window.from_slices(( p00,p01),(p10,p11))) if self.reduce != 1: img = cv2.resize(img,(self.sz//reduce,self.sz//reduce), interpolation = cv2.INTER_AREA) hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) h,s,v = cv2.split(hsv) if(s>s_th ).sum() <= p_th or img.sum() <= p_th: return img2tensor(( img/255.0 - mean)/std), -1 else: return img2tensor(( img/255.0 - mean)/std), idx<categorify>
X = X.to_numpy() y = y.to_numpy().reshape(-1, 1) X_test = X_test.to_numpy()
Tabular Playground Series - Jan 2021
14,207,193
class Model_pred: def __init__(self, models, dl, tta:bool=True, half:bool=False): self.models = models self.dl = dl self.tta = tta self.half = half def __iter__(self): count=0 with torch.no_grad() : for x,y in iter(self.dl): if(( y>=0 ).sum() > 0): x = x[y>=0].to(device) y = y[y>=0] if self.half: x = x.half() py = None for model in self.models: p = model(x) p = torch.sigmoid(p ).detach() if py is None: py = p else: py += p if self.tta: flips = [[-1],[-2],[-2,-1]] for f in flips: xf = torch.flip(x,f) for model in self.models: p = model(xf) p = torch.flip(p,f) py += torch.sigmoid(p ).detach() py /=(1+len(flips)) py /= len(self.models) py = F.upsample(py, scale_factor=reduce, mode="bilinear") py = py.permute(0,2,3,1 ).float().cpu() batch_size = len(py) for i in range(batch_size): yield py[i],y[i] count += 1 def __len__(self): return len(self.dl.dataset )<define_search_model>
kf = KFold(n_splits=5, random_state=42, shuffle=True) predictions_array =[] CV_score_array =[] for train_index, test_index in kf.split(X): X_train, X_valid = X[train_index], X[test_index] y_train, y_valid = y[train_index], y[test_index] regressor = TabNetRegressor(verbose=1,seed=42) regressor.fit(X_train=X_train, y_train=y_train, eval_set=[(X_valid, y_valid)], patience=1, max_epochs=2, eval_metric=['rmse']) CV_score_array.append(regressor.best_cost) predictions_array.append(np.expm1(regressor.predict(X_test))) predictions = np.mean(predictions_array,axis=0 )
Tabular Playground Series - Jan 2021
14,207,193
class FPN(nn.Module): def __init__(self, input_channels:list, output_channels:list): super().__init__() self.convs = nn.ModuleList( [nn.Sequential(nn.Conv2d(in_ch, out_ch*2, kernel_size=3, padding=1), nn.ELU(inplace=True), nn.BatchNorm2d(out_ch*2), nn.Conv2d(out_ch*2, out_ch, kernel_size=3, padding=1)) for in_ch, out_ch in zip(input_channels, output_channels)]) def forward(self, xs:list, last_layer): hcs = [F.interpolate(c(x),scale_factor=2**(len(self.convs)-i),mode='bilinear') for i,(c,x)in enumerate(zip(self.convs, xs)) ] hcs.append(last_layer) return torch.cat(hcs, dim=1) class UnetBlock(Module): def __init__(self, up_in_c:int, x_in_c:int, nf:int=None, blur:bool=False, self_attention:bool=False, **kwargs): super().__init__() self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, **kwargs) self.bn = nn.BatchNorm2d(x_in_c) ni = up_in_c//2 + x_in_c nf = nf if nf is not None else max(up_in_c//2,32) self.conv1 = ConvLayer(ni, nf, norm_type=None, **kwargs) self.conv2 = ConvLayer(nf, nf, norm_type=None, xtra=SelfAttention(nf)if self_attention else None, **kwargs) self.relu = nn.ELU(inplace=True) def forward(self, up_in:Tensor, left_in:Tensor)-> Tensor: s = left_in up_out = self.shuf(up_in) cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) return self.conv2(self.conv1(cat_x)) class _ASPPModule(nn.Module): def __init__(self, inplanes, planes, kernel_size, padding, dilation, groups=1): super().__init__() self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=1, padding=padding, dilation=dilation, bias=False, groups=groups) self.bn = nn.BatchNorm2d(planes) self.relu = nn.ELU() self._init_weight() def forward(self, x): x = self.atrous_conv(x) x = self.bn(x) return self.relu(x) def _init_weight(self): for m in self.modules() : if isinstance(m, nn.Conv2d): torch.nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() class ASPP(nn.Module): def __init__(self, inplanes=512, 
mid_c=256, dilations=[6, 12, 18, 24], out_c=None): super().__init__() self.aspps = [_ASPPModule(inplanes, mid_c, 1, padding=0, dilation=1)] + \ [_ASPPModule(inplanes, mid_c, 3, padding=d, dilation=d,groups=4)for d in dilations] self.aspps = nn.ModuleList(self.aspps) self.global_pool = nn.Sequential(nn.AdaptiveMaxPool2d(( 1, 1)) , nn.Conv2d(inplanes, mid_c, 1, stride=1, bias=False), nn.BatchNorm2d(mid_c), nn.ELU()) out_c = out_c if out_c is not None else mid_c self.out_conv = nn.Sequential(nn.Conv2d(mid_c*(2+len(dilations)) , out_c, 1, bias=False), nn.BatchNorm2d(out_c), nn.ELU(inplace=True)) self.conv1 = nn.Conv2d(mid_c*(2+len(dilations)) , out_c, 1, bias=False) self._init_weight() def forward(self, x): x0 = self.global_pool(x) xs = [aspp(x)for aspp in self.aspps] x0 = F.interpolate(x0, size=xs[0].size() [2:], mode='bilinear', align_corners=True) x = torch.cat([x0] + xs, dim=1) return self.out_conv(x) def _init_weight(self): for m in self.modules() : if isinstance(m, nn.Conv2d): torch.nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_()<choose_model_class>
print("The CV score is %.5f" % np.mean(CV_score_array,axis=0))
Tabular Playground Series - Jan 2021
14,207,193
class UneXt50(nn.Module): def __init__(self, stride=1, **kwargs): super().__init__() m = ResNet(Bottleneck, [3, 4, 23, 3], groups=32, width_per_group=4) self.enc0 = nn.Sequential(m.conv1, m.bn1, nn.ELU(inplace=True)) self.enc1 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1), m.layer1) self.enc2 = m.layer2 self.enc3 = m.layer3 self.enc4 = m.layer4 self.aspp = ASPP(2048,256,out_c=512,dilations=[stride*1,stride*2,stride*3,stride*4]) self.drop_aspp = nn.Dropout2d(0.5) self.dec4 = UnetBlock(512,1024,256) self.dec3 = UnetBlock(256,512,128) self.dec2 = UnetBlock(128,256,64) self.dec1 = UnetBlock(64,64,32) self.fpn = FPN([512,256,128,64],[16]*4) self.drop = nn.Dropout2d(0.1) self.final_conv = ConvLayer(32+16*4, 1, ks=1, norm_type=None, act_cls=None) def forward(self, x): enc0 = self.enc0(x) enc1 = self.enc1(enc0) enc2 = self.enc2(enc1) enc3 = self.enc3(enc2) enc4 = self.enc4(enc3) enc5 = self.aspp(enc4) dec3 = self.dec4(self.drop_aspp(enc5),enc3) dec2 = self.dec3(dec3,enc2) dec1 = self.dec2(dec2,enc1) dec0 = self.dec1(dec1,enc0) x = self.fpn([enc5, dec3, dec2, dec1], dec0) x = self.final_conv(self.drop(x)) x = F.interpolate(x,scale_factor=2,mode='bilinear') return x<train_model>
submission.iloc[:,0:] = predictions submission.to_csv('submission.csv' )
Tabular Playground Series - Jan 2021
14,139,468
models = [] for path in MODELS: state_dict = torch.load(path,map_location=torch.device('cpu')) model = UneXt50() model.load_state_dict(state_dict) model.float() model.eval() model.to(device) models.append(model) del state_dict<categorify>
from catboost import CatBoostRegressor
Tabular Playground Series - Jan 2021
14,139,468
names,preds = [],[] for idx,row in tqdm(df_sample.iterrows() ,total=len(df_sample)) : idx = row['id'] ds = HuBMAPDataset(idx) dl = DataLoader(ds,bs,num_workers=0,shuffle=False,pin_memory=True) mp = Model_pred(models,dl) mask = torch.zeros(len(ds),ds.sz,ds.sz,dtype=torch.int8) for p,i in iter(mp): mask[i.item() ] = p.squeeze(-1)> TH mask = mask.view(ds.n0max,ds.n1max,ds.sz,ds.sz ).\ permute(0,2,1,3 ).reshape(ds.n0max*ds.sz,ds.n1max*ds.sz) mask = mask[ds.pad0//2:-(ds.pad0-ds.pad0//2)if ds.pad0 > 0 else ds.n0max*ds.sz, ds.pad1//2:-(ds.pad1-ds.pad1//2)if ds.pad1 > 0 else ds.n1max*ds.sz] rle = rle_encode_less_memory(mask.numpy()) names.append(idx) preds.append(rle) del mask, ds, dl gc.collect()<save_to_csv>
df_train = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv') y = df_train['target'] df_train.drop(['id', 'target'], axis = 1, inplace = True) df_test = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv') sub_id = df_test['id'] df_test.drop('id', axis = 1, inplace = True )
Tabular Playground Series - Jan 2021
14,139,468
df = pd.DataFrame({'id':names,'predicted':preds}) df.to_csv('submission.csv',index=False )<install_modules>
cbr = CatBoostRegressor() cbr.fit(df_train, y )
Tabular Playground Series - Jan 2021
14,139,468
!pip install.. /input/segmentationmodelspytorch-013/pretrainedmodels-0.7.4-py3-none-any.whl !pip install.. /input/segmentationmodelspytorch-013/efficientnet_pytorch-0.6.3-py2.py3-none-any.whl !pip install.. /input/segmentationmodelspytorch-013/timm-0.3.2-py3-none-any.whl !pip install.. /input/segmentationmodelspytorch-013/segmentation_models_pytorch-0.1.3-py3-none-any.whl clear_output()<load_from_csv>
submission = pd.DataFrame(sub_id, columns = ['id']) submission.head()
Tabular Playground Series - Jan 2021
14,139,468
sample_submission = pd.read_csv('.. /input/hubmap-kidney-segmentation/sample_submission.csv') sample_submission = sample_submission.set_index('id') seed = 1015 np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') def rle_encode_less_memory(img): pixels = img.T.flatten() pixels[0] = 0 pixels[-1] = 0 runs = np.where(pixels[1:] != pixels[:-1])[0] + 2 runs[1::2] -= runs[::2] return ' '.join(str(x)for x in runs) test_files = sample_submission.index.tolist()<define_variables>
submission['target'] = cbr.predict(df_test )
Tabular Playground Series - Jan 2021
14,139,468
PATH = ".. /input/hubmap-models2" model_list = ['1_unet-se_resnet50_0.9526_epoch_28.pth', '2_unet-se_resnet50_0.9494_epoch_28.pth', '1_unet-timm-effb0_0.9495_epoch_39.pth', '2_unet-timm-effb0_0.9477_epoch_35.pth', '1_unet-timm-resnest26d_0.9522_epoch_28.pth', '1_unet-se_resnet50_pesudo_0.9572_epoch_26.pth', '1_unet-timm-resnest26d-pesudo_0.9563_epoch_39.pth']<define_variables>
submission.to_csv('catboost.csv', index = False )
Tabular Playground Series - Jan 2021
14,220,134
model_path = list(map(lambda x: os.path.join(PATH, x), model_list))<train_model>
mpl.rcParams['agg.path.chunksize'] = 10000
Tabular Playground Series - Jan 2021
14,220,134
models = [] for path in model_path: model = torch.load(path, map_location= 'cuda') model.float() model.eval() model.to('cuda') models.append(model) del model<define_variables>
train_data = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv') test_data = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv') print("successfully loaded!" )
Tabular Playground Series - Jan 2021
14,220,134
sz = 512 test_path = '.. /input/hubmap-kidney-segmentation/test/' for step, person_idx in enumerate(test_files): print(f'load {step+1}/{len(test_files)} data...') img = tiff.imread(test_path + person_idx + '.tiff' ).squeeze() if img.shape[0] == 3: img = img.transpose(1,2,0) predict_mask_l1 = np.zeros(( img.shape[0], img.shape[1]), dtype = bool) landscape =img.shape[0]// 512 portrait = img.shape[1]// 512 sz = 512 print('predict mask...') for x in tqdm(range(landscape)) : for y in range(portrait): start_x =(512)* x end_x =(1024)+ start_x start_y =(512)* y end_y =(1024)+ start_y if x == landscape-1: start_x = img.shape[0] - 1024 end_x = img.shape[0] if y == portrait-1: start_y = img.shape[1] - 1024 end_y = img.shape[1] sample_img = img[start_x : end_x, start_y : end_y,:] sample_img = cv2.resize(sample_img,(sz,sz),interpolation = cv2.INTER_AREA)/256 sample_img = torch.cuda.FloatTensor(sample_img.transpose([2,0,1])[np.newaxis,...]) with torch.no_grad() : sample_pred = models[0].predict(sample_img ).cpu().numpy() [0,0,:,:] sample_pred = cv2.resize(sample_pred,(1024,1024),interpolation = cv2.INTER_NEAREST) sample_pred = np.where(sample_pred > 0.1, True, False ).astype(bool) predict_mask_l1[start_x + 256 : end_x - 256, start_y + 256 : end_y - 256] = sample_pred[256:256 + 512,256:256 + 512] del sample_img del sample_pred gc.collect() predict_mask_l1 = predict_mask_l1.astype(np.uint8) contours, hierarchy = cv2.findContours(predict_mask_l1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) predict_mask_l2 = np.zeros(( img.shape[0], img.shape[1]), dtype = bool) for cont in tqdm(contours): center_y, center_x = cont.mean(axis = 0 ).round(0 ).astype(int)[0] left_x = int(center_x - 512) top_y = int(center_y - 512) if left_x < 0: left_x = 0 elif left_x + 1024 > img.shape[0]: left_x = img.shape[0] - 1024 if top_y < 0: top_y = 0 elif top_y + 1024 > img.shape[1]: top_y = img.shape[1] - 1024 sample_img_l2 = img[left_x : left_x + 1024, top_y : top_y+ 1024,:] sample_img_l2 = 
cv2.resize(sample_img_l2,(sz,sz),interpolation = cv2.INTER_AREA)/256 sample_img_l2 = torch.cuda.FloatTensor(sample_img_l2.transpose([2,0,1])[np.newaxis,...]) sample_pred_l2 = None with torch.no_grad() : for model in models: pred_l2 = model.predict(sample_img_l2 ).cpu().numpy() [0,0,:,:] if sample_pred_l2 is None: sample_pred_l2 = pred_l2 sample_pred_l2 += pred_l2 sample_pred_l2 /= len(models) sample_pred_l2 = cv2.resize(sample_pred_l2,(1024,1024),interpolation = cv2.INTER_NEAREST) sample_pred_l2 = np.where(sample_pred_l2 > 0.3, True, False ).astype(np.uint8) contours_l2, hierarchy = cv2.findContours(sample_pred_l2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) if len(contours_l2)< 1: print('no cotour') continue for cont_l2 in contours_l2: area = cv2.contourArea(cont_l2) min_y, min_x = cont_l2.min(axis = 0 ).round(0 ).astype(int)[0] max_y, max_x = cont_l2.max(axis = 0 ).round(0 ).astype(int)[0] if(min_x < 512)and(max_x > 512): if(min_y < 512)and(max_y > 512): sample_mask_l2 = np.zeros(sample_pred_l2.shape, dtype = np.uint8) sample_center = cv2.drawContours(sample_mask_l2, [cont_l2], 0, (255, 255, 255), -1) predict_mask_l2[left_x : left_x + 1024, top_y : top_y+ 1024] =\ np.logical_or(predict_mask_l2[left_x : left_x + 1024, top_y : top_y+ 1024], sample_center) del predict_mask_l1 del img gc.collect() print('convert mask to rle ') predict_rle = rle_encode_less_memory(predict_mask_l2) sample_submission.loc[person_idx,'predicted'] = predict_rle del predict_rle del predict_mask_l2 gc.collect() sample_submission.reset_index().to_csv('/kaggle/working/submission.csv',index=False) sample_submission.reset_index().to_csv('submission.csv', index = False) sample_submission = sample_submission.reset_index() sample_submission.to_csv('/kaggle/working/submission.csv',index=False) sample_submission.to_csv('submission.csv', index = False) sample_submission<load_from_csv>
outlier = train_data.loc[train_data.target < 1.0] print(outlier )
Tabular Playground Series - Jan 2021
14,220,134
sample_sub = pd.read_csv('.. /input/hubmap-kidney-segmentation/sample_submission.csv', index_col='id') submission = pd.read_csv('.. /input/my-csv-outputs/d488c759a_single_mask.csv', index_col='id' )<save_to_csv>
train_data.drop([170514], inplace = True )
Tabular Playground Series - Jan 2021
14,220,134
pub_ids = submission.index.values predictions = submission.values sample_sub.loc[pub_ids] = predictions sample_sub.to_csv('submission.csv') <load_pretrained>
y_train = train_data["target"] train_data.drop(columns = ["target"], inplace = True )
Tabular Playground Series - Jan 2021
14,220,134
df = pd.read_pickle('.. /input/preprocessingdata/df.pkl' )<prepare_x_and_y>
params = { 'n_estimators' : [1500, 2000, 2500], 'learning_rate' : [0.01, 0.02] } xgb = XGBRegressor( objective = 'reg:squarederror', subsample = 0.8, colsample_bytree = 0.8, learning_rate = 0.01, tree_method = 'gpu_hist') grid_search = GridSearchCV(xgb, param_grid = params, scoring = 'neg_root_mean_squared_error', n_jobs = -1, verbose = 10) grid_search.fit(train_data, y_train) print(' Best estimator:') print(grid_search.best_estimator_) print(' Best score:') print(grid_search.best_score_) print(' Best hyperparameters:') print(grid_search.best_params_ )
Tabular Playground Series - Jan 2021
14,220,134
X_train = df[df.date_block_num < 33].drop(['item_cnt_month'], axis=1) Y_train = df[df.date_block_num < 33]['item_cnt_month'] X_valid = df[df.date_block_num == 33].drop(['item_cnt_month'], axis=1) Y_valid = df[df.date_block_num == 33]['item_cnt_month'] X_test = df[df.date_block_num == 34].drop(['item_cnt_month'], axis=1) del df<create_dataframe>
clf = XGBRegressor( objective = 'reg:squarederror', subsample = 0.8, learning_rate = 0.02, max_depth = 7, n_estimators = 2500, tree_method = 'gpu_hist') clf.fit(train_data, y_train) y_pred_xgb = clf.predict(test_data) print(y_pred_xgb )
Tabular Playground Series - Jan 2021
14,220,134
feature_name = X_train.columns.tolist() feature_name_indexes = [ 'country_part', 'item_category_common', 'item_category_code', 'city_code', ] def objective(trial): lgb_train = lgb.Dataset(X_train[feature_name], Y_train) lgb_eval = lgb.Dataset(X_valid[feature_name], Y_valid, reference=lgb_train) params = { 'objective': 'rmse', 'metric': 'rmse', 'num_leaves': trial.suggest_int('num_leaves', 1000, 1500), 'min_data_in_leaf':10, 'feature_fraction':trial.suggest_uniform('feature_fraction', 0.6, 0.8), 'learning_rate': trial.suggest_uniform('feature_fraction', 0.01, 0.015), 'num_rounds': 1000, 'early_stopping_rounds': 30, 'seed': 1 } evals_result = {} gbm = lgb.train( params, lgb_train, num_boost_round=3000, valid_sets=(lgb_train, lgb_eval), feature_name = feature_name, categorical_feature = feature_name_indexes, verbose_eval=50, evals_result = evals_result, ) preds = gbm.predict(X_valid) loss = sklearn.metrics.mean_squared_error(Y_valid, preds) return loss <find_best_params>
solution = pd.DataFrame({"id":test_data.id, "target":y_pred_xgb}) solution.to_csv("solution.csv", index = False) print("saved successful!" )
Tabular Playground Series - Jan 2021
14,055,870
study = optuna.create_study(direction='minimize') study.optimize(objective, n_trials=50) print('Number of finished trials:', len(study.trials)) print('Best trial:', study.best_trial.params )<init_hyperparams>
train = pd.read_csv(input_path / 'train.csv', index_col='id') test = pd.read_csv(input_path / 'test.csv', index_col='id') submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id' )
Tabular Playground Series - Jan 2021
14,055,870
params = { 'objective': 'rmse', 'metric': 'rmse', 'num_leaves': 1012, 'min_data_in_leaf':10, 'feature_fraction':0.622351664881, 'learning_rate': 0.01, 'num_rounds': 1000, 'early_stopping_rounds': 30, 'seed': 1 } feature_name_indexes = [ 'country_part', 'item_category_common', 'item_category_code', 'city_code', ] lgb_train = lgb.Dataset(X_train[feature_name], Y_train) lgb_eval = lgb.Dataset(X_valid[feature_name], Y_valid, reference=lgb_train) evals_result = {} gbm = lgb.train( params, lgb_train, num_boost_round=3000, valid_sets=(lgb_train, lgb_eval), feature_name = feature_name, categorical_feature = feature_name_indexes, verbose_eval=50, evals_result = evals_result, )<save_to_csv>
target = train.pop('target') X_train, X_test, y_train, y_test = train_test_split(train, target, train_size=0.8 )
Tabular Playground Series - Jan 2021
14,055,870
test = pd.read_csv('.. /input/competitive-data-science-predict-future-sales/test.csv') Y_test = gbm.predict(X_test[feature_name] ).clip(0, 20) submission = pd.DataFrame({ "ID": test.index, "item_cnt_month": Y_test }) submission.to_csv('gbm_submission.csv', index=False )<load_from_csv>
parameters = { 'n_estimators': 350, 'tree_method': 'hist', 'learning_rate': 0.03, 'colsample_bytree': 0.9, 'subsample': 0.9, 'min_child_weight': 9, 'max_depth': 11, 'n_jobs': -1 }
Tabular Playground Series - Jan 2021
14,055,870
categories = pd.read_csv(".. /input/eng-translations/categories_eng.csv") items = pd.read_csv(".. /input/eng-translations/items_eng.csv") sales = pd.read_csv(".. /input/competitive-data-science-predict-future-sales/sales_train.csv") test = pd.read_csv(".. /input/competitive-data-science-predict-future-sales/test.csv") shops = pd.read_csv(".. /input/eng-translations/shops_eng.csv") submission = pd.read_csv(".. /input/competitive-data-science-predict-future-sales/sample_submission.csv" )<data_type_conversions>
parameters2 = { 'n_estimators': 350, 'tree_method': 'exact', 'learning_rate': 0.03, 'colsample_bytree': 0.9, 'subsample': 0.9, 'min_child_weight': 9, 'max_depth': 11, 'n_jobs': -1 }
Tabular Playground Series - Jan 2021
14,055,870
def downcast1(df, verbose=True): start_mem = df.memory_usage().sum() / 1024**2 for col in df.columns: dtype_name = df[col].dtype.name if dtype_name == 'object': pass elif dtype_name == 'bool': df[col] = df[col].astype('int8') elif dtype_name.startswith('int')or(df[col].round() == df[col] ).all() : df[col] = pd.to_numeric(df[col], downcast='integer') else: df[col] = pd.to_numeric(df[col], downcast='float') end_mem = df.memory_usage().sum() / 1024**2 if verbose: print('{:.1f}% compressed'.format(100 *(start_mem - end_mem)/ start_mem)) return df<define_variables>
Tabular Playground Series - Jan 2021
14,055,870
def cleans(i): pattern = r'[A-Za-z0-9]+' finds = re.findall(pattern, str(i)) stringy = "" for j in finds: stringy += f" {j}" return stringy<feature_engineering>
Tabular Playground Series - Jan 2021
14,055,870
shops["clean"] = shops["shop_name"].apply(cleans) shops.head()<feature_engineering>
final_model = XGBRegressor(tree_method='hist', min_child_weight=9, max_depth=11, n_jobs=-1, colsample_bytree=0.5, learning_rate=0.01, n_estimators=1500) final_model.fit(X_train, y_train, early_stopping_rounds=10, eval_set=[(X_test, y_test)], verbose=False) prediction = final_model.predict(X_test) mse = mean_squared_error(y_test, prediction,squared=False) print(mse)
Tabular Playground Series - Jan 2021
14,055,870
sales.loc[sales["shop_id"]==0, "shop_id"] = 57 sales.loc[sales["shop_id"]==1, "shop_id"] = 58 sales.loc[sales["shop_id"]==10, "shop_id"] = 11 sales.loc[sales["shop_id"]==39, "shop_id"] = 40 test.loc[test['shop_id'] == 0, 'shop_id'] = 57 test.loc[test['shop_id'] == 1, 'shop_id'] = 58 test.loc[test['shop_id'] == 10, 'shop_id'] = 11 test.loc[test['shop_id'] == 39, 'shop_id'] = 40<count_unique_values>
submission['target'] = final_model.predict(test) submission.to_csv('xgb_reg.csv' )
Tabular Playground Series - Jan 2021
14,162,481
unique_test_shops = test["shop_id"].unique() sales = sales[sales["shop_id"].isin(unique_test_shops)] print(f"Number of Unique Shops in Test Data:{len(unique_test_shops)} Number of Unique Shops in Sales Data:{len(sales['shop_id'].unique())}" )<drop_column>
import matplotlib.pyplot as plt import seaborn as sns from matplotlib_venn import venn2 import shap from optuna.integration import _lightgbm_tuner as lgb_tuner import optuna from catboost import CatBoost from catboost import Pool from catboost import cv import category_encoders as ce from tqdm import tqdm import lightgbm as lgb import xgboost as xgb import joblib import os import logging import datetime from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn import preprocessing from sklearn.preprocessing import LabelEncoder from sklearn.decomposition import PCA from scipy import stats from sklearn.preprocessing import StandardScaler from sklearn import metrics from catboost import CatBoostRegressor
Tabular Playground Series - Jan 2021
14,162,481
shops.drop("shop_name", axis=1, inplace=True )<feature_engineering>
df_train = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/train.csv") df_test = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/test.csv") submission = pd.read_csv("/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv" )
Tabular Playground Series - Jan 2021
14,162,481
shops["city"] = shops["clean"].apply(lambda x: x.split() [0] )<categorify>
y = df_train["target"] X = df_train.drop(["target","id"], axis=1 )
Tabular Playground Series - Jan 2021
14,162,481
le = LabelEncoder() shops["city"] = le.fit_transform(shops["city"]) shops.drop("clean", axis=1, inplace=True) <feature_engineering>
fold_num = 10 EARLY_STOPPING_ROUNDS = 10 VERBOSE_EVAL = 10000 LGB_ROUND_NUM = 10000 objective = 'regression' metric = 'rmse' params = { 'task': 'train', 'boosting_type': 'gbdt', 'objective': objective, 'metric': metric, 'verbosity': -1, "seed": 42, } @contextmanager def timer(logger=None, format_str='{:.3f}[s]', prefix=None, suffix=None): if prefix: format_str = str(prefix)+ format_str if suffix: format_str = format_str + str(suffix) start = time() yield d = time() - start out_str = format_str.format(d) if logger: logger.info(out_str) else: print(out_str) def fit_lgbm(X, y, cv, params: dict=None, verbose: int=50): metric_func = mean_squared_error if params is None: params = {} models = [] oof_pred = np.zeros_like(y, dtype=np.float) for i,(idx_train, idx_valid)in enumerate(cv): x_train, y_train = X[idx_train], y[idx_train] x_valid, y_valid = X[idx_valid], y[idx_valid] lgb_train = lgb.Dataset(x_train, y_train) lgb_valid = lgb.Dataset(x_valid, y_valid) with timer(prefix='fit fold={} '.format(i + 1)) : lgb_model = lgb_tuner.train(params, lgb_train, num_boost_round=LGB_ROUND_NUM, valid_names=["train", "valid"], valid_sets=[lgb_train, lgb_valid], early_stopping_rounds=EARLY_STOPPING_ROUNDS, verbose_eval=VERBOSE_EVAL) pred_i = lgb_model.predict(x_valid) oof_pred[idx_valid] = pred_i models.append(lgb_model) print(f'Fold {i} RMSE: {metric_func(y_valid, pred_i)**.5:.4f}') score = metric_func(y, oof_pred)**.5 print('FINISHED | Whole RMSE: {:.4f}'.format(score)) return oof_pred, models
Tabular Playground Series - Jan 2021
14,162,481
items["item_name"] = items["item_name"].str.lower() items["item_name_clean"] = items["item_name"].apply(cleans) items.drop("item_name", axis=1, inplace=True )<categorify>
fold = KFold(n_splits=5, shuffle=True, random_state=71) cv = list(fold.split(X, y)) oof, models = fit_lgbm(X.values, y, cv, params=params )
Tabular Playground Series - Jan 2021
14,162,481
items["item_name_five"] = [x[:5] for x in items["item_name_clean"]] items["item_name_five"] = le.fit_transform(items["item_name_five"]) items.drop("item_name_clean", axis=1, inplace=True )<groupby>
def visualize_importance(models, feat_train_df): feature_importance_df = pd.DataFrame() for i, model in enumerate(models): _df = pd.DataFrame() _df['feature_importance'] = model.feature_importance() _df['column'] = feat_train_df.columns _df['fold'] = i + 1 feature_importance_df = pd.concat([feature_importance_df, _df], axis=0, ignore_index=True) order = feature_importance_df.groupby('column')\ .sum() [['feature_importance']]\ .sort_values('feature_importance', ascending=False ).index[:50] fig, ax = plt.subplots(figsize=(max(6, len(order)*.4), 7)) sns.boxenplot(data=feature_importance_df, x='column', y='feature_importance', order=order, ax=ax, palette='viridis') ax.tick_params(axis='x', rotation=90) ax.grid() fig.tight_layout() return fig, ax
Tabular Playground Series - Jan 2021
14,162,481
items["first_sale_date"] = sales.groupby("item_id" ).agg({"date_block_num":"min"})["date_block_num"] items<data_type_conversions>
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8) def opt(trial): n_estimators = trial.suggest_int('n_estimators', 0, 1000) max_depth = trial.suggest_int('max_depth', 1, 20) learning_rate = trial.suggest_discrete_uniform('learning_rate', 0.01,0.1,0.01) min_child_weight = trial.suggest_int('min_child_weight', 1, 20) subsample = trial.suggest_discrete_uniform('subsample', 0.5, 0.9, 0.1) colsample_bytree = trial.suggest_discrete_uniform('colsample_bytree', 0.5, 0.9, 0.1) xgboost_tuna = xgb.XGBRegressor( random_state=42, n_estimators = n_estimators, max_depth = max_depth, min_child_weight = min_child_weight, subsample = subsample, colsample_bytree = colsample_bytree, ) xgboost_tuna.fit(X_train,y_train) tuna_pred_test = xgboost_tuna.predict(X_test) return mean_squared_error(y_test, tuna_pred_test )
Tabular Playground Series - Jan 2021
14,162,481
items[items["first_sale_date"].isna() ] items["first_sale_date"] = items["first_sale_date"].fillna(34 )<feature_engineering>
def fit_xgb(X, y, cv, params: dict = None, verbose: int = 50):
    """Train one XGBRegressor per CV fold; collect out-of-fold predictions.

    Parameters:
        X: feature matrix (ndarray), indexed positionally by the cv splits.
        y: target vector (ndarray).
        cv: iterable of (train_idx, valid_idx) index-array pairs.
        params: keyword arguments forwarded to xgb.XGBRegressor (default {}).
        verbose: unused; kept for interface parity with the sibling fitters.

    Returns:
        (oof_pred, models): out-of-fold predictions aligned with ``y`` and
        the list of fitted regressors, one per fold.
    """
    metric_func = mean_squared_error
    if params is None:
        params = {}
    models = []
    # BUG FIX: the `np.float` alias was removed in NumPy 1.24 — use the
    # builtin float (same float64 dtype, no deprecation/attribute error).
    oof_pred = np.zeros_like(y, dtype=float)
    for i, (idx_train, idx_valid) in enumerate(cv):
        x_train, y_train = X[idx_train], y[idx_train]
        x_valid, y_valid = X[idx_valid], y[idx_valid]
        model_xgb = xgb.XGBRegressor(**params)
        with timer(prefix='fit fold={} '.format(i + 1)):
            model_xgb.fit(x_train, y_train,
                          eval_set=[(x_valid, y_valid)], verbose=-1)
        pred_i = model_xgb.predict(x_valid)
        oof_pred[idx_valid] = pred_i
        models.append(model_xgb)
        print(f'Fold {i} RMSE: {metric_func(y_valid, pred_i)**.5:.4f}')
    # RMSE over the stitched-together out-of-fold predictions.
    score = metric_func(y, oof_pred)**.5
    print('FINISHED | Whole RMSE: {:.4f}'.format(score))
    return oof_pred, models
Tabular Playground Series - Jan 2021
14,162,481
categories["category"] = categories["category_name"].apply(lambda x: x.split() [0]) categories<count_values>
# XGBoost hyper-parameters — presumably the best trial from the Optuna
# search; confirm against the study results.
params_xgb = {'n_estimators': 208,
              'max_depth': 4,
              'learning_rate': 0.08,
              'min_child_weight': 13,
              'subsample': 0.8,
              'colsample_bytree': 0.8}
# Cross-validated fit: returns out-of-fold predictions and per-fold models.
oof_xgb, models_xgb = fit_xgb(X.values, y, cv, params=params_xgb)
Tabular Playground Series - Jan 2021
14,162,481
categories["category"].value_counts()<feature_engineering>
def opt_cb(trial):
    """Optuna objective for CatBoost: hold-out MSE of rounded predictions."""
    search_space = {
        'iterations': trial.suggest_int('iterations', 50, 300),
        'depth': trial.suggest_int('depth', 4, 10),
        'learning_rate': trial.suggest_loguniform('learning_rate', 0.01, 0.3),
        'random_strength': trial.suggest_int('random_strength', 0, 100),
        'bagging_temperature': trial.suggest_loguniform('bagging_temperature', 0.01, 100.00),
        'od_type': trial.suggest_categorical('od_type', ['IncToDec', 'Iter']),
        'od_wait': trial.suggest_int('od_wait', 10, 50),
    }
    fit_pool = Pool(X_train, y_train)
    eval_pool = Pool(X_test, y_test)
    candidate = CatBoostRegressor(**search_space)
    candidate.fit(fit_pool)
    raw_pred = candidate.predict(eval_pool)
    # Predictions are rounded to the nearest integer before scoring.
    rounded_pred = np.rint(raw_pred)
    return mean_squared_error(y_test, rounded_pred)
Tabular Playground Series - Jan 2021
14,162,481
# BUG FIX: the original `categories.loc[mask] = "Games"` assigned the
# scalar to EVERY column of the matched rows (category_name included),
# destroying those records. Restrict the assignment to the "category"
# column so only the label "Game" is normalised to "Games".
categories.loc[categories["category"] == "Game", "category"] = "Games"
def fit_cb(X, y, cv, params: dict = None, verbose: int = 50):
    """Train one CatBoost model per CV fold; collect out-of-fold predictions.

    Parameters:
        X: feature matrix (ndarray), indexed positionally by the cv splits.
        y: target vector (ndarray).
        cv: iterable of (train_idx, valid_idx) index-array pairs.
        params: CatBoost parameter dict (default {}).
        verbose: unused; kept for interface parity with the sibling fitters.

    Returns:
        (oof_pred, models): out-of-fold predictions aligned with ``y`` and
        the list of fitted models, one per fold.
    """
    metric_func = mean_squared_error
    if params is None:
        params = {}
    models = []
    # BUG FIX: the `np.float` alias was removed in NumPy 1.24 — use the
    # builtin float (same float64 dtype, no deprecation/attribute error).
    oof_pred = np.zeros_like(y, dtype=float)
    for i, (idx_train, idx_valid) in enumerate(cv):
        x_train, y_train = X[idx_train], y[idx_train]
        x_valid, y_valid = X[idx_valid], y[idx_valid]
        train_pool = Pool(x_train, label=y_train)
        valid_pool = Pool(x_valid, label=y_valid)
        model_cb = CatBoost(params)
        with timer(prefix='fit fold={} '.format(i + 1)):
            # use_best_model relies on the eval_set for early stopping.
            model_cb.fit(train_pool, eval_set=valid_pool,
                         use_best_model=True, silent=True, plot=False)
        print(model_cb.get_best_score())
        pred_i = model_cb.predict(x_valid)
        oof_pred[idx_valid] = pred_i
        models.append(model_cb)
        print(f'Fold {i} RMSE: {metric_func(y_valid, pred_i)**.5:.4f}')
    # RMSE over the stitched-together out-of-fold predictions.
    score = metric_func(y, oof_pred)**.5
    print('FINISHED | Whole RMSE: {:.4f}'.format(score))
    return oof_pred, models
Tabular Playground Series - Jan 2021
14,162,481
def make_misc(x):
    """Keep a category label if it covers >= 5 rows, else map it to 'Misc'."""
    return x if len(categories[categories['category'] == x]) >= 5 else 'Misc'

# Fold rare categories together so the encoder below sees fewer levels.
categories["cats"] = categories["category"].apply(make_misc)
categories
# CatBoost parameters: shallow trees with early stopping on the fold's
# validation pool (see early_stopping_rounds / num_boost_round).
params_cb = {
    'loss_function': 'RMSE',
    'max_depth': 3,
    'learning_rate': 0.08,
    'subsample': 0.8,
    'num_boost_round': 1000,
    'early_stopping_rounds': 100,
}
# Cross-validated fit: returns out-of-fold predictions and per-fold models.
oof_cb, models_cb = fit_cb(X.values, y, cv, params=params_cb)
Tabular Playground Series - Jan 2021
14,162,481
categories.drop(["category", "category_name"], axis=1, inplace=True )<drop_column>
# The id column is bookkeeping only — exclude it from the model features.
df_test = df_test.drop(columns="id")
Tabular Playground Series - Jan 2021
14,162,481
categories["cats_le"] = le.fit_transform(categories["cats"]) categories.drop("cats", inplace=True, axis=1 )<feature_engineering>
# Average fold predictions per model family and clip negatives to zero
# (the target is a count, so negative predictions are meaningless).
X_eval = df_test.values

pred_lgb = np.mean([m.predict(X_eval) for m in models], axis=0).clip(min=0)
pred_xgb = np.mean([m.predict(X_eval) for m in models_xgb], axis=0).clip(min=0)
pred_cb = np.mean([m.predict(X_eval) for m in models_cb], axis=0).clip(min=0)

# Blend the three families with a simple arithmetic mean per row.
tmp_sub = pd.DataFrame({"lgb": pred_lgb, "xgb": pred_xgb, "cb": pred_cb})
tmp_sub["pred"] = tmp_sub.mean(axis="columns")
Tabular Playground Series - Jan 2021
14,162,481
sales = sales[sales["item_price"] > 0] sales = sales[sales["item_price"] < 50000] sales = sales[sales["item_cnt_day"] > 0] sales = sales[sales["item_cnt_day"] < 1000] sales["item_price"] = sales["item_price"].apply(lambda x: round(x,2)) sales<merge>
submission["target"] = tmp_sub["pred"].copy()
Tabular Playground Series - Jan 2021
14,162,481
# Aggregate daily sales into a monthly count per (month, shop, item) key
# and attach it to the training grid as the target column.
group = (
    sales.groupby(index_feats)["item_cnt_day"]
    .sum()
    .reset_index()
    .rename(columns={"item_cnt_day": "item_cnt_month"})
)
# Left join keeps grid rows with no sales (their count stays NaN here).
train = train.merge(group, on=index_feats, how="left")
train
# Persist the predictions; index=False keeps the two-column layout the
# competition's sample submission expects.
submission.to_csv("submission.csv", index=False)
Tabular Playground Series - Jan 2021