kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
14,163,466 | predictions = inference(model, test_loader)<save_to_csv> | features = [f'cont{x}' for x in range(1,15)]
target = ['target'] | Tabular Playground Series - Jan 2021 |
14,163,466 | test['label'] = predictions.argmax(1)
test[['image_id', 'label']].to_csv(OUTPUT_DIR + 'submission.csv', index=False)<define_variables> | all_data = train[features+['id']].append(test) | Tabular Playground Series - Jan 2021 |
14,163,466 | BATCH_SIZE = 1
image_size = 512
enet_type = ['tf_efficientnet_b4_ns'] * 5
model_path = ['../input/moa-b4-baseline/baseline_cld_fold0_epoch8_tf_efficientnet_b4_ns_512.pth',
'../input/moa-b4-baseline/baseline_cld_fold1_epoch9_tf_efficientnet_b4_ns_512.pth',
'../input/moa-b4-baseline/baseline_cld_fold2_epoch9_tf_efficientnet_b4_ns_512.pth',
'../input/moa-b4-baseline/baseline_cld_fold3_epoch5_tf_efficientnet_b4_ns_512.pth',
'../input/moa-b4-baseline/baseline_cld_fold4_epoch11_tf_efficientnet_b4_ns_512.pth']<normalization> | lesser, greater = train[train['target'] <= train['target'].median()], train[train['target'] >= train['target'].median()] | Tabular Playground Series - Jan 2021 |
14,163,466 | transforms_valid = albumentations.Compose([
albumentations.CenterCrop(image_size, image_size, p=1),
albumentations.Resize(image_size, image_size),
albumentations.Normalize()
])<define_variables> | train = train[train['target'] > 5] | Tabular Playground Series - Jan 2021 |
14,163,466 | OUTPUT_DIR = './'
MODEL_DIR = '../input/cassava-resnext50-32x4d-weights/'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
TRAIN_PATH = '../input/cassava-leaf-disease-classification/train_images'
TEST_PATH = '../input/cassava-leaf-disease-classification/test_images'<define_search_space> | def rmse_score(yreal, yhat):
return sqrt(mean_squared_error(yreal, yhat)) | Tabular Playground Series - Jan 2021 |
14,163,466 | class CFG:
debug=False
num_workers=8
model_name='resnext50_32x4d'
size=512
batch_size=32
seed=2020
target_size=5
target_col='label'
n_fold=5
trn_fold=[0, 1, 2, 3, 4]
inference=True<load_from_csv> | from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import optuna
from math import sqrt | Tabular Playground Series - Jan 2021 |
14,163,466 | test = pd.read_csv('../input/cassava-leaf-disease-classification/sample_submission.csv')
test['filepath'] = test.image_id.apply(lambda x: os.path.join('../input/cassava-leaf-disease-classification/test_images', f'{x}'))
<create_dataframe> | train_ds = train.loc[:,features]
y_train = train.loc[:,target]
val_ds = test.loc[:,features] | Tabular Playground Series - Jan 2021 |
14,163,466 | test_dataset_efficient = CLDDataset(test, 'test', transform=transforms_valid)
test_loader_efficient = torch.utils.data.DataLoader(test_dataset_efficient, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)<categorify> | sc = MinMaxScaler()
train_ds = sc.fit_transform(train_ds)
val_ds = sc.transform(val_ds) | Tabular Playground Series - Jan 2021 |
14,163,466 | def get_transforms(*, data):
if data == 'valid':
return A.Compose([
A.Resize(CFG.size, CFG.size),
A.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
),
ToTensorV2(),
])<choose_model_class> | X, X_val, y, y_val = train_test_split(train_ds, y_train, test_size=0.35, random_state=43)
print('Train', X.shape, y.shape, 'Test', X_val.shape, y_val.shape) | Tabular Playground Series - Jan 2021 |
14,163,466 | class CustomResNext(nn.Module):
def __init__(self, model_name='resnext50_32x4d', pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, CFG.target_size)
def forward(self, x):
x = self.model(x)
return x<define_search_model> | lgb_train = lgb.Dataset(X,y)
lgb_valid = lgb.Dataset(X_val, y_val, reference=lgb_train) | Tabular Playground Series - Jan 2021 |
14,163,466 | class enet_v2(nn.Module):
def __init__(self, backbone, out_dim, pretrained=False):
super(enet_v2, self).__init__()
self.enet = timm.create_model(backbone, pretrained=pretrained)
in_ch = self.enet.classifier.in_features
self.myfc = nn.Linear(in_ch, out_dim)
self.enet.classifier = nn.Identity()
def forward(self, x):
x = self.enet(x)
x = self.myfc(x)
return x<load_pretrained> | def objective(trial):
params = {
'num_leaves': trial.suggest_int('num_leaves', 32, 512),
'boosting_type': 'gbdt',
'max_bin': trial.suggest_int('max_bin', 700, 900),
'objective': 'regression',
'metric': 'RMSE',
'learning_rate': trial.suggest_float('learning_rate',0.0155,0.05),
'random_state' : 17,
'max_depth': trial.suggest_int('max_depth', 4, 16),
'min_child_weight': trial.suggest_int('min_child_weight', 1, 16),
'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 8),
'min_child_samples': trial.suggest_int('min_child_samples', 4, 80),
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 1.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 1.0),
'early_stopping_rounds': 10
}
model = lgb.train(params, lgb_train, valid_sets=[lgb_valid], verbose_eval=100, num_boost_round=800)
y_pred = model.predict(X_val)
score = rmse_score(y_val, y_pred)
print(f"RMSE = {score}")
return score | Tabular Playground Series - Jan 2021 |
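As an aside on the cell above: newer LightGBM releases prefer the callback API over `verbose_eval` and params-based early stopping; a hedged equivalent of the same training call, assuming LightGBM >= 3.3, would be:
# same call via callbacks; params would then omit 'early_stopping_rounds'
model = lgb.train(params, lgb_train,
                  valid_sets=[lgb_valid],
                  num_boost_round=800,
                  callbacks=[lgb.early_stopping(stopping_rounds=10),
                             lgb.log_evaluation(period=100)])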
14,163,466 | def load_state(model_path):
model = CustomResNext(CFG.model_name, pretrained=False)
state_dict = torch.load(model_path)['model']
try:
model.load_state_dict(state_dict, strict=True)
except Exception:
state_dict = {k[7:] if k.startswith('module.') else k: state_dict[k] for k in state_dict.keys()}
return state_dict
def inference(model, states, test_loader, device):
model.to(device)
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
for i, (images) in tk0:
images = images.to(device)
avg_preds = []
for state in states:
model.load_state_dict(state)
model.eval()
with torch.no_grad():
y_preds = model(images)
avg_preds.append(y_preds.softmax(1).to('cpu').numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs<create_dataframe> | study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50) | Tabular Playground Series - Jan 2021 |
14,163,466 | model = CustomResNext(CFG.model_name, pretrained=False)
states = [load_state(MODEL_DIR+f'{CFG.model_name}_fold{fold}.pth') for fold in CFG.trn_fold]
test_dataset = TestDataset(test, transform=get_transforms(data='valid'))
test_loader = DataLoader(test_dataset, batch_size=CFG.batch_size, shuffle=False,
num_workers=CFG.num_workers, pin_memory=True)
predictions = inference(model, states, test_loader, device)
test_preds = []
for i in range(len(enet_type)):
model = enet_v2(enet_type[i], out_dim=5)
model = model.to(device)
model.load_state_dict(torch.load(model_path[i]))
test_preds += [tta_inference_func(test_loader_efficient)]
pred = 0.8*predictions + 0.2*np.mean(test_preds, axis=0)
test['label'] = softmax(pred).argmax(1)
test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False)
test.head()<define_variables> | print(f"Trials : {len(study.trials)}")
print(f"Best Trial : {study.best_trial}")
best = study.best_trial
for i, j in best.params.items():
print(f"{i} : {j}")
print(f"{best.value}") | Tabular Playground Series - Jan 2021 |
14,163,466 | OUTPUT_DIR = "./"
MODEL_DIR = ".. /input/cassava-model/"
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
TRAIN_PATH = ".. /input/cassava-leaf-disease-classification/train_images"
TEST_PATH = ".. /input/cassava-leaf-disease-classification/test_images"<init_hyperparams> | best_params = best.params
; | Tabular Playground Series - Jan 2021 |
14,163,466 | class CFG:
debug = False
num_workers = 4
models = [
"tf_efficientnet_b4_ns",
"vit_base_patch16_384",
"seresnext50_32x4d",
]
size = {
"tf_efficientnet_b3_ns": 512,
"tf_efficientnet_b4_ns": 512,
"vit_base_patch16_384": 384,
"deit_base_patch16_384": 384,
"seresnext50_32x4d": 512,
}
batch_size = 64
seed = 7097
target_size = 5
target_col = "label"
n_fold = 5
trn_fold = {
"tf_efficientnet_b3_ns": {
"best": [0, 1, 2, 3, 4],
"final": [],
},
"tf_efficientnet_b4_ns": {
"best": [0, 1, 2, 3, 4],
"final": [],
},
"vit_base_patch16_384": {"best": [0, 1, 2, 3, 4], "final": []},
"deit_base_patch16_384": {"best": [0, 1, 2, 3, 4], "final": []},
"seresnext50_32x4d": {"best": [5, 6, 7, 8, 9], "final": []},
}
data_parallel = {
"tf_efficientnet_b3_ns": False,
"tf_efficientnet_b4_ns": True,
"vit_base_patch16_384": False,
"deit_base_patch16_384": False,
"seresnext50_32x4d": False,
}
transform = {
"tf_efficientnet_b4_ns": "rotate",
"vit_base_patch16_384": "rotate",
"seresnext50_32x4d": "rotate",
}
weight = {
"tf_efficientnet_b4_ns": 1,
"vit_base_patch16_384": 1,
"seresnext50_32x4d": 1,
}
tta = 10
no_tta_weight = tta - 1
train = False
inference = True<define_variables> | model = lgb.train(best_params, lgb_train, valid_sets=[lgb_train, lgb_valid], verbose_eval=50, num_boost_round=700) | Tabular Playground Series - Jan 2021 |
14,163,466 | tta_weight_sum = CFG.no_tta_weight + (CFG.tta - 1)
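# with the CFG defaults above (tta=10, no_tta_weight=9): tta_weight_sum = 9 + (10 - 1) = 18, so the single non-augmented pass carries the same total weight as the nine TTA passes combined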
weight_sum = sum([CFG.weight[model] for model in CFG.models]) * tta_weight_sum<load_from_csv> | preds = model.predict(X_val, num_iteration=model.best_iteration) | Tabular Playground Series - Jan 2021 |
14,163,466 | test = pd.read_csv(".. /input/cassava-leaf-disease-classification/sample_submission.csv")
test.head()<choose_model_class> | print('LGBM: RMSE %.6f' %(rmse_score(y_val, preds)) ) | Tabular Playground Series - Jan 2021 |
14,163,466 | class CassvaImgClassifier(nn.Module):
def __init__(self, model_name="resnext50_32x4d", pretrained=False):
super().__init__()
if model_name == "deit_base_patch16_384":
self.model = torch.hub.load(".. /input/fair-deit", model_name, pretrained=pretrained, source="local")
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, CFG.target_size)
else:
self.model = timm.create_model(model_name, pretrained=pretrained)
if "resnext50_32x4d" in model_name:
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, CFG.target_size)
elif model_name.startswith("tf_efficientnet"):
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, CFG.target_size)
elif model_name.startswith("vit_"):
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, CFG.target_size)
def forward(self, x):
x = self.model(x)
return x<choose_model_class> | train_preds = model.predict(train_ds ) | Tabular Playground Series - Jan 2021 |
14,163,466 | def inference(model, states, test_loader, device, data_parallel):
model.to(device)
if device == torch.device("cuda")and data_parallel:
model = torch.nn.DataParallel(model)
tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
probs = []
for i, (images) in tk0:
images = images.to(device)
avg_preds = []
for state in states:
model.load_state_dict(state["model"])
model.eval()
with torch.no_grad():
y_preds = model(images)
avg_preds.append(y_preds.softmax(1).to("cpu").numpy())
avg_preds = np.mean(avg_preds, axis=0)
probs.append(avg_preds)
probs = np.concatenate(probs)
return probs<choose_model_class> | print('LGBM: Train RMSE %.6f' % (rmse_score(y_train, train_preds))) | Tabular Playground Series - Jan 2021 |
14,163,466 | predictions = None
for model_name in CFG.models:
for i in range(CFG.tta):
model = CassvaImgClassifier(model_name, pretrained=False)
states = []
for saved_model in ["best", "final"]:
if CFG.trn_fold[model_name][saved_model] != []:
LOGGER.info(
f"========== Model: {model_name}, TTA: {i}, Saved: {saved_model}, Fold: {CFG.trn_fold[model_name][saved_model]} =========="
)
states += [
torch.load(MODEL_DIR + f"{model_name}_fold{fold}_{saved_model}.pth")
for fold in CFG.trn_fold[model_name][saved_model]
]
if i == 0:
test_dataset = TestDataset(test, transform=get_transforms(data="valid", size=CFG.size[model_name]))
tta_weight = CFG.no_tta_weight
else:
test_dataset = TestDataset(
test, transform=get_transforms(data=CFG.transform[model_name], size=CFG.size[model_name])
)
tta_weight = 1
test_loader = DataLoader(
test_dataset, batch_size=CFG.batch_size, shuffle=False, num_workers=CFG.num_workers, pin_memory=True
)
inf = inference(model, states, test_loader, device, CFG.data_parallel[model_name])
LOGGER.info(f"Inference example: {inf[0]}")
if predictions is None:
predictions = inf[np.newaxis] * CFG.weight[model_name] * tta_weight
else:
predictions = np.append(predictions, inf[np.newaxis] * CFG.weight[model_name] * tta_weight, axis=0)
sub = np.sum(predictions, axis=0) / weight_sum
LOGGER.info(f"========== Overall ==========")
LOGGER.info(f"Submission example: {sub[0]}")
test["label"] = sub.argmax(1)
test[["image_id", "label"]].to_csv(OUTPUT_DIR + "submission.csv", index=False)
test.head()<install_modules> | train['t_preds'] = train_preds | Tabular Playground Series - Jan 2021 |
14,163,466 | !pip install -U /kaggle/input/kerasapplications<install_modules> | preds = model.predict(val_ds) | Tabular Playground Series - Jan 2021 |
14,163,466 | !pip install -U /kaggle/input/efficientnet/efficientnet-master<install_modules> | sub = pd.DataFrame({'id':test.id, 'target':preds})
sub.to_csv('submission.csv', index=False) | Tabular Playground Series - Jan 2021 |
14,163,466 | <set_options><EOS> | x = pd.read_csv('./submission.csv')
x.head() | Tabular Playground Series - Jan 2021 |
14,151,081 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<import_modules> | import random
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from sklearn.mixture import GaussianMixture
from sklearn.metrics import confusion_matrix
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from lightgbm import LGBMClassifier
from xgboost import XGBRegressor
import optuna
from optuna import Trial
from optuna.samplers import TPESampler | Tabular Playground Series - Jan 2021 |
14,151,081 | import pandas as pd, numpy as np
from kaggle_datasets import KaggleDatasets
import tensorflow as tf, re, math
import tensorflow.keras.backend as K
import efficientnet.tfkeras as efn
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report, precision_recall_curve
from IPython.display import display
import gc
import cv2
from tf2_resnets import models<feature_engineering> | df_train = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv')
df_test = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv')
df_train.info(verbose=False, memory_usage='deep') | Tabular Playground Series - Jan 2021 |
14,151,081 | def _bytes_feature(value):
if isinstance(value, type(tf.constant(0))):
value = value.numpy()
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def serialize_example(image,image_name):
feature = {
'image': _bytes_feature(image),
'image_name': _bytes_feature(image_name),
}
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()<feature_engineering> | train_mode = False
target = 'target'
random_state = 42
x_cols = [c for c in df_train.columns if 'cont' in c] | Tabular Playground Series - Jan 2021 |
14,151,081 | RESNEXT_ID = 10
N_TFRECORDS = 20
IMAGE_HEIGHT = 600
IMAGE_WIDTH = 800
os.mkdir('test_tfrecords_600')
test_df = pd.DataFrame(os.listdir('../input/cassava-leaf-disease-classification/test_images/'),
columns=['image_name'])
test_df['tfr_group'] = test_df.index%N_TFRECORDS<save_to_csv> | def _evaluate(model, x_cols, df, target=target, n_folds=5):
oof = np.zeros(len(df[target]))
preds_test = np.zeros(len(df_test))
kf = KFold(n_splits=n_folds, random_state=random_state, shuffle=True)
for idx_train, idx_test in kf.split(df):
x_train = df.loc[idx_train, x_cols].values
y_train = df.loc[idx_train, target].values
x_test = df.loc[idx_test, x_cols].values
y_test = df.loc[idx_test, target].values
model.fit(x_train, y_train)
preds_train = model.predict(x_test)
preds_test += model.predict(df_test[x_cols].values) / n_folds
oof[idx_test] = preds_train
return oof, preds_test | Tabular Playground Series - Jan 2021 |
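For orientation, a minimal baseline call of the `_evaluate` helper above (a sketch reusing only names defined in this kernel; the untuned LGBMRegressor is illustrative, not the author's tuned model):
base_lgbm = LGBMRegressor(random_state=random_state)  # untuned baseline, for comparison only
oof_base, preds_base = _evaluate(base_lgbm, x_cols, df_train.reset_index(drop=True))
print('baseline lgbm:', mean_squared_error(df_train[target].reset_index(drop=True), oof_base, squared=False))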
14,151,081 | for tfr_group in range(N_TFRECORDS):
df = test_df[test_df.tfr_group==tfr_group]
if df.shape[0]>0:
tfr_filename = 'test_tfrecords_600/cassava_test{}-{}.tfrec'.format(tfr_group,df.shape[0])
print("Writing",tfr_filename)
with tf.io.TFRecordWriter(tfr_filename) as writer:
for index, row in tqdm(df.iterrows()):
image_name = row['image_name']
image_path = '../input/cassava-leaf-disease-classification/test_images/'+image_name
image = cv2.imread(image_path)
image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT))
image_shape = image.shape
image = cv2.imencode('.jpg', image, (cv2.IMWRITE_JPEG_QUALITY, 100))[1].tostring()
image_name = str.encode(image_name)
sample = serialize_example(image,image_name)
writer.write(sample)
image_shape<feature_engineering> | def objective(trial):
param = {
'boosting_type':'gbdt',
'num_leaves':trial.suggest_int('num_leaves', 3, 150),
'max_depth':trial.suggest_int('max_depth', -1, 20),
'learning_rate':trial.suggest_float('learning_rate', 0.001, 0.6),
'n_estimators':trial.suggest_int('n_estimators', 50, 500),
'min_child_weight':trial.suggest_float('min_child_weight', 0.2, 0.6),
'min_child_samples':trial.suggest_int('min_child_samples', 15, 30),
'subsample':trial.suggest_float('subsample', 0.5, 1.0),
'subsample_freq':trial.suggest_int('subsample_freq', 3, 150),
'random_state':random_state,
'lambda_l1':trial.suggest_float('lambda_l1', 0.0, 5.0)
}
lgbm = LGBMRegressor(**param)
x_cols = [c for c in df_train.columns if 'cont' in c]
oof, preds_test = _evaluate(lgbm, x_cols, df_train.reset_index(drop=True))
return mean_squared_error(df_train[target].reset_index(drop=True), oof, squared=False)
if train_mode:
study = optuna.create_study(direction='minimize', sampler=TPESampler())
study.optimize(objective, n_trials=200)
print(study.best_value)
print(study.best_params) | Tabular Playground Series - Jan 2021 |
14,151,081 | N_TFRECORDS = 20
IMAGE_HEIGHT = 666
IMAGE_WIDTH = 500
os.mkdir('test_tfrecords_500')
test_df = pd.DataFrame(os.listdir('../input/cassava-leaf-disease-classification/test_images/'),
columns=['image_name'])
test_df['tfr_group'] = test_df.index%N_TFRECORDS<save_to_csv> | def objective(trial):
param = {
'iterations':1000,
'verbose':False,
'random_state':random_state,
'loss_function':'RMSE',
'bootstrap_type':'Bernoulli',
'learning_rate':trial.suggest_float('learning_rate', 0.0001, 0.31),
'max_depth':trial.suggest_int('max_depth', 3, 10),
'colsample_bylevel':trial.suggest_float('colsample_bylevel', 0.3, 0.8),
}
cat = CatBoostRegressor(**param)
x_cols = [c for c in df_train.columns if 'cont' in c]
oof, preds_test = _evaluate(cat, x_cols, df_train, target=target)
return mean_squared_error(df_train[target], oof, squared=False)
if train_mode:
study = optuna.create_study(direction='minimize', sampler=TPESampler())
study.optimize(objective, n_trials=100)
print(study.best_value)
print(study.best_params) | Tabular Playground Series - Jan 2021 |
14,151,081 | for tfr_group in range(N_TFRECORDS):
df = test_df[test_df.tfr_group==tfr_group]
if df.shape[0]>0:
tfr_filename = 'test_tfrecords_500/cassava_test{}-{}.tfrec'.format(tfr_group,df.shape[0])
print("Writing",tfr_filename)
with tf.io.TFRecordWriter(tfr_filename) as writer:
for index, row in tqdm(df.iterrows()):
image_name = row['image_name']
image_path = '../input/cassava-leaf-disease-classification/test_images/'+image_name
image = cv2.imread(image_path)
image = cv2.resize(image, (IMAGE_WIDTH, IMAGE_HEIGHT))
image_shape = image.shape
image = cv2.imencode('.jpg', image, (cv2.IMWRITE_JPEG_QUALITY, 100))[1].tostring()
image_name = str.encode(image_name)
sample = serialize_example(image,image_name)
writer.write(sample)
image_shape<define_variables> | def objective(trial):
param = {
'random_state':random_state,
'objective':'reg:squarederror',
'booster':'gbtree',
'learning_rate':trial.suggest_float('learning_rate', 0.001, 0.1),
'alpha':trial.suggest_float('alpha', 0.001, 0.1),
'colsample_bylevel':trial.suggest_float('colsample_bylevel', 0.05, 0.1),
'colsample_bytree':trial.suggest_float('colsample_bytree', 0.1, 0.9),
'gamma':trial.suggest_float('gamma', 0, 0.5),
'max_depth':trial.suggest_int('max_depth', 3, 18),
'min_child_weight': trial.suggest_float('min_child_weight', 1, 20),
'subsample':trial.suggest_float('subsample', 0.3, 0.7),
}
xgb = XGBRegressor(**param)
x_cols = [c for c in df_train.columns if 'cont' in c]
oof, preds_test = _evaluate(xgb, x_cols, df_train, target=target)
return mean_squared_error(df_train[target], oof, squared=False)
if train_mode:
study = optuna.create_study(direction='minimize', sampler=TPESampler())
study.optimize(objective, n_trials=100)
print(study.best_value)
print(study.best_params) | Tabular Playground Series - Jan 2021 |
14,151,081 | DEVICE = "GPU"
FOLDS = 5
FOLD_TO_RUN = [0,1,2,3,4]
BATCH_SIZE = 128
EPOCHS = 15
N_WORKERS = 4
<choose_model_class> | param_lgbm = {'num_leaves': 148, 'max_depth': 19, 'learning_rate': 0.04168752594808129, 'n_estimators': 468, 'min_child_weight': 0.35392113973764505, 'min_child_samples': 29, 'subsample': 0.9348697769228501, 'subsample_freq': 6, 'lambda_l1': 4.639129744838143}
param_cat = {'learning_rate': 0.07468089271003528, 'max_depth': 8, 'colsample_bylevel': 0.7338241468797853}
param_xgb = {'learning_rate': 0.0652222304334701, 'alpha': 0.0036866921576056855, 'colsample_bylevel': 0.09959606060270643, 'colsample_bytree': 0.863554381598069, 'gamma': 0.3959383978062547, 'max_depth': 15, 'min_child_weight': 19.357558021086128, 'subsample': 0.6991638855748524}
lgbm = LGBMRegressor(seed=random_state, **param_lgbm)
oof_lgbm, preds_lgbm = _evaluate(lgbm, x_cols, df_train, n_folds=5)
print('hyper lgbm:', mean_squared_error(df_train[target], oof_lgbm, squared=False))
cat = CatBoostRegressor(iterations=1000, verbose=False, random_state=random_state, loss_function='RMSE', bootstrap_type='Bernoulli', **param_cat)
oof_cat, preds_cat = _evaluate(cat, x_cols, df_train, n_folds=5)
print('hyper cat:', mean_squared_error(df_train[target], oof_cat, squared=False))
xgb = XGBRegressor(random_state=random_state, **param_xgb)
oof_xgb, preds_xgb = _evaluate(xgb, x_cols, df_train, n_folds=2)
print('hyper xgb:', mean_squared_error(df_train[target], oof_xgb, squared=False))
print('hyper blend:', mean_squared_error(df_train[target], ((oof_lgbm * 0.4) + (oof_cat * 0.4) + (oof_xgb * 0.2)), squared=False)) | Tabular Playground Series - Jan 2021 |
14,151,081 | if DEVICE == "TPU":
print("connecting to TPU...")
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
print("Could not connect to TPU")
tpu = None
if tpu:
try:
print("initializing TPU...")
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("TPU initialized")
except Exception:
print("failed to initialize TPU")
else:
DEVICE = "GPU"
if DEVICE != "TPU":
print("Using default strategy for CPU and single GPU")
strategy = tf.distribute.get_strategy()
if DEVICE == "GPU":
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')<sort_values> | df_test['hyper_lgbm'] = preds_lgbm
df_test['hyper_cat'] = preds_cat
df_test['hyper_xgb'] = preds_xgb
df_test['hyper_blend'] = (preds_lgbm * 0.4) + (preds_cat * 0.4) + (preds_xgb * 0.2)
df_test['target'] = df_test['hyper_blend']
df_test[['id','target']].to_csv('submission_hyper_blend.csv', index=False) | Tabular Playground Series - Jan 2021 |
14,151,081 | GCS_PATH = '.'
files_test_600 = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/test_tfrecords_600/*.tfrec')))
files_test_500 = np.sort(np.array(tf.io.gfile.glob(GCS_PATH + '/test_tfrecords_500/*.tfrec')))
print(files_test_600)
print(files_test_500)<define_variables> | n_folds = 3
oof_lgbm_seeds = np.zeros(len(df_train))
oof_car_seeds = np.zeros(len(df_train))
preds_lgbm_seeds = np.zeros(len(df_test))
preds_car_seeds = np.zeros(len(df_test))
for g in range(n_folds):
rand_num = random.randint(1, 5000)
lgbm = LGBMRegressor(seed=rand_num, **param_lgbm)
oof, preds_lgbm = _evaluate(lgbm, x_cols, df_train, n_folds=n_folds)
oof_lgbm_seeds += (oof / n_folds)
preds_lgbm_seeds += (preds_lgbm / n_folds)
cat = CatBoostRegressor(iterations=1000, verbose=False, random_state=rand_num, loss_function='RMSE', bootstrap_type='Bernoulli', **param_cat)
oof, preds_cat = _evaluate(cat, x_cols, df_train, n_folds=n_folds)
oof_car_seeds += (oof / n_folds)
preds_car_seeds += (preds_cat / n_folds) | Tabular Playground Series - Jan 2021 |
14,151,081 | ROT_ = 180.0
SHR_ = 2.0
HZOOM_ = 8.0
WZOOM_ = 8.0
HSHIFT_ = 8.0
WSHIFT_ = 8.0<normalization> | print('hyper lgbm:', mean_squared_error(df_train[target], oof_lgbm_seeds, squared=False))
print('hyper cat:', mean_squared_error(df_train[target], oof_car_seeds, squared=False))
| Tabular Playground Series - Jan 2021 |
14,151,081 | def get_mat(rotation, shear, height_zoom, width_zoom, height_shift, width_shift):
rotation = math.pi * rotation / 180.
shear = math.pi * shear / 180.
def get_3x3_mat(lst):
return tf.reshape(tf.concat([lst],axis=0), [3,3])
c1 = tf.math.cos(rotation)
s1 = tf.math.sin(rotation)
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
rotation_matrix = get_3x3_mat([c1, s1, zero,
-s1, c1, zero,
zero, zero, one])
c2 = tf.math.cos(shear)
s2 = tf.math.sin(shear)
shear_matrix = get_3x3_mat([one, s2, zero,
zero, c2, zero,
zero, zero, one])
zoom_matrix = get_3x3_mat([one/height_zoom, zero, zero,
zero, one/width_zoom, zero,
zero, zero, one])
shift_matrix = get_3x3_mat([one, zero, height_shift,
zero, one, width_shift,
zero, zero, one])
return K.dot(K.dot(rotation_matrix, shear_matrix),
K.dot(zoom_matrix, shift_matrix))
def transform(image, DIM=256):
XDIM = DIM%2
rot = ROT_ * tf.random.normal([1], dtype='float32')
shr = SHR_ * tf.random.normal([1], dtype='float32')
h_zoom = 1.0 + tf.random.normal([1], dtype='float32') / HZOOM_
w_zoom = 1.0 + tf.random.normal([1], dtype='float32') / WZOOM_
h_shift = HSHIFT_ * tf.random.normal([1], dtype='float32')
w_shift = WSHIFT_ * tf.random.normal([1], dtype='float32')
m = get_mat(rot,shr,h_zoom,w_zoom,h_shift,w_shift)
x = tf.repeat(tf.range(DIM//2, -DIM//2,-1), DIM)
y = tf.tile(tf.range(-DIM//2, DIM//2), [DIM])
z = tf.ones([DIM*DIM], dtype='int32')
idx = tf.stack([x,y,z])
idx2 = K.dot(m, tf.cast(idx, dtype='float32'))
idx2 = K.cast(idx2, dtype='int32')
idx2 = K.clip(idx2, -DIM//2+XDIM+1, DIM//2)
idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]])
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d, [DIM, DIM, 3])<normalization> | df_test['seed_lgbm'] = preds_lgbm_seeds
df_test['seed_cat'] = preds_car_seeds
df_test['seed_blend'] = (preds_lgbm_seeds * 0.4) + (preds_car_seeds * 0.6)
df_test['target'] = df_test['seed_blend']
df_test[['id','target']].to_csv('submission_seed_blend.csv', index=False) | Tabular Playground Series - Jan 2021 |
14,151,081 | def read_unlabeled_tfrecord(example):
tfrec_format = {
'image' : tf.io.FixedLenFeature([], tf.string),
"image_name": tf.io.FixedLenFeature([], tf.string)
}
example = tf.io.parse_single_example(example, tfrec_format)
return example['image'], example['image_name']
def prepare_image(img, augment=True, tta=None, dim=256):
img = tf.image.decode_jpeg(img, channels=3)
if dim==600:
img = tf.image.crop_to_bounding_box(img, 0, 100, 600, 600)
else:
img = tf.image.crop_to_bounding_box(img, 83, 0, 500, 500)
img = tf.image.resize(img, [dim,dim])
img = tf.cast(img, tf.float32) / 255.0
if tta is not None:
if tta[-3:]=='_lr':
img = tf.image.flip_left_right(img)
tta = tta[:-3]
if tta=='rotate90':
img = tf.image.rot90(img,1)
if tta=='rotate180':
img = tf.image.rot90(img,2)
if tta=='rotate270':
img = tf.image.rot90(img,3)
img = tf.reshape(img, [dim,dim, 3])
return img
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\.").search(filename).group(1))
for filename in filenames]
return np.sum(n)
count_data_items(files_test_600), count_data_items(files_test_500)<create_dataframe> | lgbm_gmm_class = LGBMClassifier(random_state=random_state)
oof, preds = _evaluate(lgbm_gmm_class, x_cols, df_gmm_train, target='target_gmm')
print(accuracy_score(df_gmm_train['target_gmm'], oof))
print(confusion_matrix(df_gmm_train['target_gmm'], oof)) | Tabular Playground Series - Jan 2021 |
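`df_gmm_train` and its `target_gmm` column are never constructed in the cells shown; one plausible sketch, using the `GaussianMixture` import above (an assumption, not the author's confirmed preprocessing), is:
# hypothetical construction of the GMM-cluster target used below
gmm = GaussianMixture(n_components=2, random_state=random_state)
df_gmm_train = df_train.copy()
df_gmm_train['target_gmm'] = gmm.fit_predict(df_gmm_train[x_cols])  # 0/1 cluster label, matching the masks used later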
14,151,081 | def get_dataset(files, augment = False, tta=None, shuffle = False, repeat = False,
batch_size=16, dim=512):
ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO)
if repeat:
ds = ds.repeat()
if shuffle:
ds = ds.shuffle(1024*8)
opt = tf.data.Options()
opt.experimental_deterministic = False
ds = ds.with_options(opt)
ds = ds.map(read_unlabeled_tfrecord, num_parallel_calls=AUTO)
ds = ds.map(lambda img, imgname_or_label: (prepare_image(img, augment=augment, tta=tta, dim=dim),
imgname_or_label),
num_parallel_calls=AUTO)
ds = ds.batch(batch_size * REPLICAS)
ds = ds.prefetch(AUTO)
return ds<choose_model_class> | param_lgbm_gmm_class = {'num_leaves': 144, 'max_depth': 10, 'learning_rate': 0.0934759313797003, 'n_estimators': 107, 'min_child_weight': 0.30363944085393396, 'min_child_samples': 17, 'subsample': 0.9463879414095, 'subsample_freq': 80, 'lambda_l1': 4.67131782429971}
lgbm_gmm_class = LGBMClassifier(random_state=random_state, **param_lgbm_gmm_class)
lgbm_gmm_reg_0 = LGBMRegressor(seed=random_state)
lgbm_gmm_reg_1 = LGBMRegressor(seed=random_state)
lgbm_gmm_reg = LGBMRegressor(seed=random_state, **param_lgbm)
lgbm_cat_reg = CatBoostRegressor(iterations=1000, verbose=False, random_state=random_state, loss_function='RMSE', bootstrap_type='Bernoulli', **param_cat)
mask_gmm_0 = df_gmm_train['target_gmm'] == 0
mask_gmm_1 = df_gmm_train['target_gmm'] == 1
lgbm_gmm_class.fit(df_gmm_train[x_cols], df_gmm_train['target_gmm'])
lgbm_gmm_reg_0.fit(df_gmm_train.loc[mask_gmm_0, x_cols].reset_index(drop=True), df_gmm_train.loc[mask_gmm_0, target].reset_index(drop=True))
lgbm_gmm_reg_1.fit(df_gmm_train.loc[mask_gmm_1, x_cols].reset_index(drop=True), df_gmm_train.loc[mask_gmm_1, target].reset_index(drop=True))
lgbm_gmm_reg.fit(df_gmm_train[x_cols], df_gmm_train[target])
lgbm_cat_reg.fit(df_gmm_train[x_cols], df_gmm_train[target])
df_gmm_test[['gmm_class_' + str(a) + '_pred' for a in lgbm_gmm_class.classes_]] = lgbm_gmm_class.predict_proba(df_gmm_test[x_cols])
df_gmm_test['lgbm'] = lgbm_gmm_reg.predict(df_gmm_test[x_cols])
df_gmm_test['cat'] = lgbm_cat_reg.predict(df_gmm_test[x_cols])
df_gmm_test['hyper_blend'] = (df_gmm_test['lgbm'] * 0.5) + (df_gmm_test['cat'] * 0.5)
df_gmm_test['gmm_reg_0'] = lgbm_gmm_reg_0.predict(df_gmm_test[x_cols])
df_gmm_test['gmm_reg_1'] = lgbm_gmm_reg_1.predict(df_gmm_test[x_cols]) | Tabular Playground Series - Jan 2021 |
14,151,081 | <init_hyperparams><EOS> | print('blend:', mean_squared_error(df_gmm_test[target], df_gmm_test['hyper_blend'], squared=False))
print('super:', mean_squared_error(df_gmm_test[target], df_gmm_test['super_target'], squared=False)) | Tabular Playground Series - Jan 2021 |
14,115,408 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<create_dataframe> | %matplotlib inline
dfk = pd.DataFrame({
'Kernel ID': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L'],
'Score': [ 0.69864 , 0.69846 , 0.69836 , 0.69824 , 0.69813, 0.69795, 0.69751, 0.69749, 0.69747, 0.69735, 0.69731, 0.69701],
'File Path': ['../input/aa69864/AA69864.csv', '../input/bb69846/BB69846.csv', '../input/cc69836/CC69836.csv', '../input/a69824/A69824.csv', '../input/c69813/C69813.csv', '../input/ff69795/FF69795.csv', '../input/gg69751/GG69751.csv', '../input/g69749/G69749.csv', '../input/h69747/H69747.csv', '../input/i69735/I69735.csv', '../input/j69731/J69731.csv', '../input/l69701/L69701.csv']
})
dfk | Tabular Playground Series - Jan 2021 |
14,115,408 | print('Getting test_ids')
IMG_SIZE = 500
NUM_TEST_IMAGES = count_data_items(files_test_500)
ds_test = get_dataset(files_test_500,augment=False,repeat=False,shuffle=False,
dim=IMG_SIZE,batch_size=BATCH_SIZE*4)
test_ids_ds = ds_test.map(lambda image, idnum: idnum).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
<define_variables> | def generate(main, support, coeff):
g = main.copy()
for i in main.columns[1:]:
res = []
lm, ls = [], []
lm = main[i].tolist()
ls = support[i].tolist()
for j in range(len(main)):
res.append((lm[j] * coeff) + (ls[j] * (1. - coeff)))
g[i] = res
return g
| Tabular Playground Series - Jan 2021 |
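The per-element loops in `generate` above can be collapsed into a single vectorized pandas expression; a behavior-equivalent sketch (same convention: the first column, presumably the id, is left untouched):
def generate_vectorized(main, support, coeff):
    g = main.copy()
    cols = main.columns[1:]  # skip the leading id column, as in the loop version
    g[cols] = main[cols] * coeff + support[cols] * (1. - coeff)
    return g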
14,115,408 | tta_counter = 0<define_variables> | support = pd.read_csv(dfk.iloc[0, 2])
for k in range(1, 7):
main = pd.read_csv(dfk.iloc[k, 2])
support = generate(main, support, 0.99)
sub1 = support | Tabular Playground Series - Jan 2021 |
14,115,408 | VERBOSE = IS_INTERACTIVE
def generate_submission(EFF_NET,category,TTA):
if EFF_NET == RESNEXT_ID:
model_root = f'../input/cassava-category-{category}/ResNext50/ResNext50/'
else:
model_root = f'../input/cassava-category-{category}/B{EFF_NET}/B{EFF_NET}/'
if category%2==0:
IMG_SIZE = 600
files_test = files_test_600
elif category%2==1:
IMG_SIZE = 500
files_test = files_test_500
else:
assert 1==2, "INVALID CATEGORY"
global tta_counter
test_predictions = []
for fold in range(FOLDS):
print('#' * 25)
if EFF_NET == RESNEXT_ID:
print('#### IMG_SIZE %i, ResNext50, batch_size %i' % (IMG_SIZE, BATCH_SIZE*REPLICAS))
else:
print('#### IMG_SIZE %i, EfficientNetB%i, batch_size %i' % (IMG_SIZE, EFF_NET, BATCH_SIZE*REPLICAS))
K.clear_session()
with strategy.scope():
model = build_model(dim=IMG_SIZE,ef=EFF_NET)
print('Loading best model...')
model.load_weights(model_root+'fold-%i.h5'%fold)
if len(TTA)==0:
print('Predicting TEST without TTA...')
ds_test = get_dataset(files_test,augment=False,repeat=False,shuffle=False,
dim=IMG_SIZE,batch_size=BATCH_SIZE)
pred = model.predict(ds_test,verbose=VERBOSE,batch_size=BATCH_SIZE,
use_multiprocessing=True,workers=N_WORKERS)
if 1:
print('Predicting TEST with TTA...')
for x_ in range(1):
tta = TTA[tta_counter%8]
print(tta)
tta_counter+=1
ds_test = get_dataset(files_test,augment=False,tta=tta,repeat=False,shuffle=False,
dim=IMG_SIZE,batch_size=BATCH_SIZE)
if x_==0:
pred = model.predict(ds_test,verbose=VERBOSE,batch_size=BATCH_SIZE,
use_multiprocessing=True,workers=N_WORKERS)/len(TTA)
else:
pred += model.predict(ds_test,verbose=VERBOSE,batch_size=BATCH_SIZE,
use_multiprocessing=True,workers=N_WORKERS)/len(TTA)
test_pred_fold = pd.DataFrame(pred)
test_pred_fold['fold'] = fold
test_pred_fold['image_id'] = test_ids
test_predictions.append(test_pred_fold)
test_predictions = pd.concat(test_predictions)
test_predictions_averaged = test_predictions.groupby('image_id')[[0,1,2,3,4]].mean()
test_predictions_averaged = test_predictions_averaged.sort_index()
return test_predictions_averaged<define_variables> | sub7 = comparison(sub6, 7, 1.0036, 0.9972)
| Tabular Playground Series - Jan 2021 |
14,115,408 | <prepare_output><EOS> | sub = sub7
sub.to_csv("submission.csv", index=False)
sub1.to_csv("submission1.csv", index=False)
sub2.to_csv("submission2.csv", index=False)
sub3.to_csv("submission3.csv", index=False)
sub4.to_csv("submission4.csv", index=False)
sub5.to_csv("submission5.csv", index=False)
sub6.to_csv("submission6.csv", index=False)
!ls | Tabular Playground Series - Jan 2021 |
14,110,546 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<import_modules> | %matplotlib inline | Tabular Playground Series - Jan 2021 |
14,110,546 | import random<save_to_csv> | train = pd.read_csv(DATA / "train.csv")
test = pd.read_csv(DATA / "test.csv")
smpl_sub = pd.read_csv(DATA / "sample_submission.csv")
print("train: {}, test: {}, sample sub: {}".format(
train.shape, test.shape, smpl_sub.shape
)) | Tabular Playground Series - Jan 2021 |
14,110,546 | submission.to_csv('predictions.csv')
submission<set_options> | class TreeModel:
def __init__(self, model_type: str):
self.model_type = model_type
self.trn_data = None
self.val_data = None
self.model = None
def train(self,
params: dict,
X_train: pd.DataFrame, y_train: np.ndarray,
X_val: pd.DataFrame, y_val: np.ndarray,
train_weight: tp.Optional[np.ndarray] = None,
val_weight: tp.Optional[np.ndarray] = None,
train_params: dict = {}):
if self.model_type == "lgb":
self.trn_data = lgb.Dataset(X_train, label=y_train, weight=train_weight)
self.val_data = lgb.Dataset(X_val, label=y_val, weight=val_weight)
self.model = lgb.train(params=params,
train_set=self.trn_data,
valid_sets=[self.trn_data, self.val_data],
**train_params)
elif self.model_type == "xgb":
self.trn_data = xgb.DMatrix(X_train, y_train, weight=train_weight)
self.val_data = xgb.DMatrix(X_val, y_val, weight=val_weight)
self.model = xgb.train(params=params,
dtrain=self.trn_data,
evals=[(self.trn_data, "train"),(self.val_data, "val")],
**train_params)
elif self.model_type == "cat":
self.trn_data = Pool(X_train, label=y_train, group_id=[0] * len(X_train))
self.val_data = Pool(X_val, label=y_val, group_id=[0] * len(X_val))
self.model = CatBoost(params)
self.model.fit(
self.trn_data, eval_set=[self.val_data], use_best_model=True, **train_params)
else:
raise NotImplementedError
def predict(self, X: pd.DataFrame):
if self.model_type == "lgb":
return self.model.predict(
X, num_iteration=self.model.best_iteration)
elif self.model_type == "xgb":
X_DM = xgb.DMatrix(X)
return self.model.predict(
X_DM, ntree_limit=self.model.best_ntree_limit)
elif self.model_type == "cat":
return self.model.predict(X)
else:
raise NotImplementedError
@property
def feature_names_(self):
if self.model_type == "lgb":
return self.model.feature_name()
elif self.model_type == "xgb":
return list(self.model.get_score(importance_type="gain" ).keys())
elif self.model_type == "cat":
return self.model.feature_names_
else:
raise NotImplementedError
@property
def feature_importances_(self):
if self.model_type == "lgb":
return self.model.feature_importance(importance_type="gain")
elif self.model_type == "xgb":
return list(self.model.get_score(importance_type="gain" ).values())
elif self.model_type == "cat":
return self.model.feature_importances_
else:
raise NotImplementedError | Tabular Playground Series - Jan 2021 |
14,110,546 | K.clear_session()<set_options> | ID_COL = "id"
FEAT_COLS = [f"cont{i}" for i in range(1, 15)]
TGT_COL = "target"
N_SPLITS = 10
RANDOM_SEED_LIST = [
1003,2021
]
MODEL_PARAMS = {
"lgb": {
'seed': RANDOM_SEED_LIST[1],
'objective': 'regression',
'metric': 'rmse',
'verbosity': -1,
'boosting_type': 'gbdt',
'feature_pre_filter': False,
'lambda_l1': 6.540486456085813,
'lambda_l2': 0.01548480538099245,
'num_leaves': 256,
'feature_fraction': 0.52,
'bagging_fraction': 0.6161835249194311,
'bagging_freq': 7,
'min_child_samples': 20,
'learning_rate': 0.005,
"device": "gpu",
"gpu_device_id": 0,
},
"xgb": {
"objective": "reg:squarederror",
"learning_rate": 0.005,
"seed": RANDOM_SEED_LIST[0],
'alpha': 0.01563,
'base_score': 0.5,
'booster': 'gbtree',
'colsample_bylevel': 1,
'colsample_bynode': 1,
'colsample_bytree': 0.5,
'gamma': 0,
'importance_type': 'gain',
'interaction_constraints': '',
'max_delta_step': 0,
'max_depth': 15,
'metric_period': 100,
'min_child_weight': 257,
'n_jobs': 0,
'num_parallel_tree': 1,
'reg_alpha': 0.0156299993,
'reg_lambda': 0.003,
'scale_pos_weight': 1,
'silent': 1,
'subsample': 0.7,
'validate_parameters': 1,
'verbosity': None,
'tree_method': 'gpu_hist',
'gpu_id': 0,
},
"cat": {
'loss_function': 'RMSE',
'learning_rate': 0.01,
'max_depth': 7,
'random_state': RANDOM_SEED_LIST[0],
"thread_count": 2,
'num_boost_round': 20000
}
}
TRAIN_PARAMS = {
"lgb": {
"num_boost_round": 20000,
"early_stopping_rounds": 200,
"verbose_eval": 100,
},
"xgb": {
"num_boost_round": 20000,
"early_stopping_rounds": 200,
"verbose_eval": 100,
},
"cat": {'early_stopping_rounds': 200, 'verbose_eval': 100}
} | Tabular Playground Series - Jan 2021 |
14,110,546 | device = cuda.get_current_device()
device.reset()<define_variables> | use_feat_cols = []
train_feat = train[[ID_COL]].copy()
test_feat = test[[ID_COL]].copy() | Tabular Playground Series - Jan 2021 |
14,110,546 | package_paths = [
'../input/pytorch-image-models/pytorch-image-models-master',
'../input/image-fmix/FMix-master'
]
for pth in package_paths:
sys.path.append(pth)
<import_modules> | train_feat = pd.concat([
train_feat, train[FEAT_COLS]], axis=1)
test_feat = pd.concat([
test_feat, test[FEAT_COLS]], axis=1)
use_feat_cols.extend(FEAT_COLS) | Tabular Playground Series - Jan 2021 |
14,110,546 | from glob import glob
from sklearn.model_selection import GroupKFold, StratifiedKFold
import cv2
from skimage import io
import torch
from torch import nn
import os
from datetime import datetime
import time
import random
import cv2
import torchvision
from torchvision import transforms
import pandas as pd
import numpy as np
from tqdm import tqdm
import glob
import matplotlib.pyplot as plt
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torch.cuda.amp import autocast, GradScaler
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
import warnings
import cv2
import pydicom
import timm
from scipy.ndimage.interpolation import zoom
from sklearn.metrics import log_loss<import_modules> | def run_train_and_inference(X, X_test, y, use_model, model_params, train_params, seed_list, n_splits):
oof_pred_arr = np.zeros(len(X))
test_pred_arr = np.zeros(len(X_test))
feature_importances = pd.DataFrame()
score_list = []
for seed in seed_list:
if use_model == "cat":
model_params['random_state'] = seed
else:
model_params["seed"] = seed
kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
tmp_oof_pred = np.zeros(len(X))
tmp_test_pred = np.zeros(len(X_test))
for fold, (trn_idx, val_idx) in enumerate(kf.split(X, y)):
print("*" * 100)
print(f"Seed: {seed} - Fold: {fold}")
X_trn = X.loc[trn_idx].reset_index(drop=True)
X_val = X.loc[val_idx].reset_index(drop=True)
y_trn = y[trn_idx]
y_val = y[val_idx]
model = TreeModel(model_type=use_model)
with timer(prefix="Model training"):
model.train(
params=model_params, X_train=X_trn, y_train=y_trn,
X_val=X_val, y_val=y_val, train_params=train_params)
fi_tmp = pd.DataFrame()
fi_tmp["feature"] = model.feature_names_
fi_tmp["importance"] = model.feature_importances_
fi_tmp["fold"] = fold
fi_tmp["seed"] = seed
feature_importances = feature_importances.append(fi_tmp)
val_pred = model.predict(X_val)
score = mean_squared_error(y_val, val_pred, squared=False)
print(f"score: {score:.5f}")
score_list.append([seed, fold, score])
tmp_oof_pred[val_idx] = val_pred
tmp_test_pred += model.predict(X_test)
oof_score = mean_squared_error(y, tmp_oof_pred, squared=False)
print(f"oof score: {oof_score: 5f}")
score_list.append([seed, "oof", oof_score])
oof_pred_arr += tmp_oof_pred
test_pred_arr += tmp_test_pred / n_splits
oof_pred_arr /= len(seed_list)
test_pred_arr /= len(seed_list)
oof_score = mean_squared_error(y, oof_pred_arr, squared=False)
score_list.append(["avg", "oof", oof_score])
score_df = pd.DataFrame(
score_list, columns=["seed", "fold", "rmse score"])
return oof_pred_arr, test_pred_arr, score_df, feature_importances | Tabular Playground Series - Jan 2021 |
14,110,546 | import os
import pandas as pd
import albumentations as albu
import matplotlib.pyplot as plt
import json
import seaborn as sns
import cv2
import albumentations as albu
import numpy as np<import_modules> | X = train_feat[use_feat_cols]
X_test = test_feat[use_feat_cols]
y = train[TGT_COL].values
print(f"train_feat: {X.shape}, test_feat: {X_test.shape}" ) | Tabular Playground Series - Jan 2021 |
14,110,546 | import torch
import torch.nn as nn
import torchvision.models as models
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold, GroupKFold, KFold, train_test_split
from albumentations.pytorch import ToTensorV2
import time
import datetime
import copy<init_hyperparams> | oof_pred_lgb, test_pred_lgb, score_lgb, feat_imps_lgb = run_train_and_inference(
X, X_test, y, "lgb", MODEL_PARAMS["lgb"], TRAIN_PARAMS["lgb"], RANDOM_SEED_LIST, N_SPLITS ) | Tabular Playground Series - Jan 2021 |
14,110,546 | CFG = {
'fold_num': 10,
'seed': 719,
'model_arch': 'tf_efficientnet_b3_ns',
'img_size': 384,
'epochs': 32,
'train_bs': 32,
'valid_bs': 32,
'lr': 1e-4,
'num_workers': 4,
'accum_iter': 1,
'verbose_step': 1,
'device': 'cuda:0',
'tta': 4,
'used_epochs': [8],
'weights': [1,1,1,1,1]
}<set_options> | score_lgb.loc[score_lgb.fold == "oof"] | Tabular Playground Series - Jan 2021 |
14,110,546 | def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def get_img(path):
im_bgr = cv2.imread(path)
im_rgb = im_bgr[:, :, ::-1]
return im_rgb<train_on_grid> | oof_pred_xgb, test_pred_xgb, score_xgb, feat_imps_xgb = run_train_and_inference(
X, X_test, y, "xgb", MODEL_PARAMS["xgb"], TRAIN_PARAMS["xgb"], RANDOM_SEED_LIST, N_SPLITS ) | Tabular Playground Series - Jan 2021 |
14,110,546 | class CassavaDataset(Dataset):
def __init__(self,df:pd.DataFrame,imfolder:str,train:bool = True, transforms=None):
self.df=df
self.imfolder=imfolder
self.train=train
self.transforms=transforms
def __getitem__(self,index):
im_path=os.path.join(self.imfolder,self.df.iloc[index]['image_id'])
x=cv2.imread(im_path,cv2.IMREAD_COLOR)
x=cv2.cvtColor(x,cv2.COLOR_BGR2RGB)
if(self.transforms):
x=self.transforms(image=x)['image']
if(self.train):
y=self.df.iloc[index]['label']
return x,y
else:
return x
def __len__(self):
return len(self.df)<choose_model_class> | score_xgb.loc[score_xgb.fold == "oof"] | Tabular Playground Series - Jan 2021 |
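A minimal, assumed way to wrap this `CassavaDataset` in a loader for inference (identifiers such as `TEST_PATH` and `get_inference_transforms` come from other cells of these kernels, so treat the exact wiring as a sketch):
test_ds = CassavaDataset(test, imfolder=TEST_PATH, train=False, transforms=get_inference_transforms(CFG))
test_loader = DataLoader(test_ds, batch_size=CFG['valid_bs'], shuffle=False, num_workers=CFG['num_workers'])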
14,110,546 | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CustomDeiT(nn.Module):
def __init__(self, model_name='model_name', pretrained=False):
super().__init__()
self.model = torch.hub.load('facebookresearch/deit:main', model_name, pretrained=0)
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, CFG.target_size)
def forward(self, x):
x = self.model(x)
return x
class CassvaImgClassifier(nn.Module):
def __init__(self, model_arch, n_class, pretrained=False):
super().__init__()
self.model = timm.create_model(model_arch, pretrained=pretrained)
print(self.model)
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features, n_class)
def forward(self, x):
x = self.model(x)
return x
class CustomViT(nn.Module):
def __init__(self, model_name='', pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
n_features = self.model.head.in_features
self.model.head = nn.Linear(n_features,5)
def forward(self, x):
x = self.model(x)
return x
model = CustomViT(model_name='vit_base_patch16_384', pretrained=False).to(device)
device = torch.device(CFG['device'])
model.to(device)
model.eval()<categorify> | oof_pred_avg = oof_pred_lgb*0.6 + oof_pred_xgb*0.4
oof_score_avg = mean_squared_error(y, oof_pred_avg, squared=False)
print(f"oof score avg: {oof_score_avg:.5f}" ) | Tabular Playground Series - Jan 2021 |
14,110,546 | class CassavaDataset(Dataset):
def __init__(
self, df, data_root, transforms=None, output_label=True
):
super().__init__()
self.df = df.reset_index(drop=True).copy()
self.transforms = transforms
self.data_root = data_root
self.output_label = output_label
def __len__(self):
return self.df.shape[0]
def __getitem__(self, index: int):
if self.output_label:
target = self.df.iloc[index]['label']
path = "{}/{}".format(self.data_root, self.df.iloc[index]['image_id'])
img = get_img(path)
if self.transforms:
img = self.transforms(image=img)['image']
if self.output_label == True:
return img, target
else:
return img<normalization> | test_pred_avg = test_pred_lgb*0.6 + test_pred_xgb*0.4 | Tabular Playground Series - Jan 2021 |
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, RandomCrop, ShiftScaleRotate, CenterCrop, Resize
)
def get_inference_transforms(CFG):
return Compose([
RandomCrop(512,512),
Resize(CFG['img_size'], CFG['img_size']),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.)
<prepare_output> | sub = smpl_sub.copy()
sub[TGT_COL] = test_pred_avg
sub.to_csv("submission.csv", index=False)
sub.head() | Tabular Playground Series - Jan 2021 |
14,191,427 | tst_preds = np.mean(tst_preds, axis=0)
<create_dataframe> | !pip install python-vivid | Tabular Playground Series - Jan 2021 |
14,191,427 | test_pred = pd.DataFrame(tst_preds)
test_pred['image_id'] = test.image_id<drop_column> | import seaborn as sns
import matplotlib.pyplot as plt
from vivid.estimators.base import MetaBlock
from vivid.estimators.boosting.mixins import BoostingEarlyStoppingMixin
from catboost import CatBoostRegressor | Tabular Playground Series - Jan 2021 |
14,191,427 | test_pred = test_pred.set_index('image_id' )<save_to_csv> | from vivid.features.base import BinningCountBlock
from vivid.features.base import CountEncodingBlock
from vivid.features.base import FilterBlock
from vivid.estimators.boosting import XGBRegressorBlock
from vivid.estimators.boosting import LGBMRegressorBlock
from vivid.estimators.boosting.block import create_boosting_seed_blocks
from vivid.estimators.linear import TunedRidgeBlock
from vivid.estimators.svm import SVRBlock
from vivid.estimators.ensumble import RFRegressorBlock
from vivid.estimators.base import EnsembleBlock, BaseBlock | Tabular Playground Series - Jan 2021 |
14,191,427 | test_pred.to_csv('vit_predictions.csv' )<define_variables> | train_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/train.csv')
test_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/test.csv')
sample_submission_df = pd.read_csv('/kaggle/input/tabular-playground-series-jan-2021/sample_submission.csv')
feature_columns = [
'cont1',
'cont2',
'cont3',
'cont4',
'cont5',
'cont6',
'cont7',
'cont8',
'cont9',
'cont10',
'cont11',
'cont12',
'cont13',
'cont14',
]
y = train_df['target'].values | Tabular Playground Series - Jan 2021 |
14,191,427 |
<import_modules> | class PCABlock(BaseBlock):
def __init__(self, n_components=3, columns=None, *args, **kwrgs):
self.n_components = n_components
if columns is None: columns = feature_columns
self.columns = columns
super().__init__(name='pca_n={}'.format(n_components), *args, **kwrgs)
def fit(self, source_df, y, experiment=None) -> pd.DataFrame:
clf = PCA(n_components=self.n_components)
clf.fit(source_df[self.columns].values)
self.clf_ = clf
return self.transform(source_df)
def transform(self, source_df):
z = self.clf_.transform(source_df[self.columns])
out_df = pd.DataFrame(z)
return out_df.add_prefix('PCA_')
class GaussianMixtureBlock(BaseBlock):
def __init__(self, n_components=3, columns=None, *args, **kwrgs):
self.n_components = n_components
if columns is None: columns = feature_columns
self.columns = columns
super().__init__(name='GMM_n={}'.format(n_components), *args, **kwrgs)
def fit(self, source_df, y, experiment=None) -> pd.DataFrame:
clf = GaussianMixture(n_components=self.n_components)
clf.fit(source_df[self.columns].values)
self.clf_ = clf
return self.transform(source_df)
def transform(self, source_df):
z = self.clf_.predict_proba(source_df[self.columns])
z = np.clip(z, 1e-6, 1 - 1e-6)
out_df = pd.DataFrame(z)
return out_df.add_prefix('GMM_') | Tabular Playground Series - Jan 2021 |
14,191,427 |
<load_from_csv> | from vivid.estimators.boosting.mixins import TunedBoostingBlock
from vivid.estimators.boosting.helpers import get_boosting_parameter_suggestions
from vivid.estimators.boosting.lgbm import LGBMRegressorBlock
import lightgbm as lgbm
from vivid.runner import create_runner | Tabular Playground Series - Jan 2021 |
14,191,427 |
<categorify> | class TunedLightGBMRegressorBlock(TunedBoostingBlock):
model_class = lgbm.LGBMRegressor
default_eval_metric = 'rmse'
initial_params = LGBMRegressorBlock.initial_params
def generate_model_class_try_params(self, trial):
param = get_boosting_parameter_suggestions(trial)
param['n_jobs'] = -1
return param | Tabular Playground Series - Jan 2021 |
14,191,427 |
<load_from_csv> | feature_blocks = [
BinningCountBlock(name='BINS', column=feature_columns),
CountEncodingBlock(name='CE', column=feature_columns),
FilterBlock(name='F', column=feature_columns),
PCABlock(n_components=3),
GaussianMixtureBlock(n_components=3)
]
runner = create_runner(blocks=[
LGBMRegressorBlock(name='normal_lgbm', parent=feature_blocks),
TunedLightGBMRegressorBlock(name='tuned_lgbm', parent=feature_blocks, n_trials=50)
]) | Tabular Playground Series - Jan 2021 |
14,191,427 | pred1 = pd.read_csv('./predictions.csv', index_col=0).sort_index()
pred3 = pd.read_csv('./vit_predictions.csv', index_col=0).sort_index()
pred1 = pred1.div(pred1.sum(axis=1), axis=0)
pred3 = pred3.div(pred3.sum(axis=1), axis=0)<prepare_output> | oof_results = runner.fit(train_df[feature_columns], y=y) | Tabular Playground Series - Jan 2021 |
14,191,427 | submission = 0.8*pred1 + 0.2*pred3<prepare_output> | test_results = runner.predict(test_df) | Tabular Playground Series - Jan 2021 |
14,191,427 | submission['label'] = submission.idxmax(axis=1)
submission = submission.reset_index()
submission<save_to_csv> | oof_df = pd.DataFrame()
for result in oof_results:
oof_df[result.block.name] = result.out_df.values[:, 0] | Tabular Playground Series - Jan 2021 |
14,191,427 | submission[['image_id','label']].to_csv('submission.csv', index=False)<import_modules> | score_df = pd.DataFrame()
for name, pred in oof_df.T.iterrows():
score_i = regression_metrics(y, pred)
score_df = score_df.append(pd.Series(score_i, name=name)) | Tabular Playground Series - Jan 2021 |
14,191,427 |
<import_modules> | score_df.sort_values('rmse') | Tabular Playground Series - Jan 2021 |
14,191,427 | import os
import glob
import random
import shutil
import warnings
import json
import itertools
import numpy as np
import pandas as pd
from collections import Counter
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
import keras
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from PIL import Image
from sklearn.model_selection import train_test_split<define_variables> | for result in test_results:
out_df = result.out_df
sub_df = sample_submission_df.copy()
sub_df['target'] = result.out_df.values[:, 0]
to = f'/kaggle/working/{str(result.block.name)}_submission.csv'
print('save to {}'.format(to))
sub_df.to_csv(to, index=False) | Tabular Playground Series - Jan 2021 |
14,050,492 | work_dir = '.. /input/cassava-leaf-disease-classification/'
train_path = '/kaggle/input/cassava-leaf-disease-classification/train_images'<define_variables> | train = pd.read_csv(input_path / 'train.csv', index_col='id')
display(train.head()) | Tabular Playground Series - Jan 2021 |
14,050,492 | def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 123
seed_everything(seed)
warnings.filterwarnings('ignore')<load_from_csv> | z = np.abs(stats.zscore(train))
print(z) | Tabular Playground Series - Jan 2021 |
14,050,492 | data = pd.read_csv(work_dir + 'train.csv')
print(data['label'].value_counts())<categorify> | print(len(train))
train = train[(z < 3).all(axis=1)]
print(len(train)) | Tabular Playground Series - Jan 2021 |
with open(work_dir + 'label_num_to_disease_map.json') as f:
real_labels = json.load(f)
real_labels = {int(k):v for k,v in real_labels.items() }
data['class_name'] = data['label'].map(real_labels)
real_labels<split> | test = pd.read_csv(input_path / 'test.csv', index_col='id' ) | Tabular Playground Series - Jan 2021 |
14,050,492 | train, test = train_test_split(data, test_size = 0.05, random_state = 123, stratify = data['class_name'] )<define_variables> | for df in [train, test]:
    # row-wise summary statistics over the continuous features of each sample
    df['sum'] = df[features].sum(axis=1)
    df['min'] = df[features].min(axis=1)
    df['max'] = df[features].max(axis=1)
    df['mean'] = df[features].mean(axis=1)
    df['median'] = df[features].median(axis=1)
    df['skew'] = df[features].skew(axis=1)
    df['kurtosis'] = df[features].kurtosis(axis=1)
display(test.head()) | Tabular Playground Series - Jan 2021 |
14,050,492 | IMG_SIZE = 300
size =(IMG_SIZE,IMG_SIZE)
n_CLASS = 5
BATCH_SIZE = 15<define_variables> | submission = pd.read_csv(input_path / 'sample_submission.csv', index_col='id')
display(submission.head() ) | Tabular Playground Series - Jan 2021 |
14,050,492 | datagen_train = ImageDataGenerator(
preprocessing_function = tf.keras.applications.efficientnet.preprocess_input,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
vertical_flip = True,
fill_mode = 'nearest',
)
datagen_val = ImageDataGenerator(
preprocessing_function = tf.keras.applications.efficientnet.preprocess_input,
)<prepare_x_and_y> | target = train.pop('target')
X_train, X_test, y_train, y_test = train_test_split(train, target, train_size=0.90, random_state=42 ) | Tabular Playground Series - Jan 2021 |
14,050,492 | train_set = datagen_train.flow_from_dataframe(
train,
directory=train_path,
seed=123,
x_col='image_id',
y_col='class_name',
target_size = size,
class_mode='categorical',
interpolation='nearest',
shuffle = True,
batch_size = BATCH_SIZE,
)
test_set = datagen_val.flow_from_dataframe(
test,
directory=train_path,
seed=123,
x_col='image_id',
y_col='class_name',
target_size = size,
class_mode='categorical',
interpolation='nearest',
shuffle=True,
batch_size=BATCH_SIZE,
)<choose_model_class> | params={
'n_estimators': 300,
'subsample': 0.7,
'colsample_bytree': 0.7,
'eta': 0.035282124803629786,
'max_depth': 11,
'min_child_weight': 10,
'tree_method':'gpu_hist',
'random_state':42
}
model = xgb.XGBRegressor(**params)
model.fit(X_train, y_train)
preds = model.predict(X_test)
print(mean_squared_error(preds, y_test)**0.5 ) | Tabular Playground Series - Jan 2021 |
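Equivalently, `mean_squared_error` can return the RMSE directly (`squared=False` exists in scikit-learn 0.22 through 1.5; newer releases provide `root_mean_squared_error` instead):

print(mean_squared_error(y_test, preds, squared=False))  # same RMSE without the manual square root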
14,050,492 | def create_model() :
model = Sequential()
model.add(
EfficientNetB5(
input_shape =(IMG_SIZE, IMG_SIZE, 3),
include_top = False,
weights='imagenet',
drop_connect_rate=0.6,
)
)
model.add(GlobalAveragePooling2D())
model.add(Flatten())
model.add(Dense(
256,
activation='relu',
bias_regularizer=tf.keras.regularizers.L1L2(l1=0.01, l2=0.001)
))
model.add(Dropout(0.5))
model.add(Dense(n_CLASS, activation = 'softmax'))
return model
leaf_model = create_model()
leaf_model.summary()<define_variables> | model.fit(train, target)
submission['target'] = model.predict(test)
submission.to_csv('xgb_regression.csv' ) | Tabular Playground Series - Jan 2021 |
14,064,777 | EPOCHS = 50
STEP_SIZE_TRAIN = train_set.n // train_set.batch_size
STEP_SIZE_TEST = test_set.n // test_set.batch_size<train_model> | import plotly.express as px
import plotly.graph_objects as go
import catboost as cgb
from catboost import CatBoostRegressor,Pool
import shap
from bayes_opt import BayesianOptimization
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error | Tabular Playground Series - Jan 2021 |
14,064,777 | def model_fit() :
leaf_model = create_model()
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits = False,
label_smoothing=0.0001,
name='categorical_crossentropy'
)
leaf_model.compile(
optimizer = Adam(learning_rate = 1e-3),
loss = loss,
metrics = ['categorical_accuracy']
)
es = EarlyStopping(
monitor='val_loss',
mode='min',
patience=5,
restore_best_weights=True,
verbose=1,
)
checkpoint_cb = ModelCheckpoint(
"Cassava_best_model_b5.h5",
save_best_only=True,
monitor='val_loss',
mode='min',
)
reduce_lr = ReduceLROnPlateau(
monitor='val_loss',
factor=0.2,
patience=2,
min_lr=1e-6,
mode='min',
verbose=1,
)
history = leaf_model.fit(
train_set,
validation_data=test_set,
epochs=EPOCHS,
batch_size=BATCH_SIZE,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_steps=STEP_SIZE_TEST,
callbacks=[es, checkpoint_cb, reduce_lr],
)
leaf_model.save('Cassava_best_model'+'.h5')
return history<set_options> | X=pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv')
X.head() | Tabular Playground Series - Jan 2021 |
14,064,777 | from tensorflow.compat.v1.keras import backend as K  # assumed backend alias for set_session below
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
K.set_session(sess )<train_model> | X_test=pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv')
X_test.head() | Tabular Playground Series - Jan 2021 |
14,064,777 | try:
final_model = keras.models.load_model('Cassava_best_model.h5')
except Exception as e:
with tf.device('/GPU:0'):
results = model_fit()
print('Train Categorical Accuracy: ', max(results.history['categorical_accuracy']))
print('Test Categorical Accuracy: ', max(results.history['val_categorical_accuracy']))<define_variables> | corr=X.corr()
corr_t=corr['target'].sort_values(ascending=False)
corr_t | Tabular Playground Series - Jan 2021 |
14,064,777 | debug = True
MODEL_DIR = '.. /input/20t-efficientnet-b3-cutmix-tta'<define_variables> | X=X.drop('id',axis=1 ) | Tabular Playground Series - Jan 2021 |
14,064,777 | OUTPUT_DIR = './'
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
TRAIN_PATH = '.. /input/cassava-leaf-disease-classification/train_images'
TEST_PATH = '.. /input/cassava-leaf-disease-classification/test_images'
assert len(glob.glob(f'{MODEL_DIR}/*.yml')) ==1
config_path = glob.glob(f'{MODEL_DIR}/*.yml')[0]<init_hyperparams> | y=X.target
X=X.drop('target',axis=1)
X_train,X_val,y_train,y_val=train_test_split(X,y,test_size=0.2,random_state=1 ) | Tabular Playground Series - Jan 2021 |
14,064,777 | with open(config_path) as f:
    config = yaml.load(f, Loader=yaml.SafeLoader)
INFO = config['info']
TAG = config['tag']
CFG = config['cfg']
CFG['train'] = False
CFG['inference'] = True
inference_batch_size = 8
<compute_train_metric> | def objective(max_depth, random_strength, learning_rate, iterations, bagging_temperature):
    params = {'eval_metric': 'RMSE',
              'verbose': False,
              'loss_function': 'RMSE',
              'num_leaves': 31,
              'bootstrap_type': 'Bayesian'}
    params['max_depth'] = int(round(max_depth))
    params['random_strength'] = random_strength
    params['learning_rate'] = learning_rate
    params['iterations'] = int(round(iterations))
    params['bagging_temperature'] = bagging_temperature
    cv_dataset = cgb.Pool(data=X, label=y)
    scores = cgb.cv(cv_dataset, params, fold_count=3)
    # BayesianOptimization maximizes its objective, so return the negated CV RMSE
    return -np.min(scores['test-RMSE-mean'])
pds = {
    'max_depth': (3, 10),
    'random_strength': (1, 10),
    'learning_rate': (0.01, 0.3),
    'iterations': (100, 1000),
    'bagging_temperature': (3, 10)
}
optimizer = BayesianOptimization(objective, pds, random_state=2100)
optimizer.maximize(init_points=3, n_iter=7 ) | Tabular Playground Series - Jan 2021 |
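Once `maximize` finishes, `optimizer.max` holds the best score and parameters; since the objective above returns the negated RMSE, the sign is flipped back when reporting, and integer-valued parameters need rounding before a refit:

best = optimizer.max['params']
best_params = {
    'max_depth': int(round(best['max_depth'])),
    'iterations': int(round(best['iterations'])),
    'learning_rate': best['learning_rate'],
    'random_strength': best['random_strength'],
    'bagging_temperature': best['bagging_temperature'],
}
print(-optimizer.max['target'], best_params)  # best CV RMSE and the parameters that produced it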
14,064,777 | def get_result(result_df):
preds = result_df['preds'].values
labels = result_df['label'].values
score = get_score(labels, preds)
LOGGER.info(f'Score: {score:<.5f}')
return score
def get_aug_name(compose):
aug_list = []
for aug in compose:
aug_list.append(aug.__class__.__name__)
return aug_list
def get_aug_score(aug_preds, labels, tta_list):
for i in range(aug_preds.shape[1]):
aug_pred = aug_preds[:,i,:]
aug_list = get_aug_name(tta_list[i])
score = get_score(labels, aug_pred.argmax(1))
print(score, aug_list)
LOGGER.info(f"========== aug: {aug_list} result ==========")
LOGGER.info(f'Score: {score:<.5f}')
def get_aug_csv(aug_preds, oof_df, tta_list):
for i in range(aug_preds.shape[1]):
base_df = oof_df.copy() [['image_id', 'label', 'fold']]
aug_pred = aug_preds[:,i,:]
base_df[[str(c)for c in range(5)]] = aug_pred
base_df['preds'] = aug_pred.argmax(1)
aug_pred = aug_preds[:,i,:]
aug_list = get_aug_name(tta_list[i])
csv_name = '-'.join(aug_list)
base_df.to_csv(f'{OUTPUT_DIR}{csv_name}.csv', index=False )<load_from_csv> | ct=CatBoostRegressor(iterations=786
,learning_rate=0.08262,
max_depth=6,
num_leaves=31,
random_strength=1.427,
use_best_model=True,
eval_metric='RMSE',
verbose=False,
bagging_temperature=4.901)
ct.fit(X_train,y_train,
verbose_eval=100,
early_stopping_rounds=30,
eval_set=(X_val,y_val),
plot=True); | Tabular Playground Series - Jan 2021 |
14,064,777 | test = pd.read_csv('.. /input/cassava-leaf-disease-classification/sample_submission.csv')
test.head()<normalization> | pred=ct.predict(X_val)
print(np.sqrt(mean_squared_error(y_val,pred)) ) | Tabular Playground Series - Jan 2021 |
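`shap` was imported above but never used; a short sketch of how the fitted CatBoost model's feature attributions could be inspected with it:

explainer = shap.TreeExplainer(ct)
shap_values = explainer.shap_values(Pool(X_val, label=y_val))  # CatBoost models take a Pool here
shap.summary_plot(shap_values, X_val)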
14,064,777 | <normalization><EOS> | prediction_file=ct.predict(X_test.drop('id',axis=1))
submission=pd.DataFrame({
"id":X_test['id'],
"target":prediction_file
})
submission.to_csv('my_submission.csv',index=False ) | Tabular Playground Series - Jan 2021 |
13,985,294 | <SOS> metric: RMSE Kaggle data source: tabular-playground-series-jan-2021<categorify> | import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
# the cells below also use train_test_split, xgboost and LGBMRegressor
from sklearn.model_selection import KFold, train_test_split
import xgboost as xgb
from lightgbm import LGBMRegressor
from tqdm import tqdm | Tabular Playground Series - Jan 2021 |
13,985,294 | def get_transforms(*, aug_list):
return Compose(
_get_augmentations(aug_list)
)<choose_model_class> | train_data = pd.read_csv('.. /input/tabular-playground-series-jan-2021/train.csv')
test_data = pd.read_csv('.. /input/tabular-playground-series-jan-2021/test.csv')
features = ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont6', 'cont7',
'cont8', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13', 'cont14']
X_train = train_data[features]
y_train = train_data["target"]
final_X_test = test_data[features]
X_train,X_test,y_train,y_test = train_test_split(X_train, y_train, test_size=0.1, random_state=0)
regressor = xgb.XGBRegressor(
    colsample_bytree=0.5,
    alpha=0.01563,
    learning_rate=0.01,
    max_depth=15,
    min_child_weight=257,
    n_estimators=4000,
    reg_lambda=0.003,
    subsample=0.7,
    random_state=2020,
    verbosity=0)  # 'metric_period' and 'silent' are not XGBRegressor parameters; verbosity controls logging
regressor.fit(X_train, y_train, early_stopping_rounds=6, eval_set=[(X_test, y_test)], verbose=1 ) | Tabular Playground Series - Jan 2021 |
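With early stopping, the wrapper records the best boosting round; a sketch of restricting prediction to it (assumes `best_iteration` and `iteration_range`, available in xgboost 1.4+):

print(regressor.best_score, regressor.best_iteration)
preds_best = regressor.predict(final_X_test, iteration_range=(0, regressor.best_iteration + 1))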
13,985,294 | class CustomModel(nn.Module):
def __init__(self, model_name, pretrained=False):
super().__init__()
self.model = timm.create_model(model_name, pretrained=pretrained)
if hasattr(self.model, 'classifier'):
n_features = self.model.classifier.in_features
self.model.classifier = nn.Linear(n_features, CFG['target_size'])
elif hasattr(self.model, 'fc'):
n_features = self.model.fc.in_features
self.model.fc = nn.Linear(n_features, CFG['target_size'])
def forward(self, x):
x = self.model(x)
return x<prepare_output> | predictions = regressor.predict(final_X_test ) | Tabular Playground Series - Jan 2021 |
13,985,294 | model = CustomModel(TAG['model_name'], pretrained=False)
model_paths = glob.glob(f'{MODEL_DIR}/*.pth')
model_paths.sort()
states = [torch.load(path)for path in model_paths]
test_dataset = TTADataset(test, TEST_PATH, ttas=ttas)
test_loader = DataLoader(test_dataset, batch_size=inference_batch_size, shuffle=False,
num_workers=2, pin_memory=True)
predictions = inference(model, states, test_loader, device)
prediction = predictions.mean(1)
test['label'] = prediction.argmax(1)
test[['image_id', 'label']].to_csv(OUTPUT_DIR+'submission.csv', index=False)
test.head()<find_best_params> | X = train_data.drop(['id','target'], axis=1)
Xtest = test_data.drop(['id'], axis=1)
y = train_data['target']
train = int(len(X)*0.9)
Xtrain, Xval = X.iloc[:train], X.iloc[train:]
ytrain, yval = y.iloc[:train], y.iloc[train:] | Tabular Playground Series - Jan 2021 |
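The `inference` helper called on the test loader earlier is never shown; a minimal sketch of a plausible reconstruction, assuming the test dataset yields TTA image batches only and that the second axis of the result indexes the fold checkpoints, so that `predictions.mean(1)` averages the folds (mirroring `valid_inference` below):

def inference(model, states, test_loader, device):
    # hypothetical reconstruction: per-checkpoint, TTA-averaged softmax probabilities
    model.to(device)
    fold_probs = []
    for state in states:
        model.load_state_dict(state['model'])
        model.eval()
        probs = []
        for images in test_loader:  # assumes batches of TTA crops without labels
            images = images.to(device)
            batch_size, n_crops, c, h, w = images.size()
            images = images.view(-1, c, h, w)
            with torch.no_grad():
                y_preds = model(images).softmax(1)
            probs.append(y_preds.view(batch_size, n_crops, -1).mean(1).cpu().numpy())
        fold_probs.append(np.concatenate(probs))
    return np.stack(fold_probs, axis=1)  # shape: (n_samples, n_checkpoints, n_classes)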
14,064,777 | def valid_inference(model, state, test_loader, device):
    model.to(device)
    # load the fold checkpoint and switch to eval mode once, not per batch
    model.load_state_dict(state['model'])
    model.eval()
    tk0 = tqdm(enumerate(test_loader), total=len(test_loader))
    probs = []
    for i, (images, labels) in tk0:
        images = images.to(device)
        labels = labels.to(device)
        batch_size, n_crops, c, h, w = images.size()
        images = images.view(-1, c, h, w)
        with torch.no_grad():
            y_preds = model(images).softmax(1)
        y_preds = y_preds.view(batch_size, n_crops, -1)
        avg_preds = y_preds.to('cpu').numpy()
        probs.append(avg_preds)
        del images, labels, y_preds, avg_preds
        torch.cuda.empty_cache()
    probs = np.concatenate(probs)
    return probs<load_from_csv> | params={'random_state': 33,'n_estimators':5000,
'min_data_per_group': 5,
'boosting_type': 'gbdt',
'num_leaves': 256,
'max_depth': -1,
'learning_rate': 0.02,
'subsample_for_bin': 200000,
'lambda_l1': 1.074622455507616e-05,
'lambda_l2': 2.0521330798729704e-06,
'n_jobs': -1,
'cat_smooth': 1.0,
'silent': True,
'importance_type': 'split',
'metric': 'rmse',
'feature_pre_filter': False,
'bagging_fraction': 0.8206341150202605,
'min_data_in_leaf': 100,
'min_sum_hessian_in_leaf': 0.001,
'bagging_freq': 6,
'feature_fraction': 0.5,
'min_gain_to_split': 0.0,
'min_child_samples': 20} | Tabular Playground Series - Jan 2021 |
13,985,294 | if debug:
train = pd.read_csv('.. /input/cassava-leaf-disease-classification/train.csv')
folds = train.copy()
Fold = StratifiedKFold(n_splits=CFG['n_fold'], shuffle=True, random_state=CFG['seed'])
for n,(train_index, val_index)in enumerate(Fold.split(folds, folds[CFG['target_col']])) :
folds.loc[val_index, 'fold'] = int(n)
folds['fold'] = folds['fold'].astype(int)
model_paths = glob.glob(f'{MODEL_DIR}/*.pth')
model_paths.sort()
states = [torch.load(path)for path in model_paths]
oof_df = pd.DataFrame()
oof_aug_preds = []
for fold, state in enumerate(states):
val_idx = folds[folds['fold'] == fold].index
valid_folds = folds.loc[val_idx].reset_index(drop=True)
valid_dataset = TTADataset(valid_folds, TRAIN_PATH, ttas=ttas)
valid_loader = DataLoader(valid_dataset,
batch_size=inference_batch_size,
shuffle=False,
num_workers=CFG['num_workers'], pin_memory=True)
valid_preds = valid_inference(model, state, valid_loader, device)
valid_pred = valid_preds.mean(1)
valid_folds[[str(c)for c in range(5)]] = valid_pred
valid_folds['preds'] = valid_pred.argmax(1)
oof_df = pd.concat([oof_df, valid_folds])
oof_aug_preds.append(valid_preds)
LOGGER.info(f"========== fold: {fold} result ==========")
_ = get_result(valid_folds)
LOGGER.info(f"========== CV result ==========")
score = get_result(oof_df)
score_rem3 = get_result(oof_df.query('fold!=3'))
oof_aug_preds = np.concatenate(oof_aug_preds )<compute_train_metric> | N_FOLDS = 5
kf = KFold(n_splits = N_FOLDS)
oof = np.zeros(len(y))
oof_vanilla = np.zeros(len(y))
preds = np.zeros(len(Xtest))
params['learning_rate'] = 0.005
params['num_iterations'] = 5000
for train_ind, test_ind in tqdm(kf.split(X)) :
Xtrain = X.iloc[train_ind]
Xval = X.iloc[test_ind]
ytrain = y.iloc[train_ind]
yval = y.iloc[test_ind]
model = LGBMRegressor(**params)
vanilla_model = LGBMRegressor()
    model.fit(Xtrain, ytrain, eval_set=[(Xval, yval)], early_stopping_rounds=50, verbose=0)
vanilla_model.fit(Xtrain, ytrain)
p = model.predict(Xval)
p_vanilla = vanilla_model.predict(Xval)
oof[test_ind] = p
oof_vanilla[test_ind] = p_vanilla
preds += model.predict(Xtest)/N_FOLDS
print(f'root mean squared error on out-of-fold predictions(vanilla model): {np.round(mean_squared_error(y, oof_vanilla, squared=False),5)}')
print(f'root mean squared error on out-of-fold predictions(with optuna tuning): {np.round(mean_squared_error(y, oof, squared=False),5)}' ) | Tabular Playground Series - Jan 2021 |
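The fold-averaged `preds` can be written out in the competition format (a sketch reusing the already loaded `test_data`):

submission = pd.DataFrame({'id': test_data['id'], 'target': preds})
submission.to_csv('lgbm_cv_submission.csv', index=False)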
13,985,294 | if debug:
LOGGER.info(f"========== augmentation result ==========")
get_aug_score(oof_aug_preds, oof_df['label'], ttas)
get_aug_csv(oof_aug_preds, oof_df, ttas )<import_modules> | sub1 = pd.read_csv('.. /input/resultsdriven-tabular-playground-series-201/submission - 2021-01-15T023916.124.csv')
predictions1 = sub1['target'].tolist()
sub2 = pd.read_csv('.. /input/resultsdriventabularplaygroundseries2011/submission - 2021-01-16T012125.132.csv')
predictions2 = sub2['target'].tolist() | Tabular Playground Series - Jan 2021 |
13,985,294 | import numpy as np
import pandas as pd
import os
<set_options> | # note: only predictions2 is used(scaled by 1.0002); predictions1 does not enter the blend
results = [y*1.0002 for x, y in zip(predictions1, predictions2)] | Tabular Playground Series - Jan 2021 |
13,985,294 | <import_modules><EOS> | output = pd.DataFrame({"id":test_data.id, "target":results})
output.to_csv('submission.csv', index=False ) | Tabular Playground Series - Jan 2021 |