kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completion
stringlengths
1
182k
comp_name
stringlengths
5
57
14,271,675
__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', 'se_resnext50_32x4d', 'se_resnext101_32x4d'] pretrained_settings = { 'senet154': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnet50': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnet101': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnet152': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnext50_32x4d': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, 'se_resnext101_32x4d': { 'imagenet': { 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth', 'input_space': 'RGB', 'input_size': [3, 224, 224], 'input_range': [0, 1], 'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225], 'num_classes': 1000 } }, } class SEModule(nn.Module): def __init__(self, channels, reduction): super(SEModule, self ).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.fc1 = nn.Conv2d(channels, channels // reduction, 
kernel_size=1, padding=0) self.relu = nn.ReLU(inplace=True) self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0) self.sigmoid = nn.Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class Bottleneck(nn.Module): def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out = self.se_module(out)+ residual out = self.relu(out) return out class SEBottleneck(Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEBottleneck, self ).__init__() self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes * 2) self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes * 4) self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SEResNetBottleneck(Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None): super(SEResNetBottleneck, self ).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) 
self.downsample = downsample self.stride = stride class SEResNeXtBottleneck(Bottleneck): expansion = 4 def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4): super(SEResNeXtBottleneck, self ).__init__() width = math.floor(planes *(base_width / 64)) * groups self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1) self.bn1 = nn.BatchNorm2d(width) self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False) self.bn2 = nn.BatchNorm2d(width) self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.se_module = SEModule(planes * 4, reduction=reduction) self.downsample = downsample self.stride = stride class SENet(nn.Module): def __init__(self, block, layers, groups, reduction, dropout_p=0.2, inplanes=128, input_3x3=True, downsample_kernel_size=3, downsample_padding=1, num_classes=1000): super(SENet, self ).__init__() self.inplanes = inplanes if input_3x3: layer0_modules = [ ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False)) , ('bn1', nn.BatchNorm2d(64)) , ('relu1', nn.ReLU(inplace=True)) , ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)) , ('bn2', nn.BatchNorm2d(64)) , ('relu2', nn.ReLU(inplace=True)) , ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)) , ('bn3', nn.BatchNorm2d(inplanes)) , ('relu3', nn.ReLU(inplace=True)) , ] else: layer0_modules = [ ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3, bias=False)) , ('bn1', nn.BatchNorm2d(inplanes)) , ('relu1', nn.ReLU(inplace=True)) , ] layer0_modules.append(( 'pool', nn.MaxPool2d(3, stride=2, ceil_mode=True))) self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) self.layer1 = self._make_layer( block, planes=64, blocks=layers[0], groups=groups, reduction=reduction, downsample_kernel_size=1, downsample_padding=0 ) self.layer2 = self._make_layer( block, 
planes=128, blocks=layers[1], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.layer3 = self._make_layer( block, planes=256, blocks=layers[2], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.layer4 = self._make_layer( block, planes=512, blocks=layers[3], stride=2, groups=groups, reduction=reduction, downsample_kernel_size=downsample_kernel_size, downsample_padding=downsample_padding ) self.avg_pool = nn.AvgPool2d(7, stride=1) self.dropout = nn.Dropout(dropout_p)if dropout_p is not None else None self.last_linear = nn.Linear(512 * block.expansion, num_classes) def _make_layer(self, block, planes, blocks, groups, reduction, stride=1, downsample_kernel_size=1, downsample_padding=0): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size, stride=stride, padding=downsample_padding, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, groups, reduction, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, groups, reduction)) return nn.Sequential(*layers) def features(self, x): x = self.layer0(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def logits(self, x): x = self.avg_pool(x) if self.dropout is not None: x = self.dropout(x) x = x.view(x.size(0), -1) x = self.last_linear(x) return x def forward(self, x): x = self.features(x) x = self.logits(x) return x def initialize_pretrained_model(model, num_classes, settings): assert num_classes == settings['num_classes'], \ 'num_classes should be {}, but is {}'.format( settings['num_classes'], num_classes) 
model.load_state_dict(model_zoo.load_url(settings['url'])) model.input_space = settings['input_space'] model.input_size = settings['input_size'] model.input_range = settings['input_range'] model.mean = settings['mean'] model.std = settings['std'] def senet154(num_classes=1000, pretrained='imagenet'): model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16, dropout_p=0.2, num_classes=num_classes) if pretrained is not None: settings = pretrained_settings['senet154'][pretrained] initialize_pretrained_model(model, num_classes, settings) return model def se_resnet50(num_classes=1000, pretrained='imagenet'): model = SENet(SEResNetBottleneck, [3, 4, 6, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes) if pretrained is not None: settings = pretrained_settings['se_resnet50'][pretrained] initialize_pretrained_model(model, num_classes, settings) return model def se_resnet101(num_classes=1000, pretrained='imagenet'): model = SENet(SEResNetBottleneck, [3, 4, 23, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes) if pretrained is not None: settings = pretrained_settings['se_resnet101'][pretrained] initialize_pretrained_model(model, num_classes, settings) return model def se_resnet152(num_classes=1000, pretrained='imagenet'): model = SENet(SEResNetBottleneck, [3, 8, 36, 3], groups=1, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes) if pretrained is not None: settings = pretrained_settings['se_resnet152'][pretrained] initialize_pretrained_model(model, num_classes, settings) return model def se_resnext50_32x4d(num_classes=1000, pretrained='imagenet'): model = SENet(SEResNeXtBottleneck, [3, 4, 6, 3], groups=32, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, 
downsample_padding=0, num_classes=num_classes) if pretrained is not None: settings = pretrained_settings['se_resnext50_32x4d'][pretrained] initialize_pretrained_model(model, num_classes, settings) return model def se_resnext101_32x4d(num_classes=1000, pretrained='imagenet'): model = SENet(SEResNeXtBottleneck, [3, 4, 23, 3], groups=32, reduction=16, dropout_p=None, inplanes=64, input_3x3=False, downsample_kernel_size=1, downsample_padding=0, num_classes=num_classes) if pretrained is not None: settings = pretrained_settings['se_resnext101_32x4d'][pretrained] initialize_pretrained_model(model, num_classes, settings) return model<normalization>
# Standardize the Titanic features; fit on the training split only so the
# test split leaks no statistics into the scaler.
scaler = StandardScaler()
Xs_train = scaler.fit_transform(X_train)
Xs_test = scaler.transform(X_test)
Titanic - Machine Learning from Disaster
14,271,675
sys.path.append('/kaggle/working/')


def gem(x, p=3, eps=1e-6):
    """Generalized-mean (GeM) pooling over the spatial dims of a 4-D tensor.

    Clamps to ``eps`` so the ``pow`` is defined for non-positive activations.
    """
    return F.avg_pool2d(x.clamp(min=eps).pow(p),
                        (x.size(-2), x.size(-1))).pow(1. / p)


class GeM(nn.Module):
    """GeM pooling layer with a learnable exponent ``p``."""

    def __init__(self, p=3, eps=1e-6):
        super(GeM, self).__init__()
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        return gem(x, p=self.p, eps=self.eps)

    def __repr__(self):
        return (self.__class__.__name__ + '(' + 'p='
                + '{:.4f}'.format(self.p.data.tolist()[0])
                + ', ' + 'eps=' + str(self.eps) + ')')


def get_se_resnet50_gem(pretrain):
    """SE-ResNet50 with GeM pooling and a 1-output regression head."""
    if pretrain == 'imagenet':
        model = se_resnet50(num_classes=1000, pretrained='imagenet')
    else:
        model = se_resnet50(num_classes=1000, pretrained=None)
    model.avg_pool = GeM()
    model.last_linear = nn.Linear(2048, 1)
    return model


def get_densenet121_gem(pretrain):
    """DenseNet-121 with GeM pooling and a 1-output regression head."""
    if pretrain == 'imagenet':
        model = densenet121(num_classes=1000, pretrained='imagenet')
    else:
        model = densenet121(num_classes=1000, pretrained=None)
    model.avg_pool = GeM()
    model.last_linear = nn.Linear(1024, 1)
    return model
# Logistic-regression baseline on the standardized features.
logreg = LogisticRegression()
logreg.fit(Xs_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
# Location of the APTOS test images and the device used for inference.
TEST_IMAGE_PATH = '/kaggle/input/aptos2019-blindness-detection/test_images'
device = torch.device("cuda")
test_images = glob(os.path.join(TEST_IMAGE_PATH, '*.png'))
# Predict Titanic survival on the scaled test features.
Y_pred = logreg.predict(Xs_test)
Titanic - Machine Learning from Disaster
14,271,675
def make_predictions(model, test_images, transforms, size=256,
                     device=torch.device("cuda")):
    """Run horizontal-flip TTA inference over a list of image paths.

    Args:
        model: network mapping a (1, C, H, W) batch to a scalar output.
        test_images: iterable of image file paths.
        transforms: callable applied to each PIL image (e.g. a Compose).
        size: square resize applied before ``transforms``.
        device: target device. NOTE(review): evaluated once at definition
            time, so this default always refers to CUDA device 0.

    Returns:
        List of (image_id_without_extension, averaged_prediction) tuples.
    """
    predictions = []
    # Inference only — disable autograd bookkeeping (no output change).
    with torch.no_grad():
        for im_path in test_images:
            image = Image.open(im_path)
            image = image.resize((size, size), resample=Image.BILINEAR)
            image = transforms(image).to(device)
            output = model(image.unsqueeze(0))
            # Test-time augmentation: average with the horizontally
            # flipped image (dim 3 is width).
            output_flip = model(torch.flip(image.unsqueeze(0), dims=(3,)))
            final_prediction = (output.item() + output_flip.item()) / 2
            # Strip the directory and extension to get the submission id.
            predictions.append(
                (os.path.splitext(im_path.split('/')[-1])[0],
                 final_prediction))
    return predictions
# Training accuracy of the logistic-regression baseline.
logreg.score(Xs_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
# Load the trained DenseNet-121+GeM checkpoint for inference.
# NOTE(review): fixed a stray space in the original path ('.. /input').
MODEL_PATH = '../input/densenet121/model_densenet121_bs64_30.pth'
model = get_densenet121_gem(pretrain=False)
model.to(device)
model.load_state_dict(torch.load(MODEL_PATH, map_location='cuda:0'))
model.eval()
# This model was trained without input normalization: ToTensor only.
norm = transforms.Compose([transforms.ToTensor()])
# Write the logistic-regression Titanic submission.
submission = pd.DataFrame({
    "PassengerId": test["PassengerId"],
    "Survived": Y_pred,
})
submission.to_csv('submission2_LG.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# Load the pseudo-labeled SE-ResNet50+GeM checkpoint.
# NOTE(review): fixed a stray space in the original path ('.. /input').
MODEL_PATH = '../input/seresnet50testpseudo/model10.pth'
model = get_se_resnet50_gem(pretrain=False)
model.to(device)
model.load_state_dict(torch.load(MODEL_PATH, map_location='cuda:0'))
model.eval()
# ImageNet normalization, matching the training preprocessing.
norm = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
# Pipeline: standardize, then L1-regularized logistic regression
# (liblinear supports the l1 penalty).
lr_pipe2 = Pipeline([
    ('sscaler2', StandardScaler()),
    ('logreg2', LogisticRegression(penalty='l1', C=0.1, solver='liblinear')),
])
Titanic - Machine Learning from Disaster
14,271,675
# Load the 512-px pseudo-labeled SE-ResNet50 and run TTA inference.
# NOTE(review): fixed a stray space in the original path ('.. /input').
MODEL_PATH = '../input/seresnet50pseudo-512/model30.pth'
model = get_se_resnet50_gem(pretrain=False)
model.to(device)
model.load_state_dict(torch.load(MODEL_PATH, map_location='cuda:0'))
model.eval()
norm = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
predictions_seresnet_512 = make_predictions(
    model, test_images, norm, size=512, device=torch.device("cuda"))
# Grid-search space for the scaler + logistic-regression pipeline.
# NOTE(review): 'l1' is only valid with the 'liblinear' and 'saga' solvers —
# the grid search will error (or score NaN, depending on sklearn version) on
# the incompatible combinations; preserved as in the original.
pipe_2_params = {
    'sscaler2__with_mean': [True, False],
    'sscaler2__with_std': [True, False],
    'logreg2__C': [0.1, 0.2, 0.3],
    'logreg2__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
    'logreg2__fit_intercept': [True, False],
    'logreg2__penalty': ['l1', 'l2'],
}
Titanic - Machine Learning from Disaster
14,271,675
# Use the 512-px SE-ResNet50 predictions as the final output.
final_predictions = predictions_seresnet_512
# 5-fold grid search over the pipeline hyperparameters.
pipe_2_gridsearch = GridSearchCV(lr_pipe2, pipe_2_params, cv=5, verbose=1)
Titanic - Machine Learning from Disaster
14,271,675
# Bucket the continuous regression outputs into the five diagnosis grades
# using the competition thresholds, then write the submission. Each
# replacement value lies inside its own bin, so the sequential .loc
# assignments do not cascade.
submission = pd.DataFrame(final_predictions)
submission.columns = ['id_code', 'diagnosis']
submission.loc[submission.diagnosis < 0.75, 'diagnosis'] = 0
submission.loc[(0.75 <= submission.diagnosis)
               & (submission.diagnosis < 1.5), 'diagnosis'] = 1
submission.loc[(1.5 <= submission.diagnosis)
               & (submission.diagnosis < 2.5), 'diagnosis'] = 2
submission.loc[(2.5 <= submission.diagnosis)
               & (submission.diagnosis < 3.5), 'diagnosis'] = 3
submission.loc[3.5 <= submission.diagnosis, 'diagnosis'] = 4
submission['diagnosis'] = submission['diagnosis'].astype(int)
submission.to_csv('submission.csv', index=False)
submission.head()
# Fit the grid search (trailing ';' only suppressed notebook output).
pipe_2_gridsearch.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
!pip install.. /input/weights/timm-0.3.1-py3-none-any.whl<import_modules>
# Best cross-validated score found by the grid search.
pipe_2_gridsearch.best_score_
Titanic - Machine Learning from Disaster
14,271,675
device = "cuda:0" <import_modules>
# Best pipeline configuration found by the grid search.
pipe_2_gridsearch.best_estimator_
Titanic - Machine Learning from Disaster
14,271,675
FeaturePyramidNetwork, LastLevelMaxPool, ) def gem(x, p=3, eps=1e-6): return F.avg_pool2d(x.clamp(min=eps ).pow(p),(x.size(-2), x.size(-1)) ).pow(1./p) class GeM(nn.Module): def __init__(self, p=3, eps=1e-6, flatten=False): super(GeM,self ).__init__() self.p = Parameter(torch.ones(1)*p) self.eps = eps self.flatten = flatten def forward(self, x): x = gem(x, p=self.p, eps=self.eps) if self.flatten: x = x.flatten(1) return x def __repr__(self): return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist() [0])+ ', ' + 'eps=' + str(self.eps)+ ')' class FrozenBatchNorm2d(torch.nn.Module): def __init__( self, num_features: int, eps: float = 1e-5, n: Optional[int] = None, ): if n is not None: warnings.warn("`n` argument is deprecated and has been renamed `num_features`", DeprecationWarning) num_features = n super(FrozenBatchNorm2d, self ).__init__() self.eps = eps self.register_buffer("weight", torch.ones(num_features)) self.register_buffer("bias", torch.zeros(num_features)) self.register_buffer("running_mean", torch.zeros(num_features)) self.register_buffer("running_var", torch.ones(num_features)) def _load_from_state_dict( self, state_dict: dict, prefix: str, local_metadata: dict, strict: bool, missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str], ): num_batches_tracked_key = prefix + 'num_batches_tracked' if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super(FrozenBatchNorm2d, self )._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) def forward(self, x: Tensor)-> Tensor: w = self.weight.reshape(1, -1, 1, 1) b = self.bias.reshape(1, -1, 1, 1) rv = self.running_var.reshape(1, -1, 1, 1) rm = self.running_mean.reshape(1, -1, 1, 1) scale = w *(rv + self.eps ).rsqrt() bias = b - rm * scale return x * scale + bias def __repr__(self)-> str: return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})" class Linear(nn.Linear): 
def forward(self, input: torch.Tensor)-> torch.Tensor: if torch.jit.is_scripting() : bias = self.bias.to(dtype=input.dtype)if self.bias is not None else None return F.linear(input, self.weight.to(dtype=input.dtype), bias=bias) else: return F.linear(input, self.weight, self.bias) class backboneNet_efficient(nn.Module): def __init__(self): super(backboneNet_efficient, self ).__init__() net = timm.create_model('tf_efficientnet_b4_ns', pretrained=False) layers_to_train = ['blocks'] for name, parameter in net.named_parameters() : if all([not name.startswith(layer)for layer in layers_to_train]): parameter.requires_grad_(False) self.num_features = 1792 self.conv_stem = net.conv_stem self.bn1 = net.bn1 self.act1 = net.act1 self.block0 = net.blocks[0] self.block1 = net.blocks[1] self.block2 = net.blocks[2] self.block3 = net.blocks[3] self.block4 = net.blocks[4] self.block5 = net.blocks[5] self.block6 = net.blocks[6] self.conv_head = net.conv_head self.bn2 = net.bn2 self.act2 = net.act2 self.global_pool = net.global_pool self.drop_rate = 0.4 self.rg_cls = Linear(self.num_features, 1, bias=True) self.cls_cls = Linear(self.num_features, 5, bias=True) self.ord_cls = Linear(self.num_features, 4, bias=True) def forward(self, x): x1 = self.conv_stem(x) x2 = self.bn1(x1) x3 = self.act1(x2) x4 = self.block0(x1) x5 = self.block1(x4) x6 = self.block2(x5) x7 = self.block3(x6) x8 = self.block4(x7) x9 = self.block5(x8) x10 = self.block6(x9) x11 = self.conv_head(x10) x12 = self.bn2(x11) x13 = self.act2(x12) x14 = self.global_pool(x13) if self.drop_rate > 0.: x14 = F.dropout(x14, p=self.drop_rate, training=self.training) x15 = self.rg_cls(x14) x16 = self.cls_cls(x14) x17 = self.ord_cls(x14) return x15, x16, x17 <find_best_params>
# Predict with the best grid-search pipeline.
pre = pipe_2_gridsearch.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
# Grade-boundary thresholds shared by the head-combination helpers below.
threshold = [0.75, 1.5, 2.5, 3.5]


def regress2class(out):
    """Map a scalar regression output to a grade by counting thresholds passed."""
    prediction = 0
    for i in range(4):
        prediction += (out.data >= threshold[i]).squeeze().cpu().item()
    return prediction


def ordinal2class_prob(out):
    """Convert cumulative ordinal outputs of shape (N, 4) into softmaxed
    per-class scores of shape (N, 5)."""
    pred_prob = torch.zeros(out.size(0), 5).cuda()
    pred_prob[:, 0] = (1 - out[:, 0]).squeeze()
    pred_prob[:, 1] = (out[:, 0] * (1 - out[:, 1])).squeeze()
    pred_prob[:, 2] = (out[:, 1] * (1 - out[:, 2])).squeeze()
    pred_prob[:, 3] = (out[:, 2] * (1 - out[:, 3])).squeeze()
    # NOTE(review): the .cpu() is inconsistent with the other rows (torch
    # copies it back into the CUDA tensor); preserved as in the original.
    pred_prob[:, 4] = out[:, 3].squeeze().cpu()
    return F.softmax(pred_prob, dim=1)


def regress2class_prob(out):
    """Spread each regression output over its two neighbouring integer
    grades, linearly weighted by distance; grade 4 saturates."""
    pred_prob = torch.zeros((out.size(0), 5)).cuda()
    for i in range(out.size(0)):
        if out[i] < 4.:
            l1 = int(math.floor(out[i]))
            l2 = int(math.ceil(out[i]))
            pred_prob[i][l1] = 1 - (out[i] - l1)
            pred_prob[i][l2] = 1 - (l2 - out[i])
        else:
            pred_prob[i][4] = 1.
    return pred_prob


def combine3output(r_out, c_out, o_out):
    """Average the regression, classification and ordinal heads into one
    integer grade."""
    R = regress2class(r_out.data)
    _, C = torch.max(c_out.data, 1)
    C = C.squeeze().item()
    _, O = torch.max(o_out.data, 1)
    O = O.squeeze().item()
    P = (R + C + O) / 3.
    P = int(round(P))
    return P
# Write the grid-searched pipeline's Titanic submission.
submission = pd.DataFrame({
    "PassengerId": test["PassengerId"],
    "Survived": pre,
})
submission.to_csv('submission1_LG_pipline.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
def gem(x, p=3, eps=1e-6):
    """Generalized-mean pooling over the spatial dims of a 4-D tensor."""
    return F.avg_pool2d(x.clamp(min=eps).pow(p),
                        (x.size(-2), x.size(-1))).pow(1. / p)


class GeM(nn.Module):
    """GeM pooling with a learnable exponent; optionally flattens to (N, C)."""

    def __init__(self, p=3, eps=1e-6, flatten=False):
        super(GeM, self).__init__()
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps
        self.flatten = flatten

    def forward(self, x):
        x = gem(x, p=self.p, eps=self.eps)
        if self.flatten:
            x = x.flatten(1)
        return x

    def __repr__(self):
        return (self.__class__.__name__ + '(' + 'p='
                + '{:.4f}'.format(self.p.data.tolist()[0])
                + ', ' + 'eps=' + str(self.eps) + ')')


class Regressor(nn.Module):
    """EfficientNet-B5 backbone with a single sigmoid-scaled regression head
    in [0, 4.5]."""

    def __init__(self):
        super(Regressor, self).__init__()
        self.backbone = timm.models.tf_efficientnet_b5_ns(pretrained=False)
        self.backbone.global_pool = GeM(flatten=True)
        self.regressor = nn.Linear(1000, 1)

    def forward(self, x):
        x = self.backbone(x)
        out = self.regressor(x)
        # Scale to the grade range [0, 4.5].
        out = torch.sigmoid(out) * 4.5
        return out


class ThreeStage_Model(nn.Module):
    """EfficientNet-B4 backbone with classification (5), regression (1) and
    ordinal (4) heads, plus a final regressor over their concatenation."""

    def __init__(self, backbone=None):
        super(ThreeStage_Model, self).__init__()
        self.backbone = timm.models.tf_efficientnet_b4_ns(pretrained=False)
        self.backbone.global_pool = GeM(flatten=True)
        self.classifier = nn.Sequential(
            nn.SiLU(),
            nn.Linear(1000, 500),
            nn.SiLU(),
            nn.Linear(500, 5),
        )
        self.regressor = nn.Sequential(
            nn.SiLU(),
            nn.Linear(1000, 500),
            nn.SiLU(),
            nn.Linear(500, 1),
        )
        self.ordinal = nn.Sequential(
            nn.SiLU(),
            nn.Linear(1000, 500),
            nn.SiLU(),
            nn.Linear(500, 4),
        )
        self.final_regressor = nn.Sequential(
            nn.SiLU(),
            nn.Linear(10, 1),
        )

    def forward(self, x, final=False):
        x = self.backbone(x)
        c_out = self.classifier(x)
        r_out = self.regressor(x)
        o_out = self.ordinal(x)
        if final:
            # Fuse the three heads into one [0, 4.5] prediction.
            out = torch.cat((c_out, r_out, o_out), 1)
            out = self.final_regressor(out)
            out = torch.sigmoid(out) * 4.5
            return out
        else:
            r_out = torch.sigmoid(r_out) * 4.5
            o_out = torch.sigmoid(o_out)
            return c_out, r_out, o_out
# Scaler + k-NN pipeline and its ROC-AUC-scored 5-fold grid search.
lr_pipe2 = Pipeline([
    ('sscaler2', StandardScaler()),
    ('knn', KNeighborsClassifier()),
])
pipe_2_params = {
    'sscaler2__with_mean': [True, False],
    'sscaler2__with_std': [True, False],
    'knn__n_neighbors': [3, 5, 7, 9, 11, 20, 50, 100],
    'knn__weights': ['uniform', 'distance'],
    'knn__metric': ['manhattan', 'euclidean'],
}
pipe_2_gridsearch = GridSearchCV(lr_pipe2, pipe_2_params, cv=5, verbose=1,
                                 scoring='roc_auc')
Titanic - Machine Learning from Disaster
14,271,675
# Read the test ids, build the two preprocessing pipelines, and load the
# four ensemble networks.
# NOTE(review): fixed stray spaces in the original paths ('.. /input').
test_ids = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
test_ids = np.squeeze(test_ids.values)

transform1 = transforms.Compose([
    trim(),
    cropTo4_3(),
    transforms.Resize((288, 384)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.384, 0.258, 0.174],
                         std=[0.124, 0.089, 0.094]),
])
transform2 = transforms.Compose([
    transforms.Resize((280, 280)),
    transforms.CenterCrop(256),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

net1 = ThreeStage_Model()
net1.load_state_dict(
    torch.load("../input/weights/0.926_B4_3stage_5epoch_320finetune.pkl"))
net1 = net1.to(device)
net1.eval()

net2 = backboneNet_efficient()
net2.load_state_dict(torch.load("../input/weights/1.pth", map_location='cpu'))
net2 = net2.to(device)
net2.eval()

net3 = backboneNet_efficient()
net3.load_state_dict(torch.load("../input/weights/2.pth", map_location='cpu'))
net3 = net3.to(device)
net3.eval()

net5 = backboneNet_efficient()
net5.load_state_dict(
    torch.load("../input/weights/0.929292_4", map_location='cpu'))
net5 = net5.to(device)
net5.eval()
# Fit the k-NN grid search (trailing ';' only suppressed notebook output).
pipe_2_gridsearch.fit(X_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
# Write the APTOS submission from the accumulated (id, diagnosis) rows.
df = pd.DataFrame(submission, columns=["id_code", "diagnosis"])
df.to_csv("submission.csv", index=False)
# Predict with the best k-NN pipeline found by the grid search.
y_pre_GS_knn = pipe_2_gridsearch.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
from __future__ import print_function, absolute_import

# Standard library
import argparse
import csv
import datetime
import math
import os
import os.path as osp
import random
import sys
import time

# Third-party
import cv2
import numpy as np
from PIL import Image
from sklearn.metrics import f1_score
from tensorboardX import SummaryWriter
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, Dataset
import torchvision
import torchvision as tv
import torchvision.transforms as transforms
import tqdm
from tqdm import tqdm  # NOTE: rebinds the module name above; kept to match the original
# Best cross-validated ROC-AUC from the k-NN grid search.
pipe_2_gridsearch.best_score_
Titanic - Machine Learning from Disaster
14,271,675
# Collect the test image filenames (id + '.png') from the competition csv.
# NOTE(review): fixed stray space in the path ('.. /input'); use a context
# manager so the file handle is closed deterministically.
name_file = '../input/aptos2019-blindness-detection/test.csv'
content = []
with open(name_file, 'r') as f:
    for line in csv.reader(f):
        content.append(line[0] + '.png')
content = content[1:]  # drop the header row
# Best k-NN pipeline configuration found by the grid search.
pipe_2_gridsearch.best_estimator_
Titanic - Machine Learning from Disaster
14,271,675
def gem(x, p=3, eps=1e-6):
    """Generalized-mean pooling over the spatial dims of a 4-D tensor."""
    return F.avg_pool2d(x.clamp(min=eps).pow(p),
                        (x.size(-2), x.size(-1))).pow(1. / p)


class GeM(nn.Module):
    """GeM pooling with a learnable exponent ``p``."""

    def __init__(self, p=3, eps=1e-6):
        super(GeM, self).__init__()
        self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        return gem(x, p=self.p, eps=self.eps)

    def __repr__(self):
        return (self.__class__.__name__ + '(' + 'p='
                + '{:.4f}'.format(self.p.data.tolist()[0])
                + ', ' + 'eps=' + str(self.eps) + ')')


class Baseline_single_gem(nn.Module):
    """DenseNet-201 feature extractor with GeM pooling and a single
    BCE-style classifier head.

    NOTE(review): removed the unused `map_feature`/`feat_m` locals — they
    called torch.tensor() on a tensor (copy + UserWarning) and were never
    read. Also renamed the misleading local `resnet50` (it is densenet201).
    """

    def __init__(self, num_classes, loss_type="single BCE", **kwargs):
        super(Baseline_single_gem, self).__init__()
        self.loss_type = loss_type
        backbone = torchvision.models.densenet201(pretrained=False)
        # Everything except the final classifier.
        self.base = nn.Sequential(*list(backbone.children())[:-1])
        self.feature_dim = 1920
        if self.loss_type == "single BCE":
            self.ap = GeM()
            self.classifiers = nn.Linear(in_features=self.feature_dim,
                                         out_features=num_classes)
            self.sigmoid = nn.Sigmoid()
            self.dropout = nn.Dropout(0.5)
            self.cal_score = nn.Linear(in_features=num_classes,
                                       out_features=1)

    def freeze_base(self):
        for p in self.base.parameters():
            p.requires_grad = False

    def unfreeze_all(self):
        for p in self.parameters():
            p.requires_grad = True

    def forward(self, x1):
        x = self.base(x1)
        if self.loss_type == "single BCE":
            x = self.ap(x)
            x = self.dropout(x)
            x = x.view(x.size(0), -1)
            ys = self.classifiers(x)
        return ys


class Baseline_single(nn.Module):
    """Same as Baseline_single_gem but with plain adaptive average pooling."""

    def __init__(self, num_classes, loss_type="single BCE", **kwargs):
        super(Baseline_single, self).__init__()
        self.loss_type = loss_type
        backbone = torchvision.models.densenet201(pretrained=False)
        self.base = nn.Sequential(*list(backbone.children())[:-1])
        self.feature_dim = 1920
        if self.loss_type == "single BCE":
            self.ap = nn.AdaptiveAvgPool2d(1)
            self.classifiers = nn.Linear(in_features=self.feature_dim,
                                         out_features=num_classes)
            self.sigmoid = nn.Sigmoid()
            self.dropout = nn.Dropout(0.5)
            self.cal_score = nn.Linear(in_features=num_classes,
                                       out_features=1)

    def freeze_base(self):
        for p in self.base.parameters():
            p.requires_grad = False

    def unfreeze_all(self):
        for p in self.parameters():
            p.requires_grad = True

    def forward(self, x1):
        x = self.base(x1)
        if self.loss_type == "single BCE":
            x = self.ap(x)
            x = self.dropout(x)
            x = x.view(x.size(0), -1)
            ys = self.classifiers(x)
        return ys
# Write the grid-searched k-NN Titanic submission.
submission = pd.DataFrame({
    "PassengerId": test["PassengerId"],
    "Survived": y_pre_GS_knn,
})
submission.to_csv('submission2_GS_knn.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
def cv_imread(file_path):
    """Read an image whose path may contain non-ASCII characters."""
    cv_img = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), -1)
    return cv_img


def change_size(image):
    """Crop away the black border of a fundus image via a binary threshold."""
    b = cv2.threshold(image, 15, 255, cv2.THRESH_BINARY)
    binary_image = b[1]
    binary_image = cv2.cvtColor(binary_image, cv2.COLOR_BGR2GRAY)
    print(binary_image.shape)
    x = binary_image.shape[0]
    print("高度x=", x)
    y = binary_image.shape[1]
    print("宽度y=", y)
    edges_x = []
    edges_y = []
    # Collect the coordinates of every foreground (white) pixel.
    for i in range(x):
        for j in range(y):
            if binary_image[i][j] == 255:
                edges_x.append(i)
                edges_y.append(j)
    left = min(edges_x)
    right = max(edges_x)
    bottom = min(edges_y)
    top = max(edges_y)
    # FIX: include the last foreground row/column — the original sliced
    # [left:left+(right-left)], which always dropped the max row and column.
    pre1_picture = image[left:right + 1, bottom:top + 1]
    return pre1_picture


def crop_image1(img, tol=7):
    """Crop rows/columns whose pixels are all <= tol (2-D image)."""
    mask = img > tol
    return img[np.ix_(mask.any(1), mask.any(0))]


def crop_image_from_gray(img, tol=7):
    """Crop dark borders; handles both grayscale and 3-channel images."""
    if img.ndim == 2:
        mask = img > tol
        return img[np.ix_(mask.any(1), mask.any(0))]
    elif img.ndim == 3:
        gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        mask = gray_img > tol
        check_shape = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))].shape[0]
        if check_shape == 0:
            # Image too dark: cropping would remove everything, return as-is.
            return img
        else:
            img1 = img[:, :, 0][np.ix_(mask.any(1), mask.any(0))]
            img2 = img[:, :, 1][np.ix_(mask.any(1), mask.any(0))]
            img3 = img[:, :, 2][np.ix_(mask.any(1), mask.any(0))]
            img = np.stack([img1, img2, img3], axis=-1)
        return img


def load_ben_color(image, sigmaX=10):
    """Ben Graham preprocessing: crop, resize to 492, subtract Gaussian blur."""
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = crop_image_from_gray(image)
    image = cv2.resize(image, (492, 492))
    image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0, 0), sigmaX), -4, 128)
    return image


def findCircle(image):
    """Estimate the fundus circle (center x/y, radius) with a Hough transform."""
    # FIX(cleanup): removed unused hsv_img/h_img/s_img locals.
    v_img = image[:, :, 2]
    height, width = v_img.shape
    mask_v_a = cv2.adaptiveThreshold(v_img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV,
                                     int(max(height, width) / 16) * 2 + 1, 1)
    # Downscale so the shortest side is 128 px before the Hough transform.
    ratio = 128 / min(height, width)
    msk = cv2.resize(mask_v_a, (int(width * ratio), int(height * ratio)),
                     interpolation=cv2.INTER_CUBIC)
    h, w = msk.shape
    # Pad the mask so circles partially outside the frame can still be found.
    msk_expand = np.zeros((3 * h, 3 * w), np.uint8)
    msk_expand[h:2 * h, w:2 * w] = msk
    long_edge = max(h, w)
    r0 = round(0.3 * long_edge)
    r1 = round(0.7 * long_edge)
    circles = cv2.HoughCircles(msk_expand, cv2.HOUGH_GRADIENT, 1, 90,
                               param1=50, param2=5, minRadius=r0, maxRadius=r1)
    if circles is None:
        # Fallback: assume a centered disc slightly larger than half the frame.
        c_x = width / 2
        c_y = height / 2
        radius = 0.55 * max(height, width)
    else:
        circles = np.uint16(np.around(circles))
        # Undo the padding offset and the downscale ratio.
        c_x = (circles[0, 0, 0] - w) / ratio
        c_y = (circles[0, 0, 1] - h) / ratio
        radius = circles[0, 0, 2] / ratio
    return c_x, c_y, radius


def circleCrop(c_x, c_y, radius, height, width):
    """Bounding box of the circle, clamped to the image frame."""
    if math.floor(radius + c_y) > height:
        # Circle extends past the bottom edge.
        y0 = max(math.ceil(c_y - radius), 0)
        y1 = height
        x1 = width if math.floor(radius + c_x) > width else math.floor(radius + c_x)
        # FIX(idiom): original wrote math.floor(c_x-radius<0), i.e. floor of a
        # boolean; it evaluates identically but obscures the intent.
        x0 = 0 if c_x - radius < 0 else math.floor(c_x - radius)
    elif math.ceil(c_y - radius) < 0:
        # Circle extends past the top edge.
        y0 = 0
        y1 = min(math.floor(c_y + radius), height)
        x1 = width if math.floor(radius + c_x) > width else math.floor(radius + c_x)
        x0 = 0 if c_x - radius < 0 else math.floor(c_x - radius)
    else:
        y0 = math.ceil(c_y - radius)
        y1 = math.floor(c_y + radius)
        x0 = math.ceil(c_x - radius)
        x1 = math.floor(c_x + radius)
    return x0, x1, y0, y1


def trimFundus(image):
    """Crop an image to the detected fundus circle."""
    c_x, c_y, radius = findCircle(image)
    height = image.shape[0]
    width = image.shape[1]
    x0, x1, y0, y1 = circleCrop(c_x, c_y, radius, height, width)
    trimed = image[y0:y1, x0:x1, :]
    return trimed


def load_ben_yuan(image, sigmaX=10):
    """Crop dark borders and resize to 512x512 (no blur subtraction)."""
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = crop_image_from_gray(image)
    image = cv2.resize(image, (512, 512))
    return image


PARAM = 92  # percentage of image height kept by Radius_Reduction


def Radius_Reduction(img, PARAM):
    """Mask the image with a centered circle of diameter PARAM% of the height."""
    h, w, c = img.shape
    Frame = np.zeros((h, w, c), dtype=np.uint8)
    cv2.circle(Frame, (int(math.floor(w / 2)), int(math.floor(h / 2))),
               int(math.floor((h * PARAM) / float(2 * 100))), (255, 255, 255), -1)
    Frame1 = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
    img1 = cv2.bitwise_and(img, img, mask=Frame1)
    return img1


def info_image(im):
    """Estimate eye center (cx, cy) and radius r from the middle scanline."""
    cy = im.shape[0] // 2
    midline = im[cy, :]
    midline = np.where(midline > midline.mean() / 3)[0]
    if len(midline) > im.shape[1] // 2:
        x_start, x_end = np.min(midline), np.max(midline)
    else:
        # Scanline too dark to be informative: fall back to a central band.
        x_start, x_end = im.shape[1] // 10, 9 * im.shape[1] // 10
    cx = (x_start + x_end) / 2
    r = (x_end - x_start) / 2
    return cx, cy, r


def resize_image(im, img_size, augmentation=False):
    """Scale (and optionally jitter/rotate) so the eye fills an img_size square."""
    cx, cy, r = info_image(im)
    scaling = img_size / (2 * r)
    rotation = 0
    if augmentation:
        scaling *= 1 + 0.3 * (np.random.rand() - 0.5)
        rotation = 360 * np.random.rand()
    M = cv2.getRotationMatrix2D((cx, cy), rotation, scaling)
    # Translate so the detected center lands on the output center.
    M[0, 2] -= cx - img_size / 2
    M[1, 2] -= cy - img_size / 2
    return cv2.warpAffine(im, M, (img_size, img_size))


def subtract_median_bg_image(im):
    """Subtract a median-blur background estimate (local contrast boost)."""
    k = np.max(im.shape) // 20 * 2 + 1
    bg = cv2.medianBlur(im, k)
    return cv2.addWeighted(im, 4, bg, -4, 128)


def subtract_gaussian_bg_image(im):
    """Subtract a Gaussian-blur background estimate (Ben Graham trick)."""
    bg = cv2.GaussianBlur(im, (0, 0), 10)
    return cv2.addWeighted(im, 4, bg, -4, 128)


def open_img(fn, size):
    "Open image in `fn`, subclass and overwrite for custom behavior."
    image = cv2.imread(fn)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = resize_image(image, size)
    image = subtract_gaussian_bg_image(image)
    image = Radius_Reduction(image, PARAM)
    image = crop_image_from_gray(image)
    image = cv2.resize(image, (512, 512))
    return image


def get_preds(arr):
    """Map per-row ordinal-threshold arrays to class ids 0-4.

    The class is (index of the first 0) - 1, clipped to [0, 4]; rows
    without a 0 map to class 4.
    """
    mask = arr == 0
    return np.clip(np.where(mask.any(1), mask.argmax(1), 5) - 1, 0, 4)


cnt_t = 0


class eye_dataset(Dataset):
    """Test-set dataset: circle-crop each fundus image and return two
    differently transformed views plus the image id (filename sans ext)."""

    def __init__(self, txt_path, transform=None, transform2=None):
        self.imgs = [img for img in txt_path]
        self.transform = transform
        self.transform2 = transform2

    def __getitem__(self, index):
        fn = self.imgs[index]
        img = cv2.imread('/kaggle/input/aptos2019-blindness-detection/test_images/' + fn)
        img_copy = img.copy()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_copy = img.copy()
        try:
            img = trimFundus(img)
            img = cv2.resize(img, (512, 512))
        except Exception as e:
            # Circle detection failed: fall back to a plain gray-border crop.
            print(e)
            img = crop_image_from_gray(img_copy)
            img = cv2.resize(img, (512, 512))
        img = Image.fromarray(img)
        if self.transform is not None:
            img1 = self.transform(img)
            img2 = self.transform2(img)
        return img1, img2, fn[:-4]

    def __len__(self):
        return len(self.imgs)


class eye_dataset_circle(Dataset):
    """Test-set dataset using the open_img preprocessing (single view)."""

    def __init__(self, txt_path, transform=None):
        self.imgs = [img for img in txt_path]
        self.transform = transform

    def __getitem__(self, index):
        fn = self.imgs[index]
        img = open_img('/kaggle/input/aptos2019-blindness-detection/test_images/' + fn, 530)
        img = Image.fromarray(img)
        if self.transform is not None:
            img = self.transform(img)
        return img, fn[:-4]

    def __len__(self):
        return len(self.imgs)


class eye_dataset_orl(Dataset):
    """Test-set dataset returning four views: two of the gray-cropped image
    and two of the open_img-preprocessed image."""

    def __init__(self, txt_path, transform=None, transform2=None, transform3=None, transform4=None):
        self.imgs = [img for img in txt_path]
        self.transform = transform
        self.transform2 = transform2
        self.transform3 = transform3
        self.transform4 = transform4

    def __getitem__(self, index):
        fn = self.imgs[index]
        img = cv2.imread('/kaggle/input/aptos2019-blindness-detection/test_images/' + fn)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = crop_image_from_gray(img)
        img3_ = open_img('/kaggle/input/aptos2019-blindness-detection/test_images/' + fn, 530)
        img4_ = open_img('/kaggle/input/aptos2019-blindness-detection/test_images/' + fn, 530)
        img3_ = Image.fromarray(img3_)
        img4_ = Image.fromarray(img4_)
        img = cv2.resize(img, (512, 512))
        img = Image.fromarray(img)
        if self.transform is not None:
            img1 = self.transform(img)
            img2 = self.transform2(img)
            img3 = self.transform3(img3_)
            img4 = self.transform4(img4_)
        return img1, img2, img3, img4, fn[:-4]

    def __len__(self):
        return len(self.imgs)
# Baseline k-nearest-neighbours classifier with default hyperparameters.
knn = KNeighborsClassifier()
Titanic - Machine Learning from Disaster
14,271,675
def load_para_dict(model1):
    """Load a checkpoint and strip the 'module.' prefix added by nn.DataParallel.

    Returns an OrderedDict usable by a plain (non-parallel) module.
    """
    state_dict_1 = torch.load(model1)
    new_state_dict = OrderedDict()
    for k, v in state_dict_1.items():
        # FIX: match the exact DataParallel prefix. The original tested
        # `'module' in k`, which also mangled keys merely containing the word,
        # e.g. SENet's 'layer1.0.se_module.fc1.weight'.
        if k.startswith('module.'):
            name = k[len('module.'):]
        else:
            name = k
        new_state_dict[name] = v
    return new_state_dict
# Fit KNN on the scaled training features.
knn.fit(Xs_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
# IPython magics: auto-reload edited modules and render plots inline.
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# Training-set accuracy (optimistic; see the cross-validation cell below).
knn.score(Xs_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
from fastai import * from fastai.vision import * import pandas as pd import matplotlib.pyplot as plt<set_options>
# 5-fold cross-validated accuracy for the KNN baseline.
cross_val_score(knn, Xs_train, y_train, cv=5).mean()
Titanic - Machine Learning from Disaster
14,271,675
# Sanity check that the GPU backend is available before training.
print('Make sure cudnn is enabled:', torch.backends.cudnn.enabled)
# NOTE(review): KNN was fitted on scaled Xs_train but predicts on raw X_test —
# confirm the test features were scaled with the same scaler.
pre = knn.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
PATH = Path('../input/aptos2019-blindness-detection')
# Write the plain-KNN predictions in the Kaggle submission format.
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre})
submission.to_csv('submission2_KNN.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# Load the APTOS training labels.
df = pd.read_csv(PATH/'train.csv')
df.head()
# Grid-search a random forest over depth/feature/split hyperparameters
# with 5-fold CV; the trailing attribute accesses display the best result.
model = RandomForestClassifier()
param = {'n_estimators': [100, 200, 300],
         'max_depth': [1, 3, 5, 7],
         'criterion': ["gini"],
         'max_features': [1, 3, 5],
         "min_samples_split": [2, 3, 5]}
clf = GridSearchCV(estimator=model, param_grid=param, scoring="accuracy", verbose=1, n_jobs=-1, cv=5)
clf.fit(X_train, y_train)
clf.best_estimator_
clf.best_score_
Titanic - Machine Learning from Disaster
14,271,675
def seed_everything(seed):
    """Seed every RNG in use (hash, python, numpy, torch) for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Force deterministic cuDNN kernels.
    torch.backends.cudnn.deterministic = True


SEED = 999
seed_everything(SEED)
# Predict on the test set with the best grid-searched random forest.
pre = clf.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
# FIX: the relative path was garbled by extraction ('.. ' -> '..').
base_image_dir = os.path.join('..', 'input/aptos2019-blindness-detection/')
train_dir = os.path.join(base_image_dir, 'train_images/')
df = pd.read_csv(os.path.join(base_image_dir, 'train.csv'))
# Attach the full image path per id, drop the id and shuffle the rows.
df['path'] = df['id_code'].map(lambda x: os.path.join(train_dir, '{}.png'.format(x)))
df = df.drop(columns=['id_code'])
df = df.sample(frac=1).reset_index(drop=True)
df.head(10)
# Write the grid-searched random-forest predictions for submission.
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre})
submission.to_csv('submission2_RF_GS.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# Number of labelled training images.
len_df = len(df)
len_df
# Scaler + random-forest pipeline grid-searched jointly over scaler flags
# and forest hyperparameters (5-fold CV, accuracy).
lr_pipe2 = Pipeline([
    ('sscaler2', StandardScaler()),
    ('rf', RandomForestClassifier())
])
pipe_2_params = {'sscaler2__with_mean': [True, False],
                 'sscaler2__with_std': [True, False],
                 'rf__bootstrap': [True],
                 'rf__max_depth': [1, 3, 5, 7],
                 'rf__max_features': [1, 3, 5],
                 'rf__criterion': ["gini"],
                 'rf__min_samples_leaf': [3, 4, 5],
                 'rf__min_samples_split': [2, 3, 5],
                 'rf__n_estimators': [100, 200, 300]}
pipe_2_gridsearch = GridSearchCV(lr_pipe2, pipe_2_params, cv=5, verbose=1, scoring="accuracy", n_jobs=-1)
Titanic - Machine Learning from Disaster
14,271,675
# Inspect one training image and its native resolution.
im = Image.open(df['path'][1])
width, height = im.size
print(width, height)
im.show()
# Run the pipeline grid search (can take a while: large parameter grid).
pipe_2_gridsearch.fit(X_train, y_train);
Titanic - Machine Learning from Disaster
14,271,675
# Batch size and input image size for the fastai learner.
bs = 64
sz = 224
# Best cross-validated accuracy of the scaler+forest pipeline.
pipe_2_gridsearch.best_score_
Titanic - Machine Learning from Disaster
14,271,675
# Preview a grid of training images with their labels.
data.show_batch(rows=3, figsize=(7, 6))
# Predict on the test set with the best pipeline found by the grid search.
pre = pipe_2_gridsearch.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
def quadratic_kappa(y_hat, y):
    """Quadratic-weighted Cohen's kappa as a fastai metric tensor.

    NOTE(review): the result is hard-coded onto 'cuda:0' — confirm a GPU is
    always present wherever this metric is evaluated.
    """
    return torch.tensor(cohen_kappa_score(torch.round(y_hat), y, weights='quadratic'), device='cuda:0')
# Write the pipeline-grid-search forest predictions for submission.
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre})
submission.to_csv('submission2_RF_pip_GS.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# ResNet-50 transfer-learning model tracked with the kappa metric.
learn = cnn_learner(data, base_arch=models.resnet50, metrics=[quadratic_kappa])
# Scaler + decision-tree pipeline grid search (5-fold CV, accuracy).
lr_pipe3 = Pipeline([
    ('sscaler2', StandardScaler()),
    ('dt', DecisionTreeClassifier())
])
pipe_3_params = {'sscaler2__with_mean': [True, False],
                 'sscaler2__with_std': [True, False],
                 'dt__max_depth': [10],
                 'dt__random_state': [100],
                 'dt__max_features': [1, 3, 5],
                 'dt__criterion': ["gini"],
                 'dt__min_samples_leaf': [10],
                 'dt__min_samples_split': [2, 3, 4]}
pipe_3_gridsearch = GridSearchCV(lr_pipe3, pipe_3_params, cv=5, verbose=1, scoring="accuracy", n_jobs=-1)
Titanic - Machine Learning from Disaster
14,271,675
# Learning-rate range test to pick max_lr for one-cycle training.
learn.lr_find()
# Run the decision-tree pipeline grid search.
pipe_3_gridsearch.fit(X_train, y_train);
Titanic - Machine Learning from Disaster
14,271,675
# Stage 1: train the head for 5 one-cycle epochs.
learn.fit_one_cycle(5, max_lr=1e-2)
# Best cross-validated accuracy of the scaler+tree pipeline.
pipe_3_gridsearch.best_score_
Titanic - Machine Learning from Disaster
14,271,675
# Stage 2: fine-tune the whole network with discriminative learning rates.
learn.fit_one_cycle(6, max_lr=slice(1e-6, 1e-3))
# Predict on the test set with the best grid-searched tree pipeline.
pre = pipe_3_gridsearch.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
# Export the inference graph and checkpoint the fine-tuned weights.
learn.export()
learn.save('stage-2')
# Write the tree-pipeline predictions for submission.
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre})
submission.to_csv('submission2_DT_pip_GS.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# Build an interpretation object and sanity-check the top-loss bookkeeping.
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
# Fit a stand-alone decision tree with the grid-search-selected settings.
tree = DecisionTreeClassifier(criterion='gini', max_depth=10, random_state=100, min_samples_leaf=10)
tree.fit(X_train, y_train)
y_predicted = tree.predict(Xs_test)
Titanic - Machine Learning from Disaster
14,271,675
# Raw validation predictions, used below to fit the rounding thresholds.
valid_preds = learn.get_preds(ds_type=DatasetType.Valid)
# Training-set accuracy of the decision tree (optimistic estimate).
tree.score(Xs_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
import numpy as np import pandas as pd import os import scipy as sp from functools import partial from sklearn import metrics from collections import Counter import json<compute_test_metric>
# Predict on the (unscaled) test set with the decision tree.
pre = tree.predict(X_test)
Titanic - Machine Learning from Disaster
14,271,675
class OptimizedRounder(object):
    """Learn thresholds mapping continuous predictions to ordinal classes 0-4
    by maximizing the quadratic-weighted kappa (Nelder-Mead search)."""

    def __init__(self):
        self.coef_ = 0

    @staticmethod
    def _bucketize(X, coef):
        """Assign each prediction to a class using the four threshold values."""
        X_p = np.copy(X)
        for i, pred in enumerate(X_p):
            if pred < coef[0]:
                X_p[i] = 0
            elif pred < coef[1]:
                X_p[i] = 1
            elif pred < coef[2]:
                X_p[i] = 2
            elif pred < coef[3]:
                X_p[i] = 3
            else:
                X_p[i] = 4
        return X_p

    def _kappa_loss(self, coef, X, y):
        """Negative quadratic kappa of the bucketized predictions."""
        return -metrics.cohen_kappa_score(y, self._bucketize(X, coef), weights='quadratic')

    def fit(self, X, y):
        """Optimize the four thresholds against labels y."""
        loss_partial = partial(self._kappa_loss, X=X, y=y)
        initial_coef = [0.5, 1.5, 2.5, 3.5]
        self.coef_ = sp.optimize.minimize(loss_partial, initial_coef, method='nelder-mead')
        print(-loss_partial(self.coef_['x']))

    def predict(self, X, coef):
        """Apply the given thresholds to new predictions."""
        return self._bucketize(X, coef)

    def coefficients(self):
        """Return the optimized threshold vector."""
        return self.coef_['x']
# Write the plain decision-tree predictions for submission.
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": pre})
submission.to_csv('submission2_DT.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# Fit the kappa-optimal rounding thresholds on the validation predictions.
optR = OptimizedRounder()
optR.fit(valid_preds[0], valid_preds[1])
# Fit a default-kernel SVM on the scaled features and report training accuracy.
SVM = SVC()
SVM.fit(Xs_train, y_train)
SVM_predictions = SVM.predict(Xs_test)
SVM.score(Xs_train, y_train)
Titanic - Machine Learning from Disaster
14,271,675
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
sample_df = pd.read_csv('../input/aptos2019-blindness-detection/sample_submission.csv')
sample_df.head()
# Write the SVM predictions for submission.
submission = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": SVM_predictions})
submission.to_csv('submission2_SVM.csv', index=False)
Titanic - Machine Learning from Disaster
14,271,675
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
# Attach the test images so the learner can run inference on them.
learn.data.add_test(ImageList.from_df(sample_df, '../input/aptos2019-blindness-detection', folder='test_images', suffix='.png'))
# Leaderboard/validation accuracy summary of all models tried above.
scores = {'LR_pip': 0.772, 'LR': 0.770, 'Knn_pip': 0.779, 'Knn': 0.669,
          'RF_GS': 0.787, 'RF_pip_GS': 0.775, 'DT_pip_GS': 0.760,
          'DT': 0.779, 'SVM': 0.779}
Titanic - Machine Learning from Disaster
14,301,061
# Test-time-augmented predictions on the test set.
preds, y = learn.TTA(ds_type=DatasetType.Test)
# Shell: enable Jupyter ipywidgets support.
!jupyter nbextension enable --py widgetsnbextension
Titanic - Machine Learning from Disaster
14,301,061
# Round the continuous TTA predictions with the fitted kappa thresholds.
test_predictions = optR.predict(preds, coefficients)
# Load the Titanic training data.
data = pd.read_csv("/kaggle/input/titanic/train.csv")
data.head(5)
Titanic - Machine Learning from Disaster
14,301,061
# Write integer class labels into the submission frame.
sample_df.diagnosis = test_predictions.astype(int)
sample_df.head()
# Survival rate by sex.
data.groupby('Sex')['Survived'].mean()
Titanic - Machine Learning from Disaster
14,301,061
# Write the final APTOS submission file.
sample_df.to_csv('submission.csv', index=False)
# Survival rate by passenger class and sex.
data.groupby(['Pclass', 'Sex'])['Survived'].mean()
Titanic - Machine Learning from Disaster
14,301,061
# IPython magic: import numpy/matplotlib into the namespace, plots inline.
%pylab inline
data['Initial']=0 for i in data: data['Initial']=data.Name.str.extract('([A-Za-z]+)\.') pd.crosstab(data.Initial,data.Sex ).T.style.background_gradient(cmap='summer_r' )
Titanic - Machine Learning from Disaster
14,301,061
from sklearn.preprocessing import StandardScaler from sklearn.cross_validation import train_test_split from sklearn.preprocessing import LabelEncoder<import_modules>
# Collapse rare honorifics into Mr/Mrs/Miss/Other, then inspect mean age
# per group (used below to impute missing ages).
data['Initial'].replace(['Mlle', 'Mme', 'Ms', 'Dr', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'],
                        ['Miss', 'Miss', 'Miss', 'Mr', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
data.groupby('Initial')['Age'].mean()
Titanic - Machine Learning from Disaster
14,301,061
from keras.models import Sequential from keras.layers import Dense,Dropout,Activation from keras.utils.np_utils import to_categorical<import_modules>
# Impute missing ages with the per-honorific mean age computed above.
data.loc[(data.Age.isnull()) & (data.Initial == 'Mr'), 'Age'] = 33
data.loc[(data.Age.isnull()) & (data.Initial == 'Mrs'), 'Age'] = 36
data.loc[(data.Age.isnull()) & (data.Initial == 'Master'), 'Age'] = 5
data.loc[(data.Age.isnull()) & (data.Initial == 'Miss'), 'Age'] = 22
data.loc[(data.Age.isnull()) & (data.Initial == 'Other'), 'Age'] = 46
data.Age.isnull().any()
Titanic - Machine Learning from Disaster
14,301,061
# Record the interpreter version for reproducibility.
print(sys.version)
# Fill the two missing embarkation ports with the mode ('S').
data['Embarked'] = data['Embarked'].fillna('S')
Titanic - Machine Learning from Disaster
14,301,061
# Record the pandas version for reproducibility.
pd.__version__
# Bucket Age into 4 ordinal bands: 0 (<=16), 1 (17-32), 2 (33-48), 3 (>48).
data['Age_band'] = 0
data.loc[(data['Age'] > 16) & (data['Age'] <= 32), 'Age_band'] = 1
data.loc[(data['Age'] > 32) & (data['Age'] <= 48), 'Age_band'] = 2
data.loc[(data['Age'] > 48), 'Age_band'] = 3
Titanic - Machine Learning from Disaster
14,301,061
# Default matplotlib figure size.
rcParams['figure.figsize'] = 8, 8
# Family-size features: household size and an "alone" indicator.
data['FamilySize'] = data['SibSp'] + data['Parch'] + 1
data['IsAlone'] = 1
# FIX: assign via .loc on the frame; the original chained
# data['IsAlone'].loc[...] = 0 writes through an intermediate object
# (SettingWithCopyWarning) and is not guaranteed to update `data`.
data.loc[data['FamilySize'] > 1, 'IsAlone'] = 0
Titanic - Machine Learning from Disaster
14,301,061
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
data = pd.read_csv('../input/train.csv')
parent_data = data.copy()  # keep an untouched copy for the species names
ID = data.pop('id')
# Encode Sex and Embarked as small integers for the models below.
data['Sex'] = data['Sex'].map({'female': 0, 'male': 1}).astype(int)
data['Embarked'] = data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
Titanic - Machine Learning from Disaster
14,301,061
# Separate the target and integer-encode the species labels.
y = data.pop('species')
y = LabelEncoder().fit(y).transform(y)
print(y.shape)
from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import train_test_split from sklearn import metrics from sklearn.metrics import confusion_matrix from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score from sklearn.model_selection import cross_val_predict from sklearn.model_selection import GridSearchCV
Titanic - Machine Learning from Disaster
14,301,061
# Standardize the 192 leaf features.
X = StandardScaler().fit(data).transform(data)
print(X.shape)
# Stratified 70/30 train/validation split; first column is the target.
train, val = train_test_split(data, test_size=0.3, random_state=42, stratify=data['Survived'])
train_X = train[train.columns[1:]]
train_Y = train[train.columns[:1]]
val_X = val[val.columns[1:]]
val_Y = val[val.columns[:1]]
Titanic - Machine Learning from Disaster
14,301,061
# One-hot encode the integer labels for the softmax output layer.
y_cat = to_categorical(y)
print(y_cat.shape)
# Logistic-regression baseline on the hold-out split.
model = LogisticRegression()
model.fit(train_X, train_Y)
prediction = model.predict(val_X)
print('The accuracy of the Logistic Regression is', metrics.accuracy_score(prediction, val_Y))
Titanic - Machine Learning from Disaster
14,301,061
# MLP: 192 features -> 2048 relu -> 1024 sigmoid -> 99-way softmax,
# with dropout after the two hidden layers.
model = Sequential()
model.add(Dense(2048, input_dim=192, init='uniform', activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1024, activation='sigmoid'))
model.add(Dropout(0.3))
model.add(Dense(99, activation='softmax'))
# Full feature matrix/target and a 10-fold splitter for cross-validation.
# NOTE(review): random_state without shuffle=True is rejected by newer
# scikit-learn — confirm the installed version tolerates this.
X = data[data.columns[1:]]
Y = data['Survived']
kfold = KFold(n_splits=10, random_state=22)
Titanic - Machine Learning from Disaster
14,301,061
# Multi-class cross-entropy with the Adamax optimizer.
model.compile(loss='categorical_crossentropy', optimizer='Adamax', metrics=["accuracy"])
# 10-fold CV accuracy for logistic regression.
logistic_cv_result = cross_val_score(LogisticRegression(), X, Y, cv=kfold, scoring="accuracy")
print('The mean accuracy of the Logistic Regression under 10-fold validation is: ',
      np.mean(logistic_cv_result), 'std is: ', np.std(logistic_cv_result))
Titanic - Machine Learning from Disaster
14,301,061
# Train the MLP for 125 epochs (10% validation split) and time the run.
start = time.time()
history = model.fit(X, y_cat, batch_size=100, nb_epoch=125, verbose=0, validation_split=0.1)
end = time.time()
print('runtime: ', "%.3f" % (end - start), ' [sec]')
# 10-fold CV accuracy for a default decision tree.
tree_cv_result = cross_val_score(DecisionTreeClassifier(), X, Y, cv=kfold, scoring="accuracy")
print('The mean accuracy of the decision tree under 10-fold validation is: ',
      np.mean(tree_cv_result), 'std is: ', np.std(tree_cv_result))
Titanic - Machine Learning from Disaster
14,301,061
# Summarize the best train/validation metrics reached during training.
print('---------------------------------------')
print('acc: ', max(history.history['acc']))
print('loss: ', min(history.history['loss']))
print('---------------------------------------')
print('val_acc: ', max(history.history['val_acc']))
print('val_loss: ', min(history.history['val_loss']))
# 10-fold CV accuracy for a 100-tree random forest.
forest_cv_result = cross_val_score(RandomForestClassifier(n_estimators=100), X, Y, cv=kfold, scoring="accuracy")
print('The mean accuracy of the random forest under 10-fold validation is: ',
      np.mean(forest_cv_result), 'std is: ', np.std(forest_cv_result))
Titanic - Machine Learning from Disaster
14,301,061
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
test = pd.read_csv('../input/test.csv')
index = test.pop('id')
# NOTE(review): this re-fits a scaler on the *test* set rather than reusing the
# training-set scaler — confirm whether both splits should share one scaler.
test = StandardScaler().fit(test).transform(test)
yPred = model.predict_proba(test)
# Tuned random forest evaluated on the hold-out split.
forest = RandomForestClassifier(n_estimators=100, min_samples_leaf=1, min_samples_split=10)
forest.fit(train_X, train_Y)
prediction = forest.predict(val_X)
print('The accuracy is', metrics.accuracy_score(prediction, val_Y))
Titanic - Machine Learning from Disaster
14,301,061
# Probabilities indexed by test id with one column per species (sorted names).
yPred = pd.DataFrame(yPred, index=index, columns=sort(parent_data.species.unique()))
import xgboost as xgb
Titanic - Machine Learning from Disaster
14,301,061
# FIX: use a context manager so the submission file is flushed and closed
# even if to_csv()/write() raises — the original never called fp.close().
with open('submission_nn_kernel.csv', 'w') as fp:
    fp.write(yPred.to_csv())
# XGBoost classifier with hand-tuned regularization/subsampling, evaluated
# on the hold-out split.
gbm = xgb.XGBClassifier(
    n_estimators=2000,
    max_depth=4,
    min_child_weight=2,
    gamma=0.9,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='binary:logistic',
    scale_pos_weight=1).fit(train_X, train_Y)
predictions = gbm.predict(val_X)
print('The accuracy of XG Boost is', metrics.accuracy_score(predictions, val_Y))
Titanic - Machine Learning from Disaster
14,301,061
# Toggle for a reduced debugging run.
DEBUG = False
from catboost import CatBoostClassifier, Pool, cv
Titanic - Machine Learning from Disaster
14,301,061
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
# Put the offline EfficientNet package first on the import path.
sys.path = [
    '../input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master',
] + sys.path
Titanic - Machine Learning from Disaster
14,301,061
import skimage.io import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset from efficientnet_pytorch import model as enet import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm <load_from_csv>
# CatBoost with a low learning rate; cross-validate on the full training set.
cat = CatBoostClassifier(
    l2_leaf_reg=1,
    learning_rate=0.003842420425736234,
    iterations=500,
    eval_metric='Accuracy',
    random_seed=42,
    verbose=False,
    loss_function='Logloss',
)
# NOTE(review): cat_features=range(n_cols) declares every column categorical —
# confirm that is intended for the numeric Age_band/FamilySize columns.
cv_data = cv(Pool(X, Y, cat_features=range(train_X.shape[1])), cat.get_params())
Titanic - Machine Learning from Disaster
14,301,061
# FIX: relative paths were garbled by extraction ('.. /input' -> '../input').
data_dir = '../input/prostate-cancer-grade-assessment'
df_train = pd.read_csv(os.path.join(data_dir, 'train.csv'))
df_test = pd.read_csv(os.path.join(data_dir, 'test.csv'))
df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))
model_dir = '../input/panda-public-models'
image_folder = os.path.join(data_dir, 'test_images')
# The hidden test images only exist at submission time; otherwise run a
# 101-row smoke test on the training images.
is_test = os.path.exists(image_folder)
image_folder = image_folder if is_test else os.path.join(data_dir, 'train_images')
df = df_test if is_test else df_train.loc[:100]
tile_size = 256
image_size = 256
n_tiles = 36
batch_size = 8
num_workers = 4
device = torch.device('cuda')
print(image_folder)
# Best mean CV accuracy reached by CatBoost across iterations.
print('Precise validation accuracy score: {}'.format(np.max(cv_data['test-Accuracy-mean'])))
Titanic - Machine Learning from Disaster
14,301,061
class enetv2(nn.Module):
    """EfficientNet backbone with the classifier replaced by a 5-way ordinal head."""

    def __init__(self, backbone, out_dim):
        super(enetv2, self).__init__()
        self.enet = enet.EfficientNet.from_name(backbone)
        self.myfc = nn.Linear(self.enet._fc.in_features, out_dim)
        self.enet._fc = nn.Identity()  # bypass the original classifier

    def extract(self, x):
        # Backbone features only (no head).
        return self.enet(x)

    def forward(self, x):
        x = self.extract(x)
        x = self.myfc(x)
        return x


def load_models(model_files):
    """Load each checkpoint into an efficientnet-b0 enetv2, eval mode, on `device`."""
    models = []
    for model_f in model_files:
        model_f = os.path.join(model_dir, model_f)
        backbone = 'efficientnet-b0'
        model = enetv2(backbone, out_dim=5)
        # map_location keeps the load on CPU; .to(device) moves it afterwards.
        model.load_state_dict(torch.load(model_f, map_location=lambda storage, loc: storage), strict=True)
        model.eval()
        model.to(device)
        models.append(model)
        print(f'{model_f} loaded!')
    return models


model_files = [
    'cls_effnet_b0_Rand36r36tiles256_big_bce_lr0.3_augx2_30epo_model_fold0.pth'
]
models = load_models(model_files)
# Load the Titanic test data.
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
Titanic - Machine Learning from Disaster
14,301,061
def get_tiles(img, mode=0):
    """Split a slide into tile_size tiles; keep the n_tiles with the most tissue.

    Returns (list of {'img', 'idx'} dicts, bool: whether at least n_tiles
    tiles contained any tissue).
    """
    result = []
    h, w, c = img.shape
    # Pad so both dimensions are multiples of tile_size; mode=2 shifts the grid.
    pad_h = (tile_size - h % tile_size) % tile_size + ((tile_size * mode) // 2)
    pad_w = (tile_size - w % tile_size) % tile_size + ((tile_size * mode) // 2)
    img2 = np.pad(img, [[pad_h // 2, pad_h - pad_h // 2],
                        [pad_w // 2, pad_w - pad_w // 2],
                        [0, 0]], constant_values=255)
    img3 = img2.reshape(
        img2.shape[0] // tile_size, tile_size,
        img2.shape[1] // tile_size, tile_size, 3
    )
    img3 = img3.transpose(0, 2, 1, 3, 4).reshape(-1, tile_size, tile_size, 3)
    # A tile "has info" if it is not pure white.
    n_tiles_with_info = (img3.reshape(img3.shape[0], -1).sum(1)
                         < tile_size ** 2 * 3 * 255).sum()
    # FIX: the original tested `len(img) < n_tiles` (pixel rows, not tile
    # count) and padded with an undefined name `N` (NameError when hit).
    # Pad the tile stack itself up to n_tiles with white tiles.
    if len(img3) < n_tiles:
        img3 = np.pad(img3, [[0, n_tiles - len(img3)], [0, 0], [0, 0], [0, 0]],
                      constant_values=255)
    # Darkest (most tissue) tiles first.
    idxs = np.argsort(img3.reshape(img3.shape[0], -1).sum(-1))[:n_tiles]
    img3 = img3[idxs]
    for i in range(len(img3)):
        result.append({'img': img3[i], 'idx': i})
    return result, n_tiles_with_info >= n_tiles


class PANDADataset(Dataset):
    """Assemble the selected tiles of each slide into one square mosaic tensor."""

    def __init__(self, df, image_size, n_tiles=n_tiles, tile_mode=0, rand=False, sub_imgs=False):
        self.df = df.reset_index(drop=True)
        self.image_size = image_size
        self.n_tiles = n_tiles
        self.tile_mode = tile_mode
        self.rand = rand        # shuffle tile placement if True
        self.sub_imgs = sub_imgs  # offset into a second tile bank if True

    def __len__(self):
        return self.df.shape[0]

    def __getitem__(self, index):
        row = self.df.iloc[index]
        img_id = row.image_id
        tiff_file = os.path.join(image_folder, f'{img_id}.tiff')
        # Level 1 of the pyramid (medium resolution).
        image = skimage.io.MultiImage(tiff_file)[1]
        tiles, OK = get_tiles(image, self.tile_mode)
        if self.rand:
            idxes = np.random.choice(list(range(self.n_tiles)), self.n_tiles, replace=False)
        else:
            idxes = list(range(self.n_tiles))
        idxes = np.asarray(idxes) + self.n_tiles if self.sub_imgs else idxes
        n_row_tiles = int(np.sqrt(self.n_tiles))
        images = np.zeros((image_size * n_row_tiles, image_size * n_row_tiles, 3))
        for h in range(n_row_tiles):
            for w in range(n_row_tiles):
                i = h * n_row_tiles + w
                if len(tiles) > idxes[i]:
                    this_img = tiles[idxes[i]]['img']
                else:
                    # Missing tile: fill with a blank (white) tile.
                    this_img = np.ones((self.image_size, self.image_size, 3)).astype(np.uint8) * 255
                this_img = 255 - this_img  # invert so tissue is bright
                h1 = h * image_size
                w1 = w * image_size
                images[h1:h1 + image_size, w1:w1 + image_size] = this_img
        images = images.astype(np.float32)
        images /= 255
        images = images.transpose(2, 0, 1)  # HWC -> CHW
        return torch.tensor(images)
# Apply the same encodings/features to the test set as to the training set.
test_data['Sex'] = test_data['Sex'].map({'female': 0, 'male': 1}).astype(int)
test_data['Embarked'] = test_data['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)
test_data['Age_band'] = 0
test_data.loc[(test_data['Age'] > 16) & (test_data['Age'] <= 32), 'Age_band'] = 1
test_data.loc[(test_data['Age'] > 32) & (test_data['Age'] <= 48), 'Age_band'] = 2
test_data.loc[(test_data['Age'] > 48), 'Age_band'] = 3
test_data['FamilySize'] = test_data['SibSp'] + test_data['Parch'] + 1
test_data['IsAlone'] = 1
# FIX: avoid chained indexing (SettingWithCopyWarning; write may be lost).
test_data.loc[test_data['FamilySize'] > 1, 'IsAlone'] = 0
Titanic - Machine Learning from Disaster
14,301,061
# Two loaders over the same slides with different tile-grid offsets (TTA).
dataset = PANDADataset(df, image_size, n_tiles, 0)
loader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, shuffle=False)
dataset2 = PANDADataset(df, image_size, n_tiles, 2)
loader2 = DataLoader(dataset2, batch_size=batch_size, num_workers=num_workers, shuffle=False)
# Feature subset used by the final voting ensemble.
features = ['Pclass', 'Sex', 'Embarked', 'Age_band', 'IsAlone']
X_test = pd.get_dummies(test_data[features])
Titanic - Machine Learning from Disaster
14,301,061
# TTA inference: average the sigmoid outputs of the two tile layouts, then
# sum the per-threshold probabilities to get the ISUP grade (ordinal head).
LOGITS = []
LOGITS2 = []
with torch.no_grad():
    for data in tqdm(loader):
        data = data.to(device)
        logits = models[0](data)
        LOGITS.append(logits)
    for data in tqdm(loader2):
        data = data.to(device)
        logits = models[0](data)
        LOGITS2.append(logits)
LOGITS = (torch.cat(LOGITS).sigmoid().cpu() + torch.cat(LOGITS2).sigmoid().cpu()) / 2
PREDS = LOGITS.sum(1).round().numpy()
df['isup_grade'] = PREDS.astype(int)
df[['image_id', 'isup_grade']].to_csv('submission.csv', index=False)
print(df.head())
print()
print(df.isup_grade.value_counts())
Titanic - Machine Learning from Disaster
14,301,061
# Toggle for a reduced debugging run.
DEBUG = False
# Re-instantiate the CatBoost and random-forest members for the ensemble.
cat = CatBoostClassifier(
    l2_leaf_reg=1,
    learning_rate=0.003842420425736234,
    iterations=500,
    eval_metric='Accuracy',
    random_seed=42,
    verbose=False,
    loss_function='Logloss',
)
forest = RandomForestClassifier(n_estimators=100, min_samples_leaf=1, min_samples_split=10)
Titanic - Machine Learning from Disaster
14,301,061
# FIX: the relative path was garbled by extraction ('.. /input' -> '../input').
# Put the offline EfficientNet package first on the import path.
sys.path = [
    '../input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master',
] + sys.path
# Unfitted XGBoost member for the voting ensemble (same settings as above).
gbm = xgb.XGBClassifier(
    n_estimators=2000,
    max_depth=4,
    min_child_weight=2,
    gamma=0.9,
    subsample=0.8,
    colsample_bytree=0.8,
    objective='binary:logistic',
    scale_pos_weight=1)
Titanic - Machine Learning from Disaster
14,301,061
import skimage.io import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.data import DataLoader, Dataset from efficientnet_pytorch import model as enet import matplotlib.pyplot as plt from tqdm import tqdm_notebook as tqdm<load_from_csv>
from sklearn.ensemble import VotingClassifier
Titanic - Machine Learning from Disaster
14,301,061
# FIX: relative paths were garbled by extraction ('.. /input' -> '../input').
data_dir = '../input/prostate-cancer-grade-assessment'
df_train = pd.read_csv(os.path.join(data_dir, 'train.csv'))
df_test = pd.read_csv(os.path.join(data_dir, 'test.csv'))
df_sub = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))
model_dir = '../input/ck-epoch6/'
image_folder = os.path.join(data_dir, 'test_images')
# The hidden test images only exist at submission time; otherwise run a
# 101-row smoke test on the training images.
is_test = os.path.exists(image_folder)
image_folder = image_folder if is_test else os.path.join(data_dir, 'train_images')
df = df_test if is_test else df_train.loc[:100]
tile_size = 256
image_size = 256
n_tiles = 36
batch_size = 2
num_workers = 4
device = torch.device('cuda')
print(image_folder)
# Soft-voting ensemble of forest + XGBoost + CatBoost, fit on all data.
votingC = VotingClassifier(estimators=[('rfc', forest), ('xgb', gbm), ('cat', cat)], voting='soft', n_jobs=4)
votingC = votingC.fit(X, Y)
Titanic - Machine Learning from Disaster
14,301,061
class enetv2(nn.Module):
    """EfficientNet backbone with a fresh 5-way linear head."""

    def __init__(self, backbone, out_dim):
        super(enetv2, self).__init__()
        self.enet = enet.EfficientNet.from_name(backbone)
        # New head sized from the stock classifier, which is then bypassed.
        self.myfc = nn.Linear(self.enet._fc.in_features, out_dim)
        self.enet._fc = nn.Identity()

    def extract(self, x):
        return self.enet(x)

    def forward(self, x):
        features = self.extract(x)
        return self.myfc(features)


def load_models(model_files):
    """Load each checkpoint into an eval-mode enetv2 on the configured device."""
    loaded = []
    for model_f in model_files:
        model_f = os.path.join(model_dir, model_f)
        backbone = 'efficientnet-b1'
        model = enetv2(backbone, out_dim=5)
        ck = torch.load(model_f)
        model.load_state_dict(ck['model_state_dict'], strict=True)
        model.eval()
        model.to(device)
        loaded.append(model)
        print(f'{model_f} loaded!')
    return loaded


model_files = [
    'model_checkpoint_epoch6.pth'
]
models = load_models(model_files)
# Predict survival on the test set with the voting ensemble and write the
# submission in the format Kaggle expects (PassengerId, Survived).
predictions = votingC.predict(X_test)
output = pd.DataFrame({
    'PassengerId': test_data.PassengerId,
    'Survived': predictions,
})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!")
Titanic - Machine Learning from Disaster
14,301,061
<load_pretrained><EOS>
Titanic - Machine Learning from Disaster
14,255,995
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<save_to_csv>
!pip uninstall -y dataclasses
Titanic - Machine Learning from Disaster
14,255,995
# Score both loaders with the first model, average the sigmoid outputs of
# the two passes, and turn the summed bin probabilities into integer ISUP
# grades for the submission file.
LOGITS = []
LOGITS2 = []
with torch.no_grad():
    for data in tqdm(loader):
        data = data.to(device)
        LOGITS.append(models[0](data))
    for data in tqdm(loader2):
        data = data.to(device)
        LOGITS2.append(models[0](data))

LOGITS = (torch.cat(LOGITS).sigmoid().cpu() + torch.cat(LOGITS2).sigmoid().cpu()) / 2
PREDS = LOGITS.sum(1).round().numpy()

df['isup_grade'] = PREDS.astype(int)
df[['image_id', 'isup_grade']].to_csv('submission.csv', index=False)
print(df.head())
print()
print(df.isup_grade.value_counts())
# Load the Titanic train/test splits from the competition input directory.
# (head() only renders inside a notebook cell; it is a no-op in a script.)
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
Titanic - Machine Learning from Disaster
14,255,995
# Silence warnings and expose the vendored semi-supervised ResNeXt package.
# Fix: '.. /input' (stray space from extraction) -> '../input'; otherwise
# the inserted path never resolves and the import below it would fail.
warnings.filterwarnings("ignore")
sys.path.insert(0, '../input/semisupervised-imagenet-models/semi-supervised-ImageNet1K-models-master/')
# Build one-hot-encoded feature matrices; the label is kept inside X_train
# for now and split off later.
# NOTE(review): get_dummies runs on train and test independently, so a
# category present in only one split (e.g. a rare Embarked value) would
# misalign the columns — verify or reindex test onto train's columns.
features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "Age"] label = ["Survived"] X_train = pd.get_dummies(train_data[features + label]) X_test = pd.get_dummies(test_data[features]) X_train.head()
Titanic - Machine Learning from Disaster
14,255,995
# Data locations and the three 4-fold checkpoint families for the PANDA
# ensemble, plus a second family for the alternate head.
# Fix: '.. /input' (stray space from extraction) -> '../input'.
DATA = '../input/prostate-cancer-grade-assessment/test_images'
TEST = '../input/prostate-cancer-grade-assessment/test.csv'
SAMPLE = '../input/prostate-cancer-grade-assessment/sample_submission.csv'

MODELS = (
    [f'../input/panda-init-class-model1/RNXT50_128krnew1_3featureB_{i}.pth' for i in range(4)]
    + [f'../input/panda-init-class-model1/RNXT50_128krnew1_2featureB_{i}.pth' for i in range(4)]
    + [f'../input/panda-init-class-model1/RNXT50_128krnew1_1featureB_{i}.pth' for i in range(4)]
)
MODELS1 = [f'../input/panda-init-class-model1/RNXT50_128krnew_2_{i}.pth' for i in range(4)]

sz = 128          # tile side length in pixels
bs = 1            # inference batch size
N_max = 3 * 128   # tile cap for the first pipeline
N_max1 = 256      # tile cap for the second pipeline
nworkers = 2
# Inspect per-column NaN counts in the training features.
X_train.isna().sum()
Titanic - Machine Learning from Disaster
14,255,995
class Modelm1(nn.Module):
    """Semi-supervised ResNeXt50 encoder + concat-pool head (variant m1).

    The forward pass folds the per-slide tile dimension into the batch,
    encodes every tile, lays the tile feature maps side by side into one
    wide map, and pools that down to a single logit.
    """

    def __init__(self, arch='resnext50_32x4d', n=6, pre=True):
        super().__init__()
        m = _resnext(semi_supervised_model_urls[arch], Bottleneck, [3, 4, 6, 3],
                     False, progress=False, groups=32, width_per_group=4)
        self.enc = nn.Sequential(*list(m.children())[:-2])
        nc = list(m.children())[-1].in_features
        self.head = nn.Sequential(
            AdaptiveConcatPool2d(), Flatten(),
            nn.Linear(2 * nc, 512), Mish(), nn.GroupNorm(32, 512),
            nn.Dropout(0.5), nn.Linear(512, n),
        )

    def forward(self, x):
        shape = x.shape
        n = shape[1]  # number of tiles per slide
        x = x.view(-1, shape[2], shape[3], shape[4])
        x = self.enc(x)
        shape = x.shape
        x = (x.view(-1, n, shape[1], shape[2], shape[3])
              .permute(0, 2, 1, 3, 4).contiguous()
              .view(-1, shape[1], shape[2] * n, shape[3]))
        x = self.head(x)
        return x[:, :1]
# Inspect per-column NaN counts in the test features.
X_test.isna().sum()
Titanic - Machine Learning from Disaster
14,255,995
class AdaptiveConcatPool2dm1(Module):
    "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`."

    def __init__(self, sz:Optional[int]=None):
        "Output will be 2*sz or 2 if sz is None"
        self.output_size = sz or 1
        self.ap = nn.AdaptiveAvgPool2d(self.output_size)
        self.mp = nn.AdaptiveMaxPool2d(self.output_size)

    def forward(self, x):
        # Max-pool first, then avg-pool — this channel order must match the
        # weights the checkpoints were trained with.
        return torch.cat([self.mp(x), self.ap(x)], 1)


def _resnext(url, block, layers, pretrained, progress, **kwargs):
    # Build the architecture only; weights are loaded from local checkpoints.
    return ResNet(block, layers, **kwargs)


class Model(nn.Module):
    """ResNeXt50 encoder + concat-pool head producing one logit per slide."""

    def __init__(self, arch='resnext50_32x4d', n=6, pre=True):
        super().__init__()
        m = _resnext(semi_supervised_model_urls[arch], Bottleneck, [3, 4, 6, 3],
                     False, progress=False, groups=32, width_per_group=4)
        self.enc = nn.Sequential(*list(m.children())[:-2])
        nc = list(m.children())[-1].in_features
        self.head = nn.Sequential(
            AdaptiveConcatPool2d(), Flatten(),
            nn.Linear(2 * nc, 512), Mish(), nn.GroupNorm(32, 512),
            nn.Dropout(0.5), nn.Linear(512, n),
        )

    def forward(self, x):
        shape = x.shape
        n = shape[1]  # number of tiles per slide
        x = x.view(-1, shape[2], shape[3], shape[4])
        x = self.enc(x)
        shape = x.shape
        x = (x.view(-1, n, shape[1], shape[2], shape[3])
              .permute(0, 2, 1, 3, 4).contiguous()
              .view(-1, shape[1], shape[2] * n, shape[3]))
        x = self.head(x)
        return x[:, :1]
# Impute missing numeric values with the training-set column means.
# (Filling NaNs with a column's mean leaves that mean unchanged, so using
# the same pre-computed means for both frames is equivalent to the original
# sequential fills.)
train_means = X_train.mean()
X_train = X_train.fillna(train_means)
X_test = X_test.fillna(train_means)
Titanic - Machine Learning from Disaster
14,255,995
class AdaptiveConcatPool2dm(Module):
    "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`."

    def __init__(self, sz:Optional[int]=None):
        "Output will be 2*sz or 2 if sz is None"
        self.output_size = sz or 1
        self.ap = nn.AdaptiveAvgPool2d(self.output_size)
        self.mp = nn.AdaptiveMaxPool2d(self.output_size)

    def forward(self, x):
        # Avg-pool first, then max-pool — the opposite channel order to the
        # m1 variant; it must match the checkpoints loaded into Modelm.
        return torch.cat([self.ap(x), self.mp(x)], 1)


def _resnext(url, block, layers, pretrained, progress, **kwargs):
    # Build the architecture only; weights are loaded from local checkpoints.
    return ResNet(block, layers, **kwargs)


class Modelm(nn.Module):
    """ResNeXt50 encoder + avg/max concat-pool head, one logit per slide."""

    def __init__(self, arch='resnext50_32x4d', n=6, pre=True):
        super().__init__()
        m = _resnext(semi_supervised_model_urls[arch], Bottleneck, [3, 4, 6, 3],
                     False, progress=False, groups=32, width_per_group=4)
        self.enc = nn.Sequential(*list(m.children())[:-2])
        nc = list(m.children())[-1].in_features
        self.head = nn.Sequential(
            AdaptiveConcatPool2dm(), Flatten(),
            nn.Linear(2 * nc, 512), Mish(), nn.GroupNorm(32, 512),
            nn.Dropout(0.5), nn.Linear(512, n),
        )

    def forward(self, x):
        shape = x.shape
        n = shape[1]  # number of tiles per slide
        x = x.view(-1, shape[2], shape[3], shape[4])
        x = self.enc(x)
        shape = x.shape
        x = (x.view(-1, n, shape[1], shape[2], shape[3])
              .permute(0, 2, 1, 3, 4).contiguous()
              .view(-1, shape[1], shape[2] * n, shape[3]))
        x = self.head(x)
        return x[:, :1]
# Split the target vector off from the feature matrix.
y = X_train[label].values.ravel()
X_train = X_train.drop(label, axis=1)
Titanic - Machine Learning from Disaster
14,255,995
# Materialise every checkpoint as a float32, eval-mode CUDA model.
# The last four MODELS paths belong to the Modelm head; the rest use Model;
# all MODELS1 paths use the Modelm1 head.
models = []
for path in MODELS[:-4]:
    state_dict = torch.load(path, map_location=torch.device('cpu'))
    model = Model(n=1 + 10)
    model.load_state_dict(state_dict)
    model.float()
    model.eval()
    model.cuda()
    models.append(model)
for path in MODELS[-4:]:
    state_dict = torch.load(path, map_location=torch.device('cpu'))
    model = Modelm(n=1 + 10)
    model.load_state_dict(state_dict)
    model.float()
    model.eval()
    model.cuda()
    models.append(model)

models2 = []
for path in MODELS1:
    state_dict = torch.load(path, map_location=torch.device('cpu'))
    model = Modelm1(n=1 + 10)
    model.load_state_dict(state_dict)
    model.float()
    model.eval()
    model.cuda()
    models2.append(model)

del state_dict
# Ray Tune search spaces; the "class" entry carries the estimator type and
# is popped off before instantiation.
dt_config = {
    "class": DecisionTreeClassifier,
    "criterion": tune.choice(['gini', 'entropy']),
    "max_depth": tune.randint(2, 8),
    "min_samples_split": tune.randint(2, 10),
    'min_samples_leaf': tune.randint(1, 10),
    "random_state": 1,
}
rf_config = {
    "class": RandomForestClassifier,
    "max_depth": tune.randint(2, 8),
    "n_estimators": tune.qrandint(lower=10, upper=200, q=10),
    'random_state': 1,
}
xgb_config = {
    "class": xgb.XGBClassifier,
    "max_depth": tune.randint(1, 9),
    "min_child_weight": tune.choice([1, 2, 3]),
    "subsample": tune.uniform(0.5, 1.0),
    "eta": tune.loguniform(1e-4, 1e-1),
    'random_state': 1,
}
svm_config = {
    "class": SVC,
    "kernel": tune.choice(['linear', 'rbf', 'sigmoid']),
    "gamma": tune.choice(['scale', 'auto']),
    "random_state": 1,
}
Titanic - Machine Learning from Disaster
14,255,995
# PANDA inference with 8-fold dihedral TTA (flips + transposes) over two
# tilings; every model scores every TTA view, the logits are averaged, and
# 6*sigmoid(logit) clamped to [0,5] gives the ISUP grade. Falls back to the
# sample submission when the hidden test images are absent.
sub_df = pd.read_csv(SAMPLE) if os.path.exists(DATA): ds = PandaDataset(DATA,TEST) dl = DataLoader(ds, batch_size=bs, num_workers=nworkers, shuffle=False) names,preds = [],[] with torch.no_grad() : for x,x2,y in tqdm(dl): x = x.cuda() x = torch.stack([x,x.flip(-1),x.flip(-2),x.flip(-1,-2),x.transpose(-1,-2),\ x.transpose(-1,-2 ).flip(-1), x.transpose(-1,-2 ).flip(-2),\ x.transpose(-1,-2 ).flip(-1,-2)],1) x = x.view(8,-1,3,sz,sz) p = [model(x[a].unsqueeze(0)) for model in models for a in range(x.shape[0])] x2 = x2.cuda() x2 = torch.stack([x2,x2.flip(-1),x2.flip(-2),x2.flip(-1,-2),x2.transpose(-1,-2),\ x2.transpose(-1,-2 ).flip(-1), x2.transpose(-1,-2 ).flip(-2),\ x2.transpose(-1,-2 ).flip(-1,-2)],1) x2 = x2.view(8,-1,3,sz,sz) p += [model(x2[a].unsqueeze(0)) for model in models2 for a in range(x2.shape[0])] p = torch.stack(p,1) p = p.view(bs,8*(len(models)+len(models2)) ).mean(1 ).cpu() p = torch.clamp(( 6.0*torch.sigmoid(p)).long().view(-1),0,5) names.append(y) preds.append(p) names = np.concatenate(names) preds = torch.cat(preds ).numpy() sub_df = pd.DataFrame({'image_id': names, 'isup_grade': preds}) sub_df.to_csv('submission.csv', index=False) sub_df.head()<save_to_csv>
# Method registry plus the Tune driver: `trial` cross-validates one sampled
# config and reports mean accuracy; `run_tune` searches a method's space via
# Optuna; `export_csv` writes a per-method submission. The trailing lines
# smoke-test each estimator family with a fixed config.
# NOTE(review): passing tree_config (max_depth) to SVC would fail, hence the
# separate kernel-only kwargs for the "svm" entry.
methods = {"rf": rf_config, "xgb": xgb_config, "svm": svm_config, "dt": dt_config} def export_csv(predictions, name:str): output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) filename = f'{name}_submission.csv' output.to_csv(filename, index=False) print(f"Your submission({name})was successfully saved! (unknown)") def run_tune(method: str, num_samples: int)-> tune.ExperimentAnalysis: optuna_search = OptunaSearch(metric="mean_accuracy", mode="max") return tune.run( trial, config=methods[method], num_samples=num_samples, search_alg=optuna_search, verbose=-1 ) def trial(config: Dict[str, Any]): _config = deepcopy(config) model_class = config["class"] _config.pop("class", None) model = model_class( **_config ) scores = cross_val_score(model, X_train, y, cv=5, scoring="accuracy") tune.report( mean_accuracy=np.mean(scores), done=True ) tree_config ={"max_depth":5} model = methods["dt"]["class"](**tree_config) scores = cross_val_score(model, X_train, y, cv=5, scoring="accuracy") model = methods["rf"]["class"](**tree_config) scores = cross_val_score(model, X_train, y, cv=5, scoring="accuracy") model = methods["xgb"]["class"](**tree_config) scores = cross_val_score(model, X_train, y, cv=5, scoring="accuracy") model = methods["svm"]["class"](**{"kernel": "linear"}) scores = cross_val_score(model, X_train, y, cv=5, scoring="accuracy" )
Titanic - Machine Learning from Disaster
14,255,995
# Persist the final submission file and preview the first rows.
sub_df.to_csv("submission.csv", index=False)
sub_df.head()
def run_experiment(method: str, num_samples: int=50)-> Dict[str, Any]:
    """Tune `method`, refit the best config on all data, export a submission.

    Returns the best Ray Tune trial so the caller can inspect its result
    and config.
    """
    best = run_tune(method, num_samples).get_best_trial(metric="mean_accuracy", mode="max")
    best_config = deepcopy(best.config)
    estimator_cls = best_config.pop("class")  # "class" holds the estimator type
    estimator = estimator_cls(**best_config)
    estimator.fit(X_train, y)
    export_csv(estimator.predict(X_test), method)
    return best
Titanic - Machine Learning from Disaster
14,255,995
# Make the bundled EfficientNet-PyTorch sources importable.
package_path = '/kaggle/input/efficientnet-pytorch/EfficientNet-PyTorch/EfficientNet-PyTorch-master'
sys.path.append(package_path)
# Tune a decision tree (100 trials) and report the best trial's result/config.
dt = run_experiment(method="dt", num_samples=100) print(f"result {dt.last_result}") print(f"{dt.config}" )
Titanic - Machine Learning from Disaster
14,255,995
<define_variables>
# Tune a random forest (100 trials) and report the best trial's result/config.
rf = run_experiment(method="rf", num_samples=100) print(f"result {rf.last_result}") print(f"{rf.config}" )
Titanic - Machine Learning from Disaster
14,255,995
# Two-model tile-ensemble configuration (224px tiles and expanded 288px
# tiles) plus global data paths and inference constants.
# Fix: '.. /input' (stray space from extraction) -> '../input'.
mean_224 = torch.tensor([1.0 - 0.82097102, 1.0 - 0.63302738, 1.0 - 0.75392824])
std_224 = torch.tensor([0.37723779, 0.49839178, 0.4015415])
first_resnext_pth_path_224 = "../input/lb91-224tile/LB91_224tile_best_resnext50_X20_30e_0.pth"
ensemble_1 = {"mean": mean_224, "std": std_224, "tileSize": 224, "isExpandTile": False,
              "arch": "resnext50_32x4d", "path": first_resnext_pth_path_224, "NN": 64}

mean_288 = torch.tensor([1.0 - 0.82809473, 1.0 - 0.65345352, 1.0 - 0.76934528])
std_288 = torch.tensor([0.40728608, 0.52557509, 0.42674383])
second_resnext_pth_path_288 = "../input/cv8899-expand-288/cv8899_expand_288_tiles_resnext50_X20_30e_0.pth"
expand_sz = 288
ensemble_2 = {"mean": mean_288, "std": std_288, "tileSize": 288, "isExpandTile": True,
              "arch": "resnext50_32x4d", "path": second_resnext_pth_path_288, "NN": 64}

ensemble_list = []
ensemble_list.append(ensemble_1)
ensemble_list.append(ensemble_2)
# Checkpoint paths in ensemble order, consumed by the loading loop below.
MODELS = []
for model_path_index in range(len(ensemble_list)):
    MODELS.append(ensemble_list[model_path_index]['path'])

DATA = '../input/prostate-cancer-grade-assessment/test_images'
TEST = '../input/prostate-cancer-grade-assessment/test.csv'
DATA2 = '../input/prostate-cancer-grade-assessment/train_images'
TEST2 = '../input/prostate-cancer-grade-assessment/train.csv'
SAMPLE = '../input/prostate-cancer-grade-assessment/sample_submission.csv'
mixnet_pth = "../input/segmmixnetv2/checkpoint_segm_mixnet_hardaug_14elr0005_acc88.59.pth"

bs = 2       # inference batch size
sz2 = 224    # secondary tile size
NUM = 20
NN = 64
nworkers = 2
debug = False
# Tune XGBoost (100 trials) and report the best trial's result/config.
# NOTE(review): this rebinds the name `xgb`, shadowing the xgboost module
# for any later code in this kernel.
xgb = run_experiment("xgb", num_samples=100) print(f"result {xgb.last_result}") print(f"{xgb.config}" )
Titanic - Machine Learning from Disaster
14,255,995
<choose_model_class><EOS>
# Tune the SVM (only 8 trials — the space has 3x2 discrete choices).
svm = run_experiment(method="svm", num_samples=8) print(f"result {svm.last_result}") print(f"{svm.config}" )
Titanic - Machine Learning from Disaster
14,242,857
<SOS> metric: categorizationaccuracy Kaggle data source: titanic-machine-learning-from-disaster<choose_model_class>
%matplotlib inline titanic_df = pd.read_csv('.. /input/titanic/train.csv') titanic_df.head(5 )
Titanic - Machine Learning from Disaster
14,242,857
# Build a frozen MixNet-XL trunk (no pretrained download) and replace the
# head with a single-logit linear layer.
# NOTE(review): both `classifier` and `fc` are assigned — timm's MixNet uses
# `classifier`; setting `fc` too looks defensive. Confirm which attribute
# the checkpoint loaded below actually contains.
segmmodel = timm.create_model('mixnet_xl', pretrained=False) for param in segmmodel.parameters() : param.requires_grad = False segmmodel.classifier=nn.Linear(1536, 1) segmmodel.fc = nn.Linear(1536, 1 )
# Impute missing values: mean age, sentinel 'N' for unknown Cabin/Embarked.
# Fix: fillna(..., inplace=True) on a column selection is chained
# assignment — in recent pandas it operates on a temporary and is
# deprecated. Assign the filled column back instead.
titanic_df['Age'] = titanic_df['Age'].fillna(titanic_df['Age'].mean())
titanic_df['Cabin'] = titanic_df['Cabin'].fillna('N')
titanic_df['Embarked'] = titanic_df['Embarked'].fillna('N')
titanic_df.isnull().sum()
Titanic - Machine Learning from Disaster
14,242,857
# Restore the segmentation-head weights and move the model to the GPU,
# dropping the checkpoint dict afterwards to free host memory.
checkpoint = torch.load(mixnet_pth, map_location=device)
segmmodel.load_state_dict(checkpoint)
segmmodel.eval()
segmmodel.cuda()
del checkpoint
# Show the raw category distributions for the columns encoded later.
for col in ('Sex', 'Cabin', 'Embarked'):
    print(col + ' ', '-------------- ', titanic_df[col].value_counts(), ' ')
Titanic - Machine Learning from Disaster
14,242,857
# Load every ensemble checkpoint into a float32, eval-mode CUDA model,
# picking each architecture from its ensemble config entry.
aknell_models = []
for model_index, path in enumerate(MODELS):
    print("path", path)
    state_dict = torch.load(path, map_location=torch.device(device))
    model = Model(arch=ensemble_list[model_index]['arch'])
    model.load_state_dict(state_dict)
    model.float()
    model.eval()
    model.cuda()
    aknell_models.append(model)
del state_dict
# Reduce Cabin to its deck letter (first character) and show the counts.
titanic_df['Cabin'] = titanic_df['Cabin'].str[:1]
print(titanic_df['Cabin'].value_counts())
Titanic - Machine Learning from Disaster
14,242,857
# Load the test metadata; in fallback/debug mode (chk) keep only the first
# nchk rows.
# Fix: the truncation previously ran unconditionally after a no-op
# `if chk: pass`, so it executed even when chk was False (where nchk may be
# undefined) — guard it properly.
test = pd.read_csv(TEST)
if chk:
    test = test[:nchk]
# Survival counts broken down by sex.
titanic_df.groupby(['Sex','Survived'])['Survived'].count()
Titanic - Machine Learning from Disaster
14,242,857
# Decide whether the hidden test images exist (submission run) or fall back
# to the training set, then declare six per-model tiling/normalisation
# configs and collect five of them into ensemble_shujun_list.
# NOTE(review): the `if chk: pass` followed by `df=df[:][:nchk]` truncates
# unconditionally — nchk must be defined for this line to run; likely the
# truncation was meant to be inside the if. Also note ensemble_shujun_3 is
# defined but never appended.
test_image_dir='/kaggle/input/prostate-cancer-grade-assessment/test_images' if os.path.exists(test_image_dir): print('test set exist') chk=False mode='test' test_image_dir='/kaggle/input/prostate-cancer-grade-assessment/{}_images'.format(mode) csv_path='/kaggle/input/prostate-cancer-grade-assessment/{}.csv'.format(mode) else: print('test set not exist') chk=True mode='train' test_image_dir='/kaggle/input/prostate-cancer-grade-assessment/{}_images'.format(mode) csv_path='/kaggle/input/prostate-cancer-grade-assessment/{}.csv'.format(mode) submission_path='/kaggle/input/prostate-cancer-grade-assessment/sample_submission.csv' df=pd.read_csv(csv_path) if chk: pass df=df[:][:nchk] mean_256 = [0.85841531, 0.78983734, 0.89534979] std_256 = [0.16897951, 0.25758676, 0.13730845] first_effb3_path_256 = '/kaggle/input/pandalayertest4wunertaintyacc/pandalayertest4wunertaintyacc/fold{}top{}.ckpt' ensemble_shujun_1 ={"mean":mean_256,"std":std_256,"tileImageSize":256,"isCutThreshold":False,"arch":"resnext50_32x4d","path":first_effb3_path_256,"TILE_SIZE":36,"TILE_ALL":False,"CVT_COLOR":True} mean_224= [0.81702482, 0.6299724, 0.75427603] std_224=[0.40702703, 0.52099041, 0.42290275] second_resnext50_path_224 = '/kaggle/input/pandaakselltest3/fold{}top{}.ckpt' ensemble_shujun_2 ={"mean":mean_224,"std":std_224,"tileImageSize":224,"isCutThreshold":True,"arch":"resnext50_32x4d","path":second_resnext50_path_224,"TILE_SIZE":20,"TILE_ALL":False,"CVT_COLOR":True} mean_224_30tile= [0.82514849, 0.64166622, 0.76170417] std_224_30tile=[0.40444484, 0.52074353, 0.42308628] third_resnext50_path_224 = '/kaggle/input/pandaakselltest4/fold{}top{}.ckpt' ensemble_shujun_3 ={"mean":mean_224_30tile,"std":std_224_30tile,"tileImageSize":224,"isCutThreshold":True,"arch":"resnext50_32x4d","path":third_resnext50_path_224,"TILE_SIZE":30,"TILE_ALL":False,"CVT_COLOR":False} mean_288 = mean_224 std_288 = std_224 fourth_resnext50_path_288 = '/kaggle/input/pandaakselltest5/fold{}top{}.ckpt' ensemble_shujun_4 
={"mean":mean_288,"std":std_288,"tileImageSize":288,"isCutThreshold":True,"arch":"resnext50_32x4d","path":fourth_resnext50_path_288,"TILE_SIZE":20,"TILE_ALL":False,"CVT_COLOR":False} mean_224_32= [0.82514849, 0.64166622, 0.76170417] std_224_32=[0.40444484, 0.52074353, 0.42308628] fifth_resnext50_path_224 = '/kaggle/input/pandaakselltest7/fold{}top{}.ckpt' ensemble_shujun_5 ={"mean":mean_224_32,"std":std_224_32,"tileImageSize":224,"isCutThreshold":True,"arch":"resnext50_32x4d","path":fifth_resnext50_path_224,"TILE_SIZE":32,"TILE_ALL":True,"CVT_COLOR":True} mean_256_64= [0.8021112084388733, 0.7040286064147949, 0.8552106022834778] std_256_64=[0.18155023455619812, 0.27437373995780945, 0.15957728028297424] sixth_resnext50_path_256 = '/kaggle/input/resnext50-new/fold{}top{}.ckpt' ensemble_shujun_6 ={"mean":mean_256_64,"std":std_256_64,"tileImageSize":256,"isCutThreshold":False,"arch":"resnext50_32x4d","path":sixth_resnext50_path_256,"TILE_SIZE":64,"TILE_ALL":False,"CVT_COLOR":True} ensemble_shujun_list = [] ensemble_shujun_list.append(ensemble_shujun_1) ensemble_shujun_list.append(ensemble_shujun_2) ensemble_shujun_list.append(ensemble_shujun_4) ensemble_shujun_list.append(ensemble_shujun_5) ensemble_shujun_list.append(ensemble_shujun_6 )<normalization>
def encode_feature(dataDF):
    """Label-encode the Cabin, Sex and Embarked columns in place.

    Each column is replaced by integer codes from a fresh LabelEncoder
    (fit_transform is equivalent to the separate fit + transform).
    Returns the mutated DataFrame.
    """
    for col in ('Cabin', 'Sex', 'Embarked'):
        encoder = preprocessing.LabelEncoder()
        dataDF[col] = encoder.fit_transform(dataDF[col])
    return dataDF

titanic_df = encode_feature(titanic_df)
titanic_df.head()
Titanic - Machine Learning from Disaster