kernel_id int64 24.2k 23.3M | prompt stringlengths 8 1.85M | completetion stringlengths 1 182k | comp_name stringlengths 5 57 |
|---|---|---|---|
8,522,656 | train["fold"] = -1
for fold_id,(trn_idx, val_idx)in enumerate(train_val_indexs):
train.loc[val_idx, "fold"] = fold_id
train.groupby("fold")[CLASSES].sum()<train_model> | package_path = '../input/kha-efficientnet/EfficientNet-PyTorch/'
sys.path.append(package_path)
| Deepfake Detection Challenge |
8,522,656 | def resize_images(img_id, input_dir, output_dir, resize_to=(640, 640), ext="png"):
img_path = input_dir / f"{img_id}.jpg"
save_path = output_dir / f"{img_id}.{ext}"
img = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, resize_to)
cv2.imwrite(str(save_path), img,)
TEST_RESIZED = TMP / "test_{0}x{1}".format(*IMAGE_SIZE)
TEST_RESIZED.mkdir(exist_ok=True)
TEST_RESIZED
_ = Parallel(n_jobs=2, verbose=5 )([
delayed(resize_images )(img_id, TEST, TEST_RESIZED, IMAGE_SIZE, "png")
for img_id in smpl_sub.StudyInstanceUID.values
] )<choose_model_class> | %%capture
!pip install /kaggle/input/khafacenet/facenet_pytorch-2.2.7-py3-none-any.whl
!pip install /kaggle/input/imutils/imutils-0.5.3 | Deepfake Detection Challenge |
8,522,656 | def get_activation(activ_name: str="relu"):
act_dict = {
"relu": nn.ReLU(inplace=True),
"tanh": nn.Tanh() ,
"sigmoid": nn.Sigmoid() ,
"identity": nn.Identity() }
if activ_name in act_dict:
return act_dict[activ_name]
else:
raise NotImplementedError
class Conv2dBNActiv(nn.Module):
    """Conv2d -> (optional BatchNorm2d) -> activation, packaged as one module.

    The activation is looked up by name via `get_activation`.
    """

    def __init__(
        self, in_channels: int, out_channels: int,
        kernel_size: int, stride: int=1, padding: int=0,
        bias: bool=False, use_bn: bool=True, activ: str="relu"
    ):
        super(Conv2dBNActiv, self).__init__()
        modules = [nn.Conv2d(
            in_channels, out_channels,
            kernel_size, stride, padding, bias=bias)]
        if use_bn:
            modules.append(nn.BatchNorm2d(out_channels))
        modules.append(get_activation(activ))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Run the conv/BN/activation stack on x."""
        return self.layers(x)
class SSEBlock(nn.Module):
    """Spatial squeeze-and-excitation: gate every spatial location of x by
    the sigmoid of a 1x1 channel-squeeze convolution (broadcast over channels)."""

    def __init__(self, in_channels: int):
        super(SSEBlock, self).__init__()
        self.channel_squeeze = nn.Conv2d(
            in_channels=in_channels, out_channels=1,
            kernel_size=1, stride=1, padding=0, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # (B, 1, H, W) attention map in (0, 1).
        gate = self.channel_squeeze(x)
        gate = self.sigmoid(gate)
        return x * gate
class SpatialAttentionBlock(nn.Module):
def __init__(
self, in_channels: int,
out_channels_list: tp.List[int],
):
super(SpatialAttentionBlock, self ).__init__()
self.n_layers = len(out_channels_list)
channels_list = [in_channels] + out_channels_list
assert self.n_layers > 0
assert channels_list[-1] == 1
for i in range(self.n_layers - 1):
in_chs, out_chs = channels_list[i: i + 2]
layer = Conv2dBNActiv(in_chs, out_chs, 3, 1, 1, activ="relu")
setattr(self, f"conv{i + 1}", layer)
in_chs, out_chs = channels_list[-2:]
layer = Conv2dBNActiv(in_chs, out_chs, 3, 1, 1, activ="sigmoid")
setattr(self, f"conv{self.n_layers}", layer)
def forward(self, x):
h = x
for i in range(self.n_layers):
h = getattr(self, f"conv{i + 1}" )(h)
h = h * x
return h<choose_model_class> | Deepfake Detection Challenge | |
8,522,656 | class MultiHeadResNet200D(nn.Module):
def __init__(
self, out_dims_head: tp.List[int]=[3, 4, 3, 1], pretrained=False
):
self.base_name = "resnet200d_320"
self.n_heads = len(out_dims_head)
super(MultiHeadResNet200D, self ).__init__()
base_model = timm.create_model(
self.base_name, num_classes=sum(out_dims_head), pretrained=False)
in_features = base_model.num_features
if pretrained:
pretrained_model_path = '../input/startingpointschestx/resnet200d_320_chestx.pth'
state_dict = dict()
for k, v in torch.load(pretrained_model_path, map_location='cpu')["model"].items() :
if k[:6] == "model.":
k = k.replace("model.", "")
state_dict[k] = v
base_model.load_state_dict(state_dict)
base_model.reset_classifier(0, '')
self.backbone = base_model
for i, out_dim in enumerate(out_dims_head):
layer_name = f"head_{i}"
layer = nn.Sequential(
SpatialAttentionBlock(in_features, [64, 32, 16, 1]),
nn.AdaptiveAvgPool2d(output_size=1),
nn.Flatten(start_dim=1),
nn.Linear(in_features, in_features),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(in_features, out_dim))
setattr(self, layer_name, layer)
def forward(self, x):
h = self.backbone(x)
hs = [
getattr(self, f"head_{i}" )(h)for i in range(self.n_heads)]
y = torch.cat(hs, axis=1)
return y
m = MultiHeadResNet200D([3, 4, 3, 1], False)
m = m.eval()
x = torch.rand(1, 3, 256, 256)
with torch.no_grad() :
y = m(x)
print("[forward test]")
print("input:\t{}\noutput:\t{}".format(x.shape, y.shape))
del m; del x; del y
gc.collect()<categorify> | Deepfake Detection Challenge | |
8,522,656 | class LabeledImageDataset(data.Dataset):
def __init__(
self,
file_list: tp.List[
tp.Tuple[tp.Union[str, Path], tp.Union[int, float, np.ndarray]]],
transform_list: tp.List[tp.Dict],
):
self.file_list = file_list
self.transform = ImageTransformForCls(transform_list)
def __len__(self):
return len(self.file_list)
def __getitem__(self, index):
img_path, label = self.file_list[index]
img = self._read_image_as_array(img_path)
img, label = self.transform(( img, label))
return img, label
def _read_image_as_array(self, path: str):
img_arr = cv2.imread(str(path))
img_arr = cv2.cvtColor(img_arr, cv2.COLOR_BGR2RGB)
return img_arr<create_dataframe> | %matplotlib inline
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
| Deepfake Detection Challenge |
8,522,656 | def get_dataloaders_for_inference(
file_list: tp.List[tp.List], batch_size=64,
):
dataset = LabeledImageDataset(
file_list,
transform_list=[
["Normalize", {
"always_apply": True, "max_pixel_value": 255.0,
"mean": [0.4887381077884414], "std": [0.23064819430546407]}],
["ToTensorV2", {"always_apply": True}],
])
loader = data.DataLoader(
dataset,
batch_size=batch_size, shuffle=False,
num_workers=2, pin_memory=True,
drop_last=False)
return loader<categorify> | def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
return nn.Sequential(
conv_layer(inp, oup, 3, stride, 1, bias=False),
norm_layer(oup),
nlin_layer(inplace=True)
)
def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """Pointwise (1x1) convolution block: conv (no bias) -> norm -> activation."""
    modules = [
        conv_layer(inp, oup, 1, 1, 0, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*modules)
class Hswish(nn.Module):
    """Hard swish activation: x * ReLU6(x + 3) / 6 (MobileNetV3)."""

    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3., inplace=self.inplace)
        return x * gate / 6.
class Hsigmoid(nn.Module):
    """Hard sigmoid activation: ReLU6(x + 3) / 6 (MobileNetV3)."""

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        shifted = F.relu6(x + 3., inplace=self.inplace)
        return shifted / 6.
class SEModule(nn.Module):
    """Squeeze-and-excitation channel gating with a hard-sigmoid output."""

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            Hsigmoid(),
        )

    def forward(self, x):
        b, c = x.size(0), x.size(1)
        # Squeeze: global average over H, W -> (b, c).
        squeezed = self.avg_pool(x).view(b, c)
        # Excite: per-channel gate in (0, 1), reshaped for broadcasting.
        gate = self.fc(squeezed).view(b, c, 1, 1)
        return x * gate.expand_as(x)
class Identity(nn.Module):
    """No-op module; `channel` is accepted only for interface parity with
    SEModule and is ignored."""

    def __init__(self, channel):
        super(Identity, self).__init__()

    def forward(self, x):
        return x
def make_divisible(x, divisible_by=8):
    """Round x up to the nearest multiple of `divisible_by`."""
    return int(np.ceil(float(x) / divisible_by) * divisible_by)
class MobileBottleneck(nn.Module):
    """MobileNetV3 inverted-residual block: 1x1 expand -> depthwise kxk ->
    (optional SE) -> 1x1 linear projection, with a residual when shapes allow.

    Args:
        inp, oup: input / output channel counts.
        kernel: depthwise kernel size (3 or 5).
        stride: depthwise stride (1 or 2).
        exp: expanded (hidden) channel count.
        se: insert an SEModule after the depthwise conv.
        nl: nonlinearity name — 'RE' (ReLU) or 'HS' (hard swish).
    """
    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE'):
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        # Residual only when spatial size and channel count are preserved.
        self.use_res_connect = stride == 1 and inp == oup
        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        if nl == 'RE':
            nlin_layer = nn.ReLU
        elif nl == 'HS':
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        if se:
            SELayer = SEModule
        else:
            # No-op stand-in keeps the Sequential layout identical with/without SE.
            SELayer = Identity
        self.conv = nn.Sequential(
            # pointwise expansion
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # depthwise conv (groups == channels)
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),  # NOTE: SE is applied before the nonlinearity here
            nlin_layer(inplace=True),
            # pointwise projection — deliberately linear (no activation after BN)
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        )
    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
class MobileNetV3(nn.Module):
    """MobileNetV3 image classifier ('large' or 'small' variant).

    Args:
        n_class: output classes of the final linear layer.
        input_size: input resolution; asserted to be a multiple of 32.
        dropout: dropout rate before the classifier.
        mode: 'large' or 'small' block configuration.
        width_mult: channel width multiplier (>1.0 also widens last_channel).
    """
    def __init__(self, n_class=1000, input_size=224, dropout=0.8, mode='small', width_mult=1.0):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        if mode == 'large':
            # Each row: [kernel, expansion size, out channels, use SE, nonlinearity, stride]
            mobile_setting = [
                [3, 16, 16, False, 'RE', 1],
                [3, 64, 24, False, 'RE', 2],
                [3, 72, 24, False, 'RE', 1],
                [5, 72, 40, True, 'RE', 2],
                [5, 120, 40, True, 'RE', 1],
                [5, 120, 40, True, 'RE', 1],
                [3, 240, 80, False, 'HS', 2],
                [3, 200, 80, False, 'HS', 1],
                [3, 184, 80, False, 'HS', 1],
                [3, 184, 80, False, 'HS', 1],
                [3, 480, 112, True, 'HS', 1],
                [3, 672, 112, True, 'HS', 1],
                [5, 672, 160, True, 'HS', 2],
                [5, 960, 160, True, 'HS', 1],
                [5, 960, 160, True, 'HS', 1],
            ]
        elif mode == 'small':
            mobile_setting = [
                [3, 16, 16, True, 'RE', 2],
                [3, 72, 24, False, 'RE', 2],
                [3, 88, 24, False, 'RE', 1],
                [5, 96, 40, True, 'HS', 2],
                [5, 240, 40, True, 'HS', 1],
                [5, 240, 40, True, 'HS', 1],
                [5, 120, 48, True, 'HS', 1],
                [5, 144, 48, True, 'HS', 1],
                [5, 288, 96, True, 'HS', 2],
                [5, 576, 96, True, 'HS', 1],
                [5, 576, 96, True, 'HS', 1],
            ]
        else:
            raise NotImplementedError
        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        # Stem: 3x3 stride-2 conv with hard swish.
        self.features = [conv_bn(3, input_channel, 2, nlin_layer=Hswish)]
        self.classifier = []  # placeholder; replaced by the real head below
        for k, exp, c, se, nl, s in mobile_setting:
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl))
            input_channel = output_channel
        # Head inside `features`: 1x1 conv -> global avg pool -> 1x1 conv -> h-swish,
        # so the feature map leaving `features` is (B, last_channel, 1, 1).
        if mode == 'large':
            last_conv = make_divisible(960 * width_mult)
            self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        elif mode == 'small':
            last_conv = make_divisible(576 * width_mult)
            self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
            self.features.append(nn.AdaptiveAvgPool2d(1))
            self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
            self.features.append(Hswish(inplace=True))
        else:
            raise NotImplementedError
        self.features = nn.Sequential(*self.features)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(last_channel, n_class),
        )
        self._initialize_weights()
    def forward(self, x):
        x = self.features(x)
        # Spatial dims are already 1x1 here, so this mean just flattens to (B, C).
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        """Kaiming init for convs, unit/zero init for BN, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
def mobilenetv3(pretrained=False, **kwargs):
    """Build a MobileNetV3, optionally loading a checkpoint.

    NOTE(review): relies on a module-level `CFG` object with a `pretrained`
    checkpoint path — confirm it is defined before calling with pretrained=True.

    Args:
        pretrained: load weights from CFG.pretrained (strict key match).
        **kwargs: forwarded to the MobileNetV3 constructor.
    """
    model = MobileNetV3(**kwargs)
    if pretrained:
        state_dict = torch.load(CFG.pretrained)
        model.load_state_dict(state_dict, strict=True)
    return model
| Deepfake Detection Challenge |
8,522,656 | class ImageTransformBase:
def __init__(self, data_augmentations: tp.List[tp.Tuple[str, tp.Dict]]):
augmentations_list = [
self._get_augmentation(aug_name )(**params)
for aug_name, params in data_augmentations]
self.data_aug = albumentations.Compose(augmentations_list)
def __call__(self, pair: tp.Tuple[np.ndarray])-> tp.Tuple[np.ndarray]:
raise NotImplementedError
def _get_augmentation(self, aug_name: str)-> tp.Tuple[ImageOnlyTransform, DualTransform]:
if hasattr(albumentations, aug_name):
return getattr(albumentations, aug_name)
else:
return eval(aug_name)
class ImageTransformForCls(ImageTransformBase):
def __init__(self, data_augmentations: tp.List[tp.Tuple[str, tp.Dict]]):
super(ImageTransformForCls, self ).__init__(data_augmentations)
def __call__(self, in_arrs: tp.Tuple[np.ndarray])-> tp.Tuple[np.ndarray]:
img, label = in_arrs
augmented = self.data_aug(image=img)
img = augmented["image"]
return img, label<load_pretrained> | net = mobilenetv3(mode='small', pretrained=False)
net.classifier[1] = torch.nn.Linear(in_features=1280, out_features=1)
net = net.to(device)
state_dict = torch.load(CHECKPOINT)
net.load_state_dict(state_dict)
net.cuda()
net.eval() | Deepfake Detection Challenge |
8,522,656 | def load_setting_file(path: str):
with open(path)as f:
settings = yaml.safe_load(f)
return settings
def set_random_seed(seed: int = 42, deterministic: bool = False):
    """Seed every RNG used by the pipeline (hash, python, numpy, torch)."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # cuDNN determinism trades speed for reproducibility.
    torch.backends.cudnn.deterministic = deterministic
def run_inference_loop(stgs, model, loader, device):
model.to(device)
model.eval()
pred_list = []
with torch.no_grad() :
for x, t in tqdm(loader):
y_preds1 = model(x.to(device))
y_preds2 = model(x.flip(-1 ).to(device))
y_preds =(y_preds1.sigmoid().to('cpu' ).numpy() *0.6 + y_preds2.sigmoid().to('cpu' ).numpy() *0.4)
pred_list.append(y_preds)
pred_arr = np.concatenate(pred_list)
del pred_list
return pred_arr<set_options> | net2 = mobilenetv3(mode='small', pretrained=False)
net2.classifier[1] = torch.nn.Linear(in_features=1280, out_features=1)
net2 = net2.to(device)
state_dict = torch.load(CHECKPOINT2)
net2.load_state_dict(state_dict)
net2.cuda()
net2.eval() | Deepfake Detection Challenge |
8,522,656 | if not torch.cuda.is_available() :
device = torch.device("cpu")
else:
device = torch.device("cuda")
print(device )<load_pretrained> | net3 = EfficientNet.from_name("efficientnet-b0")
net3._fc = torch.nn.Linear(in_features=net3._fc.in_features, out_features=1)
net3.load_state_dict(torch.load(CHECKPOINT3))
net3.cuda()
net3.eval() | Deepfake Detection Challenge |
8,522,656 | model_dir = TRAINED_MODEL
test_dir = TEST_RESIZED
test_file_list = [
(test_dir / f"{img_id}.png", [-1] * 11)
for img_id in smpl_sub["StudyInstanceUID"].values]
test_loader = get_dataloaders_for_inference(test_file_list, batch_size=32)
test_preds_arr = np.zeros(( N_FOLD , len(smpl_sub), N_CLASSES))
for fold_id in [0,1,2,3,4]:
print(f"[fold {fold_id}]")
stgs = load_setting_file("../input/ranzer-models/settings.yml")
stgs["model"]["params"]["pretrained"] = False
model = MultiHeadResNet200D(**stgs["model"]["params"])
model_path = model_dir / f"best_model_fold{fold_id}.pth"
print(model_path)
model.load_state_dict(torch.load(model_path, map_location=device))
test_pred = run_inference_loop(stgs, model,test_loader, device)
test_preds_arr[fold_id] = test_pred
del model
torch.cuda.empty_cache()
gc.collect()
<save_to_csv> | net6 = EfficientNet.from_name("efficientnet-b1")
net6._fc = torch.nn.Linear(in_features=net6._fc.in_features, out_features=1)
net6.load_state_dict(torch.load(CHECKPOINT6))
net6.cuda()
net6.eval() | Deepfake Detection Challenge |
8,522,656 | if CONVERT_TO_RANK:
test_preds_arr = test_preds_arr.argsort(axis=1 ).argsort(axis=1)
sub = smpl_sub.copy()
sub[CLASSES] = test_preds_arr.mean(axis=0)
sub.to_csv("submission.csv", index=False )<load_pretrained> | class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False):
super(SeparableConv2d,self ).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=in_channels,bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class Block(nn.Module):
    """Xception residual block: a stack of ReLU/SeparableConv/BN triples plus a
    (possibly projected) skip connection.

    Args:
        in_filters, out_filters: input / output channel counts.
        reps: number of separable-conv repetitions.
        strides: spatial stride; if != 1, a final 3x3 max pool downsamples.
        start_with_relu: drop or keep the leading ReLU (stem blocks drop it).
        grow_first: widen channels in the first conv (True) or the last (False).
    """
    def __init__(self,in_filters,out_filters,reps,strides=1,start_with_relu=True,grow_first=True):
        super(Block, self).__init__()
        # Projection skip whenever shape changes; identity skip otherwise.
        if out_filters != in_filters or strides!=1:
            self.skip = nn.Conv2d(in_filters,out_filters,1,stride=strides, bias=False)
            self.skipbn = nn.BatchNorm2d(out_filters)
        else:
            self.skip=None
        rep=[]
        filters=in_filters
        if grow_first:
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
            filters = out_filters
        for i in range(reps-1):
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(filters,filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(filters))
        if not grow_first:
            rep.append(nn.ReLU(inplace=True))
            rep.append(SeparableConv2d(in_filters,out_filters,3,stride=1,padding=1,bias=False))
            rep.append(nn.BatchNorm2d(out_filters))
        if not start_with_relu:
            rep = rep[1:]
        else:
            # First ReLU must not be in-place: its input also feeds the skip path.
            rep[0] = nn.ReLU(inplace=False)
        if strides != 1:
            rep.append(nn.MaxPool2d(3,strides,1))
        self.rep = nn.Sequential(*rep)
    def forward(self,inp):
        x = self.rep(inp)
        if self.skip is not None:
            skip = self.skip(inp)
            skip = self.skipbn(skip)
        else:
            skip = inp
        x+=skip
        return x
class Xception(nn.Module):
    """Xception backbone: entry/middle/exit flows of separable-conv Blocks.

    NOTE(review): `logits()` calls `self.last_linear`, but __init__ only
    defines `self.fc` — callers must alias `model.last_linear = model.fc`
    (as the `xception()` factory and the downstream loading code do) before
    running a forward pass.
    """
    def __init__(self, num_classes=1000):
        super(Xception, self).__init__()
        self.num_classes = num_classes
        # Entry flow stem.
        self.conv1 = nn.Conv2d(3, 32, 3,2, 0, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(32,64,3,bias=False)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        # Entry flow: three downsampling blocks.
        self.block1=Block(64,128,2,2,start_with_relu=False,grow_first=True)
        self.block2=Block(128,256,2,2,start_with_relu=True,grow_first=True)
        self.block3=Block(256,728,2,2,start_with_relu=True,grow_first=True)
        # Middle flow: eight identity-shaped blocks at 728 channels.
        self.block4=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block5=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block6=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block7=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block8=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block9=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block10=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        self.block11=Block(728,728,3,1,start_with_relu=True,grow_first=True)
        # Exit flow.
        self.block12=Block(728,1024,2,2,start_with_relu=True,grow_first=False)
        self.conv3 = SeparableConv2d(1024,1536,3,1,1)
        self.bn3 = nn.BatchNorm2d(1536)
        self.relu3 = nn.ReLU(inplace=True)
        self.conv4 = SeparableConv2d(1536,2048,3,1,1)
        self.bn4 = nn.BatchNorm2d(2048)
        self.fc = nn.Linear(2048, num_classes)
    def features(self, input):
        """Return the (B, 2048, H', W') feature map before pooling."""
        x = self.conv1(input)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)
        x = self.conv3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.conv4(x)
        x = self.bn4(x)
        return x
    def logits(self, features):
        """ReLU -> global average pool -> flatten -> classifier."""
        x = nn.ReLU(inplace=True)(features)
        x = F.adaptive_avg_pool2d(x,(1, 1))
        x = x.view(x.size(0), -1)
        x = self.last_linear(x)  # requires the external fc -> last_linear alias
        return x
    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
def xception(num_classes=1000, pretrained='imagenet'):
    """Construct an Xception, optionally loading pretrained weights.

    Fix: the original constructed `Xception` a second time inside the
    `if pretrained:` branch, throwing away the first instance — the model
    is now built exactly once.

    NOTE(review): `pretrained_settings` and `model_zoo` are assumed to be
    defined at module level (as in pretrained-models.pytorch) — confirm.

    Args:
        num_classes: classifier width; must match the checkpoint when
            `pretrained` is truthy.
        pretrained: settings key (e.g. 'imagenet') or falsy for random init.
    """
    model = Xception(num_classes=num_classes)
    if pretrained:
        settings = pretrained_settings['xception'][pretrained]
        assert num_classes == settings['num_classes'], \
            "num_classes should be {}, but is {}".format(settings['num_classes'], num_classes)
        model.load_state_dict(model_zoo.load_url(settings['url']))
        model.input_space = settings['input_space']
        model.input_size = settings['input_size']
        model.input_range = settings['input_range']
        model.mean = settings['mean']
        model.std = settings['std']
    # Expose the classifier under the name `Xception.logits()` expects.
    model.last_linear = model.fc
    del model.fc
    return model
net4 = Xception()
net4.last_linear = net4.fc
del net4.fc
net4.last_linear = nn.Linear(in_features=2048, out_features=1, bias=True)
net4.load_state_dict(torch.load(CHECKPOINT4))
net4 = net4.to(device)
net4.cuda()
net4.eval()
net10 = Xception()
net10.last_linear = net10.fc
del net10.fc
net10.last_linear = nn.Linear(in_features=2048, out_features=1, bias=True)
net10.load_state_dict(torch.load(CHECKPOINT10))
net10 = net10.to(device)
net10.cuda()
net10.eval() | Deepfake Detection Challenge |
8,522,656 | model_dir = TRAINED_MODEL
test_dir = TEST_RESIZED
test_file_list = [
(test_dir / f"{img_id}.png", [-1] * 11)
for img_id in smpl_sub["StudyInstanceUID"].values]
test_loader = get_dataloaders_for_inference(test_file_list, batch_size=4)
N_FOLD = len([1024])
test_preds_arr = np.zeros(( N_FOLD , len(smpl_sub), N_CLASSES))
for i,fold_id in enumerate([1024]):
print(f"[fold {fold_id}]")
stgs = load_setting_file("../input/ranzer-models/settings.yml")
stgs["model"]["params"]["pretrained"] = False
model = MultiHeadResNet200D(**stgs["model"]["params"])
model_path = model_dir / f"best_model_fold{fold_id}.pth"
print(model_path)
model.load_state_dict(torch.load(model_path, map_location=device))
test_pred = run_inference_loop(stgs, model,test_loader, device)
test_preds_arr[i] = test_pred
del model
torch.cuda.empty_cache()
gc.collect()
<prepare_output> | class MaxPoolPad(nn.Module):
def __init__(self):
super(MaxPoolPad, self ).__init__()
self.pad = nn.ZeroPad2d(( 1, 0, 1, 0))
self.pool = nn.MaxPool2d(3, stride=2, padding=1)
def forward(self, x):
x = self.pad(x)
x = self.pool(x)
x = x[:, :, 1:, 1:].contiguous()
return x
class AvgPoolPad(nn.Module):
    """3x3 average pooling preceded by an asymmetric top/left zero pad, with
    the leading row/column cropped afterwards (NASNet alignment trick)."""

    def __init__(self, stride=2, padding=1):
        super(AvgPoolPad, self).__init__()
        self.pad = nn.ZeroPad2d((1, 0, 1, 0))
        self.pool = nn.AvgPool2d(3, stride=stride, padding=padding, count_include_pad=False)

    def forward(self, x):
        padded = self.pad(x)
        pooled = self.pool(padded)
        # Drop the row/column introduced by the asymmetric pad.
        return pooled[:, :, 1:, 1:].contiguous()
class SeparableConv2d(nn.Module):
    """Depthwise conv (groups == in_channels) followed by a 1x1 pointwise conv."""

    def __init__(self, in_channels, out_channels, dw_kernel, dw_stride, dw_padding, bias=False):
        super(SeparableConv2d, self).__init__()
        self.depthwise_conv2d = nn.Conv2d(
            in_channels, in_channels, dw_kernel,
            stride=dw_stride, padding=dw_padding,
            bias=bias, groups=in_channels)
        self.pointwise_conv2d = nn.Conv2d(in_channels, out_channels, 1, stride=1, bias=bias)

    def forward(self, x):
        depthwise_out = self.depthwise_conv2d(x)
        return self.pointwise_conv2d(depthwise_out)
class BranchSeparables(nn.Module):
    """ReLU -> separable conv (stride) -> BN -> ReLU -> separable conv -> BN.

    When name == 'specific', the first separable conv sees an extra top/left
    zero pad and its output's leading row/column is cropped afterwards.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, name=None, bias=False):
        super(BranchSeparables, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(in_channels, in_channels, kernel_size, stride, padding, bias=bias)
        self.bn_sep_1 = nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.1, affine=True)
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(in_channels, out_channels, kernel_size, 1, padding, bias=bias)
        self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
        self.name = name

    def forward(self, x):
        out = self.relu(x)
        if self.name == 'specific':
            out = nn.ZeroPad2d((1, 0, 1, 0))(out)
        out = self.separable_1(out)
        if self.name == 'specific':
            # Crop the row/column added by the asymmetric pad above.
            out = out[:, :, 1:, 1:].contiguous()
        out = self.bn_sep_1(out)
        out = self.relu1(out)
        out = self.separable_2(out)
        return self.bn_sep_2(out)
class BranchSeparablesStem(nn.Module):
    """Stem variant of BranchSeparables: the first separable conv already maps
    in_channels -> out_channels, so both BNs run on out_channels."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        super(BranchSeparablesStem, self).__init__()
        self.relu = nn.ReLU()
        self.separable_1 = SeparableConv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        self.bn_sep_1 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)
        self.relu1 = nn.ReLU()
        self.separable_2 = SeparableConv2d(out_channels, out_channels, kernel_size, 1, padding, bias=bias)
        self.bn_sep_2 = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True)

    def forward(self, x):
        out = self.relu(x)
        out = self.bn_sep_1(self.separable_1(out))
        out = self.relu1(out)
        out = self.bn_sep_2(self.separable_2(out))
        return out
class BranchSeparablesReduction(BranchSeparables):
    """BranchSeparables variant that zero-pads top/left before the first
    separable conv and crops the leading row/column after it.

    Fix: the original called the base __init__ with `bias` passed
    positionally, where it silently bound to the base class's `name`
    parameter (leaving `bias` at its default). It is now passed by keyword.

    NOTE(review): `z_padding` only widens the pad, while the crop below is a
    fixed `[1:, 1:]` — confirm z_padding > 1 is ever intended.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, z_padding=1, bias=False):
        BranchSeparables.__init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=bias)
        self.padding = nn.ZeroPad2d((z_padding, 0, z_padding, 0))

    def forward(self, x):
        x = self.relu(x)
        x = self.padding(x)
        x = self.separable_1(x)
        # Crop the row/column introduced by the asymmetric pad.
        x = x[:, :, 1:, 1:].contiguous()
        x = self.bn_sep_1(x)
        x = self.relu1(x)
        x = self.separable_2(x)
        x = self.bn_sep_2(x)
        return x
class CellStem0(nn.Module):
    """First NASNet reduction stem cell: five branch combinations over the raw
    stem output `x` and its 1x1-projected version `x1`, concatenated on channels.

    Output channels: 4 * num_filters (comb iters 1-4 are concatenated;
    comb_iter_0 only feeds later branches).
    """
    def __init__(self, stem_filters, num_filters=42):
        super(CellStem0, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        # 1x1 projection of the stem output used by the "left" branches.
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2)
        self.comb_iter_0_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_1_left = nn.MaxPool2d(3, stride=2, padding=1)
        self.comb_iter_1_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 7, 2, 3, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=2, padding=1, count_include_pad=False)
        self.comb_iter_2_right = BranchSeparablesStem(self.stem_filters, self.num_filters, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, bias=False)
        self.comb_iter_4_right = nn.MaxPool2d(3, stride=2, padding=1)
    def forward(self, x):
        x1 = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x1)
        x_comb_iter_0_right = self.comb_iter_0_right(x)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x1)
        x_comb_iter_1_right = self.comb_iter_1_right(x)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x1)
        x_comb_iter_2_right = self.comb_iter_2_right(x)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        # Iterations 3 and 4 reuse comb_iter_0 rather than the raw inputs.
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x1)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class CellStem1(nn.Module):
    """Second NASNet reduction stem cell.

    Takes the initial conv output `x_conv0` and the previous stem output
    `x_stem_0`; `x_conv0` is downsampled through two offset avg-pool paths
    whose outputs are concatenated and batch-normed into the "right" input.
    Output channels: 4 * num_filters.
    """
    def __init__(self, stem_filters, num_filters):
        super(CellStem1, self).__init__()
        self.num_filters = num_filters
        self.stem_filters = stem_filters
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(2*self.num_filters, self.num_filters, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True))
        self.relu = nn.ReLU()
        # Path 1: plain stride-2 1x1 avg pool + 1x1 conv.
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        # Path 2: same, but shifted one pixel via pad + crop in forward().
        # NOTE(review): declared as nn.ModuleList but used via named attributes
        # (self.path_2.pad, ...) — works, but nn.Sequential seems intended.
        self.path_2 = nn.ModuleList()
        self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(self.stem_filters, self.num_filters//2, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d(self.num_filters, eps=0.001, momentum=0.1, affine=True)
        self.comb_iter_0_left = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_0_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, name='specific', bias=False)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparables(self.num_filters, self.num_filters, 7, 2, 3, name='specific', bias=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparables(self.num_filters, self.num_filters, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(self.num_filters, self.num_filters, 3, 1, 1, name='specific', bias=False)
        self.comb_iter_4_right = MaxPoolPad()
    def forward(self, x_conv0, x_stem_0):
        x_left = self.conv_1x1(x_stem_0)
        x_relu = self.relu(x_conv0)
        x_path1 = self.path_1(x_relu)
        # Shifted path: pad bottom/right, crop top/left, then pool.
        x_path2 = self.path_2.pad(x_relu)
        x_path2 = x_path2[:, :, 1:, 1:]
        x_path2 = self.path_2.avgpool(x_path2)
        x_path2 = self.path_2.conv(x_path2)
        x_right = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        x_comb_iter_0_left = self.comb_iter_0_left(x_left)
        x_comb_iter_0_right = self.comb_iter_0_right(x_right)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_right)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_left)
        x_comb_iter_2_right = self.comb_iter_2_right(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_comb_iter_2_right
        x_comb_iter_3_right = self.comb_iter_3_right(x_comb_iter_0)
        x_comb_iter_3 = x_comb_iter_3_right + x_comb_iter_1
        x_comb_iter_4_left = self.comb_iter_4_left(x_comb_iter_0)
        x_comb_iter_4_right = self.comb_iter_4_right(x_left)
        x_comb_iter_4 = x_comb_iter_4_left + x_comb_iter_4_right
        x_out = torch.cat([x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class FirstCell(nn.Module):
    """First NASNet normal cell after a reduction: the previous feature map
    `x_prev` is downsampled through two offset avg-pool paths into the "left"
    input; the current map `x` is 1x1-projected into the "right" input.
    Output: channel-concat of x_left and the five combination results.
    """
    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(FirstCell, self).__init__()
        self.conv_1x1 = nn.Sequential()
        self.conv_1x1.add_module('relu', nn.ReLU())
        self.conv_1x1.add_module('conv', nn.Conv2d(in_channels_right, out_channels_right, 1, stride=1, bias=False))
        self.conv_1x1.add_module('bn', nn.BatchNorm2d(out_channels_right, eps=0.001, momentum=0.1, affine=True))
        self.relu = nn.ReLU()
        self.path_1 = nn.Sequential()
        self.path_1.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_1.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        # NOTE(review): ModuleList used via named attributes in forward();
        # nn.Sequential seems intended (see CellStem1).
        self.path_2 = nn.ModuleList()
        self.path_2.add_module('pad', nn.ZeroPad2d((0, 1, 0, 1)))
        self.path_2.add_module('avgpool', nn.AvgPool2d(1, stride=2, count_include_pad=False))
        self.path_2.add_module('conv', nn.Conv2d(in_channels_left, out_channels_left, 1, stride=1, bias=False))
        self.final_path_bn = nn.BatchNorm2d(out_channels_left * 2, eps=0.001, momentum=0.1, affine=True)
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_1_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
    def forward(self, x, x_prev):
        x_relu = self.relu(x_prev)
        x_path1 = self.path_1(x_relu)
        # Shifted path: pad bottom/right, crop top/left, then pool.
        x_path2 = self.path_2.pad(x_relu)
        x_path2 = x_path2[:, :, 1:, 1:]
        x_path2 = self.path_2.avgpool(x_path2)
        x_path2 = self.path_2.conv(x_path2)
        x_left = self.final_path_bn(torch.cat([x_path1, x_path2], 1))
        x_right = self.conv_1x1(x)
        x_comb_iter_0_left = self.comb_iter_0_left(x_right)
        x_comb_iter_0_right = self.comb_iter_0_right(x_left)
        x_comb_iter_0 = x_comb_iter_0_left + x_comb_iter_0_right
        x_comb_iter_1_left = self.comb_iter_1_left(x_left)
        x_comb_iter_1_right = self.comb_iter_1_right(x_left)
        x_comb_iter_1 = x_comb_iter_1_left + x_comb_iter_1_right
        x_comb_iter_2_left = self.comb_iter_2_left(x_right)
        x_comb_iter_2 = x_comb_iter_2_left + x_left
        x_comb_iter_3_left = self.comb_iter_3_left(x_left)
        x_comb_iter_3_right = self.comb_iter_3_right(x_left)
        x_comb_iter_3 = x_comb_iter_3_left + x_comb_iter_3_right
        x_comb_iter_4_left = self.comb_iter_4_left(x_right)
        x_comb_iter_4 = x_comb_iter_4_left + x_right
        x_out = torch.cat([x_left, x_comb_iter_0, x_comb_iter_1, x_comb_iter_2, x_comb_iter_3, x_comb_iter_4], 1)
        return x_out
class NormalCell(nn.Module):
    """NASNet-A normal cell.

    Projects the previous (``x_prev``) and current (``x``) feature maps with
    ReLU->1x1 conv->BN, combines them through five pairwise branch additions,
    and concatenates the six partial results along the channel axis.

    Submodule attribute names (and the 'relu'/'conv'/'bn' child names) are kept
    exactly as in the reference implementation so pretrained state_dicts load
    unchanged.
    """

    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(NormalCell, self).__init__()

        def relu_conv_bn(in_ch, out_ch):
            # ReLU -> 1x1 conv -> BN with the canonical child-module names.
            seq = nn.Sequential()
            seq.add_module('relu', nn.ReLU())
            seq.add_module('conv', nn.Conv2d(in_ch, out_ch, 1, stride=1, bias=False))
            seq.add_module('bn', nn.BatchNorm2d(out_ch, eps=0.001, momentum=0.1, affine=True))
            return seq

        self.conv_prev_1x1 = relu_conv_bn(in_channels_left, out_channels_left)
        self.conv_1x1 = relu_conv_bn(in_channels_right, out_channels_right)
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 1, 2, bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
        self.comb_iter_1_left = BranchSeparables(out_channels_left, out_channels_left, 5, 1, 2, bias=False)
        self.comb_iter_1_right = BranchSeparables(out_channels_left, out_channels_left, 3, 1, 1, bias=False)
        self.comb_iter_2_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_left = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, bias=False)

    def forward(self, x, x_prev):
        left = self.conv_prev_1x1(x_prev)   # projected previous-cell output
        right = self.conv_1x1(x)            # projected current input
        branch0 = self.comb_iter_0_left(right) + self.comb_iter_0_right(left)
        branch1 = self.comb_iter_1_left(left) + self.comb_iter_1_right(left)
        branch2 = self.comb_iter_2_left(right) + left
        branch3 = self.comb_iter_3_left(left) + self.comb_iter_3_right(left)
        branch4 = self.comb_iter_4_left(right) + right
        return torch.cat([left, branch0, branch1, branch2, branch3, branch4], 1)
class ReductionCell0(nn.Module):
    """NASNet-A reduction cell (first reduction stage).

    Halves the spatial resolution via stride-2 separable-conv and padded
    pooling branches, then concatenates four of the five branch sums.
    Attribute and child-module names match the reference implementation so
    pretrained state_dicts load unchanged.
    """

    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell0, self).__init__()

        def relu_conv_bn(in_ch, out_ch):
            # ReLU -> 1x1 conv -> BN with the canonical child-module names.
            seq = nn.Sequential()
            seq.add_module('relu', nn.ReLU())
            seq.add_module('conv', nn.Conv2d(in_ch, out_ch, 1, stride=1, bias=False))
            seq.add_module('bn', nn.BatchNorm2d(out_ch, eps=0.001, momentum=0.1, affine=True))
            return seq

        self.conv_prev_1x1 = relu_conv_bn(in_channels_left, out_channels_left)
        self.conv_1x1 = relu_conv_bn(in_channels_right, out_channels_right)
        self.comb_iter_0_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_0_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 7, 2, 3, bias=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparablesReduction(out_channels_right, out_channels_right, 5, 2, 2, bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparablesReduction(out_channels_right, out_channels_right, 3, 1, 1, bias=False)
        self.comb_iter_4_right = MaxPoolPad()

    def forward(self, x, x_prev):
        left = self.conv_prev_1x1(x_prev)   # projected previous-cell output
        right = self.conv_1x1(x)            # projected current input
        branch0 = self.comb_iter_0_left(right) + self.comb_iter_0_right(left)
        branch1 = self.comb_iter_1_left(right) + self.comb_iter_1_right(left)
        branch2 = self.comb_iter_2_left(right) + self.comb_iter_2_right(left)
        # Branches 3 and 4 re-use branch 0's output as their input.
        branch3 = self.comb_iter_3_right(branch0) + branch1
        branch4 = self.comb_iter_4_left(branch0) + self.comb_iter_4_right(right)
        return torch.cat([branch1, branch2, branch3, branch4], 1)
class ReductionCell1(nn.Module):
    """NASNet-A reduction cell (second reduction stage).

    Same wiring as ReductionCell0 but built from the 'specific'-padding
    variant of BranchSeparables. Attribute and child-module names match the
    reference implementation so pretrained state_dicts load unchanged.
    """

    def __init__(self, in_channels_left, out_channels_left, in_channels_right, out_channels_right):
        super(ReductionCell1, self).__init__()

        def relu_conv_bn(in_ch, out_ch):
            # ReLU -> 1x1 conv -> BN with the canonical child-module names.
            seq = nn.Sequential()
            seq.add_module('relu', nn.ReLU())
            seq.add_module('conv', nn.Conv2d(in_ch, out_ch, 1, stride=1, bias=False))
            seq.add_module('bn', nn.BatchNorm2d(out_ch, eps=0.001, momentum=0.1, affine=True))
            return seq

        self.conv_prev_1x1 = relu_conv_bn(in_channels_left, out_channels_left)
        self.conv_1x1 = relu_conv_bn(in_channels_right, out_channels_right)
        self.comb_iter_0_left = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_0_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, name='specific', bias=False)
        self.comb_iter_1_left = MaxPoolPad()
        self.comb_iter_1_right = BranchSeparables(out_channels_right, out_channels_right, 7, 2, 3, name='specific', bias=False)
        self.comb_iter_2_left = AvgPoolPad()
        self.comb_iter_2_right = BranchSeparables(out_channels_right, out_channels_right, 5, 2, 2, name='specific', bias=False)
        self.comb_iter_3_right = nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False)
        self.comb_iter_4_left = BranchSeparables(out_channels_right, out_channels_right, 3, 1, 1, name='specific', bias=False)
        self.comb_iter_4_right = MaxPoolPad()

    def forward(self, x, x_prev):
        left = self.conv_prev_1x1(x_prev)   # projected previous-cell output
        right = self.conv_1x1(x)            # projected current input
        branch0 = self.comb_iter_0_left(right) + self.comb_iter_0_right(left)
        branch1 = self.comb_iter_1_left(right) + self.comb_iter_1_right(left)
        branch2 = self.comb_iter_2_left(right) + self.comb_iter_2_right(left)
        # Branches 3 and 4 re-use branch 0's output as their input.
        branch3 = self.comb_iter_3_right(branch0) + branch1
        branch4 = self.comb_iter_4_left(branch0) + self.comb_iter_4_right(right)
        return torch.cat([branch1, branch2, branch3, branch4], 1)
class NASNetAMobile(nn.Module):
    """NASNet-A (4 @ 1056) "Mobile" backbone.

    Layout: stem conv + two stem cells, then three stages of four cells each
    (a FirstCell followed by three NormalCells), separated by two reduction
    cells; the head is ReLU -> global average pool -> dropout -> linear.

    All submodule attribute names follow the reference implementation so that
    pretrained checkpoints load via ``load_state_dict`` unchanged.

    Change from the original: the fixed ``nn.AvgPool2d(7)`` head pool is
    replaced by the parameter-free ``nn.AdaptiveAvgPool2d(1)``, which is
    numerically identical on the canonical 7x7 final feature map but also
    works for other input resolutions (the old pool made ``last_linear``
    fail on anything but 7x7).
    """

    def __init__(self, num_classes=1000, stem_filters=32, penultimate_filters=1056, filters_multiplier=2):
        super(NASNetAMobile, self).__init__()
        self.num_classes = num_classes
        self.stem_filters = stem_filters
        self.penultimate_filters = penultimate_filters
        self.filters_multiplier = filters_multiplier
        # 24 = 4 cells x 6 concatenated branch outputs in the last stage.
        filters = self.penultimate_filters // 24
        self.conv0 = nn.Sequential()
        self.conv0.add_module('conv', nn.Conv2d(in_channels=3, out_channels=self.stem_filters, kernel_size=3, padding=0, stride=2,
                                                bias=False))
        self.conv0.add_module('bn', nn.BatchNorm2d(self.stem_filters, eps=0.001, momentum=0.1, affine=True))
        self.cell_stem_0 = CellStem0(self.stem_filters, num_filters=filters // (filters_multiplier ** 2))
        self.cell_stem_1 = CellStem1(self.stem_filters, num_filters=filters // filters_multiplier)
        # Stage 1.
        self.cell_0 = FirstCell(in_channels_left=filters, out_channels_left=filters//2,
                                in_channels_right=2*filters, out_channels_right=filters)
        self.cell_1 = NormalCell(in_channels_left=2*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_2 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.cell_3 = NormalCell(in_channels_left=6*filters, out_channels_left=filters,
                                 in_channels_right=6*filters, out_channels_right=filters)
        self.reduction_cell_0 = ReductionCell0(in_channels_left=6*filters, out_channels_left=2*filters,
                                               in_channels_right=6*filters, out_channels_right=2*filters)
        # Stage 2.
        self.cell_6 = FirstCell(in_channels_left=6*filters, out_channels_left=filters,
                                in_channels_right=8*filters, out_channels_right=2*filters)
        self.cell_7 = NormalCell(in_channels_left=8*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_8 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.cell_9 = NormalCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=12*filters, out_channels_right=2*filters)
        self.reduction_cell_1 = ReductionCell1(in_channels_left=12*filters, out_channels_left=4*filters,
                                               in_channels_right=12*filters, out_channels_right=4*filters)
        # Stage 3.
        self.cell_12 = FirstCell(in_channels_left=12*filters, out_channels_left=2*filters,
                                 in_channels_right=16*filters, out_channels_right=4*filters)
        self.cell_13 = NormalCell(in_channels_left=16*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_14 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.cell_15 = NormalCell(in_channels_left=24*filters, out_channels_left=4*filters,
                                  in_channels_right=24*filters, out_channels_right=4*filters)
        self.relu = nn.ReLU()
        # Global average pool; parameter-free, so existing checkpoints still load.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout()
        self.last_linear = nn.Linear(24*filters, self.num_classes)

    def features(self, input):
        """Run the backbone; each cell also consumes its grandparent's output."""
        x_conv0 = self.conv0(input)
        x_stem_0 = self.cell_stem_0(x_conv0)
        x_stem_1 = self.cell_stem_1(x_conv0, x_stem_0)
        x_cell_0 = self.cell_0(x_stem_1, x_stem_0)
        x_cell_1 = self.cell_1(x_cell_0, x_stem_1)
        x_cell_2 = self.cell_2(x_cell_1, x_cell_0)
        x_cell_3 = self.cell_3(x_cell_2, x_cell_1)
        x_reduction_cell_0 = self.reduction_cell_0(x_cell_3, x_cell_2)
        x_cell_6 = self.cell_6(x_reduction_cell_0, x_cell_3)
        x_cell_7 = self.cell_7(x_cell_6, x_reduction_cell_0)
        x_cell_8 = self.cell_8(x_cell_7, x_cell_6)
        x_cell_9 = self.cell_9(x_cell_8, x_cell_7)
        x_reduction_cell_1 = self.reduction_cell_1(x_cell_9, x_cell_8)
        x_cell_12 = self.cell_12(x_reduction_cell_1, x_cell_9)
        x_cell_13 = self.cell_13(x_cell_12, x_reduction_cell_1)
        x_cell_14 = self.cell_14(x_cell_13, x_cell_12)
        x_cell_15 = self.cell_15(x_cell_14, x_cell_13)
        return x_cell_15

    def logits(self, features):
        """Classifier head: ReLU -> global pool -> flatten -> dropout -> linear."""
        x = self.relu(features)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout(x)
        x = self.last_linear(x)
        return x

    def forward(self, input):
        x = self.features(input)
        x = self.logits(x)
        return x
# Two NASNet-A Mobile binary heads from different training runs.
# map_location lets GPU-saved checkpoints load on CPU-only hosts, and the
# redundant .cuda() that followed .to(device) (and crashed CPU runs) is gone.
net5 = NASNetAMobile()
net5.last_linear = nn.Linear(in_features=1056, out_features=1, bias=True)
net5.load_state_dict(torch.load(CHECKPOINT5, map_location=device))
net5 = net5.to(device)
net5.eval()

net9 = NASNetAMobile()
net9.last_linear = nn.Linear(in_features=1056, out_features=1, bias=True)
net9.load_state_dict(torch.load(CHECKPOINT9, map_location=device))
net9 = net9.to(device)
net9.eval()
# (Extraction junk removed from this span.)
sub_2 = smpl_sub.copy()
sub_2[CLASSES] = test_preds_arr.mean(axis=0)

class CFG:
    # Sequence-model hyper-parameters shared by LSTM_Model and the
    # sequence-building code in predict_on_video.
    seq_len = 10   # frames per LSTM input sequence
    lstm_in = 16   # per-frame embedding size produced by the CNN head
    lstm_out = 16  # LSTM hidden size
class LSTM_Model(nn.Module):
    """Frame-sequence deepfake classifier.

    A MobileNetV3-small produces a CFG.lstm_in-dim embedding per frame; the
    embeddings run through a single-layer LSTM and the last time step is
    mapped to one logit.
    """

    def __init__(self):
        super(LSTM_Model, self).__init__()
        self.cnn_net = mobilenetv3(mode='small', pretrained=False)
        # Replace the ImageNet head with a CFG.lstm_in-dim embedding layer.
        # (The original first assigned a Linear(1280, 1) and immediately
        # overwrote it; that dead assignment is removed — weights are loaded
        # from a checkpoint afterwards, so initialization is irrelevant.)
        self.cnn_net.classifier[1] = nn.Linear(in_features=1280, out_features=CFG.lstm_in)
        self.lstm = nn.LSTM(CFG.lstm_in, CFG.lstm_out, bidirectional=False, batch_first=True)
        self.reg_layer = nn.Sequential(nn.Dropout(0.5),
                                       nn.Linear(CFG.lstm_out, 1))

    def forward(self, x):
        # x: assumed (batch, CFG.seq_len, 3, 224, 224) — flattened so the CNN
        # scores every frame independently, then regrouped per sample.
        n_samples = x.shape[0]
        x = x.view(-1, 3, 224, 224)
        x = self.cnn_net(x)
        x = x.view(n_samples, CFG.seq_len, -1)
        x, _ = self.lstm(x)
        # Logit from the final time step only.
        y = self.reg_layer(x[:, -1])
        return y
# Two CNN+LSTM sequence classifiers from different training runs.
# map_location lets GPU-saved checkpoints load on CPU-only hosts, and the
# redundant .cuda() that followed .to(device) (and crashed CPU runs) is gone.
net7 = LSTM_Model()
net7.load_state_dict(torch.load(CHECKPOINT7, map_location=device))
net7 = net7.to(device)
net7.eval()

net8 = LSTM_Model()
net8.load_state_dict(torch.load(CHECKPOINT8, map_location=device))
net8 = net8.to(device)
net8.eval()
8,522,656 | sub[CLASSES] = 0.6 * sub[CLASSES] + 0.4 * sub_2[CLASSES]<save_to_csv> | Deepfake Detection Challenge | |
8,522,656 | sub.to_csv("submission.csv", index=False )<import_modules> | net11 = torchvision.models.resnet18(pretrained=False)
net11.fc = nn.Linear(in_features=512, out_features=1, bias=True)
net11.load_state_dict(torch.load(CHECKPOINT11))
net11 = net11.to(device)
net11.cuda()
net11.eval() | Deepfake Detection Challenge |
8,522,656 | import pandas as pd
from IPython.display import display
from sklearn.feature_selection import VarianceThreshold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, mean_squared_error
import numpy as np<categorify> | import albumentations
from albumentations.augmentations.transforms import ShiftScaleRotate, HorizontalFlip, RandomBrightnessContrast, MotionBlur, Blur, GaussNoise, JpegCompression
| Deepfake Detection Challenge |
8,522,656 | types = {'ID':np.uint32, 'target':np.uint8, 'VAR_0002':np.uint16, 'VAR_0003':np.uint16, 'VAR_0532':np.uint8, 'VAR_0533':np.uint8, 'VAR_0534':np.uint8,
'VAR_0535':np.uint8, 'VAR_0536':np.uint8, 'VAR_0537':np.uint8,'VAR_0538':np.uint8, 'VAR_0539':np.uint8, 'VAR_0540':np.uint8, 'VAR_0545':np.uint16,
'VAR_0546':np.uint16, 'VAR_0547':np.uint16, 'VAR_0548':np.uint16, 'VAR_0549':np.uint16, 'VAR_0550':np.uint16, 'VAR_0551':np.uint16,
'VAR_0552':np.uint8, 'VAR_0553':np.uint8, 'VAR_0554':np.uint16, 'VAR_0555':np.uint16, 'VAR_0556':np.uint16, 'VAR_0557':np.uint16,
'VAR_0558':np.uint16, 'VAR_0559':np.uint8, 'VAR_0560':np.uint8, 'VAR_0561':np.uint16, 'VAR_0562':np.uint8, 'VAR_0563':np.uint8,
'VAR_0564':np.uint8, 'VAR_0565':np.uint8, 'VAR_0566':np.uint8, 'VAR_0567':np.uint8, 'VAR_0568':np.uint8, 'VAR_0569':np.uint8,
'VAR_0570':np.uint16, 'VAR_0572':np.uint8, 'VAR_0580':np.uint8, 'VAR_0581':np.uint8, 'VAR_0582':np.uint8, 'VAR_0604':np.uint8,
'VAR_0605':np.uint8, 'VAR_0606':np.uint8, 'VAR_0617':np.uint8, 'VAR_0618':np.uint8, 'VAR_0619':np.uint8, 'VAR_0620':np.uint8,
'VAR_0621':np.uint8, 'VAR_0622':np.uint8, 'VAR_0623':np.uint8, 'VAR_0624':np.uint8, 'VAR_0625':np.uint8, 'VAR_0626':np.uint8,
'VAR_0627':np.uint8, 'VAR_0637':np.uint8, 'VAR_0638':np.uint8, 'VAR_0639':np.uint8, 'VAR_0640':np.uint8, 'VAR_0646':np.uint8,
'VAR_0647':np.uint8, 'VAR_0657':np.uint8, 'VAR_0658':np.uint8, 'VAR_0662':np.uint8, 'VAR_0663':np.uint8, 'VAR_0664':np.uint8, 'VAR_0665':np.uint8,
'VAR_0666':np.uint8,'VAR_0667':np.uint8, 'VAR_0668':np.uint8, 'VAR_0685':np.uint8, 'VAR_0686':np.uint8, 'VAR_0689':np.uint8, 'VAR_0690':np.uint8,
'VAR_0696':np.uint8, 'VAR_0697':np.uint8, 'VAR_0703':np.uint8, 'VAR_0708':np.uint8, 'VAR_0709':np.uint8, 'VAR_0710':np.uint8, 'VAR_0711':np.uint8,
'VAR_0712':np.uint8, 'VAR_0713':np.uint8, 'VAR_0714':np.uint8, 'VAR_0715':np.uint8, 'VAR_0716':np.uint8, 'VAR_0717':np.uint8, 'VAR_0718':np.uint8,
'VAR_0719':np.uint8, 'VAR_0720':np.uint8, 'VAR_0721':np.uint8, 'VAR_0722':np.uint8, 'VAR_0723':np.uint8, 'VAR_0724':np.uint8, 'VAR_0725':np.uint8,
'VAR_0726':np.uint8, 'VAR_0727':np.uint8, 'VAR_0728':np.uint8, 'VAR_0729':np.uint8, 'VAR_0730':np.uint8, 'VAR_0731':np.uint8, 'VAR_0732':np.uint8,
'VAR_0733':np.uint8, 'VAR_0734':np.uint8, 'VAR_0735':np.uint8, 'VAR_0736':np.uint8, 'VAR_0737':np.uint8, 'VAR_0738':np.uint8, 'VAR_0739':np.uint8,
'VAR_0740':np.uint8, 'VAR_0741':np.uint8, 'VAR_0742':np.uint8, 'VAR_0743':np.uint8, 'VAR_0744':np.uint8, 'VAR_0745':np.uint8, 'VAR_0746':np.uint8,
'VAR_0747':np.uint8, 'VAR_0748':np.uint8, 'VAR_0749':np.uint8, 'VAR_0750':np.uint8, 'VAR_0751':np.uint8, 'VAR_0752':np.uint8, 'VAR_0753':np.uint8,
'VAR_0754':np.uint8, 'VAR_0755':np.uint8, 'VAR_0756':np.uint8, 'VAR_0758':np.uint8, 'VAR_0759':np.uint8, 'VAR_0760':np.uint8, 'VAR_0761':np.uint8,
'VAR_0762':np.uint8, 'VAR_0763':np.uint8, 'VAR_0764':np.uint8, 'VAR_0765':np.uint8, 'VAR_0766':np.uint8, 'VAR_0767':np.uint8, 'VAR_0768':np.uint8,
'VAR_0769':np.uint8, 'VAR_0770':np.uint8, 'VAR_0771':np.uint8, 'VAR_0772':np.uint8, 'VAR_0773':np.uint8, 'VAR_0774':np.uint8, 'VAR_0775':np.uint8,
'VAR_0776':np.uint8, 'VAR_0777':np.uint8, 'VAR_0778':np.uint8, 'VAR_0779':np.uint8, 'VAR_0780':np.uint8, 'VAR_0781':np.uint8, 'VAR_0782':np.uint8,
'VAR_0783':np.uint8, 'VAR_0784':np.uint8, 'VAR_0785':np.uint8, 'VAR_0786':np.uint8, 'VAR_0787':np.uint8, 'VAR_0788':np.uint8, 'VAR_0789':np.uint8,
'VAR_0790':np.uint8, 'VAR_0791':np.uint8, 'VAR_0792':np.uint8, 'VAR_0793':np.uint8, 'VAR_0794':np.uint8, 'VAR_0795':np.uint8, 'VAR_0796':np.uint8,
'VAR_0797':np.uint8, 'VAR_0798':np.uint8, 'VAR_0799':np.uint8, 'VAR_0800':np.uint8, 'VAR_0801':np.uint8, 'VAR_0802':np.uint8, 'VAR_0803':np.uint8,
'VAR_0804':np.uint8, 'VAR_0805':np.uint8, 'VAR_0806':np.uint8, 'VAR_0807':np.uint8, 'VAR_0808':np.uint8, 'VAR_0809':np.uint8, 'VAR_0810':np.uint8,
'VAR_0812':np.uint8, 'VAR_0813':np.uint8, 'VAR_0814':np.uint8, 'VAR_0815':np.uint8, 'VAR_0816':np.uint8, 'VAR_0817':np.uint8, 'VAR_0818':np.uint8,
'VAR_0819':np.uint8, 'VAR_0820':np.uint8, 'VAR_0821':np.uint8, 'VAR_0822':np.uint8, 'VAR_0823':np.uint8, 'VAR_0824':np.uint8, 'VAR_0825':np.uint8,
'VAR_0826':np.uint8, 'VAR_0827':np.uint8, 'VAR_0828':np.uint8, 'VAR_0829':np.uint8, 'VAR_0830':np.uint8, 'VAR_0831':np.uint8, 'VAR_0832':np.uint8,
'VAR_0833':np.uint8, 'VAR_0834':np.uint8, 'VAR_0835':np.uint8, 'VAR_0836':np.uint8, 'VAR_0837':np.uint8, 'VAR_0838':np.uint8, 'VAR_0839':np.uint8,
'VAR_0841':np.uint8, 'VAR_0842':np.uint8, 'VAR_0843':np.uint8, 'VAR_0844':np.uint8, 'VAR_0845':np.uint8, 'VAR_0846':np.uint8, 'VAR_0847':np.uint8,
'VAR_0848':np.uint8, 'VAR_0849':np.uint8, 'VAR_0850':np.uint8, 'VAR_0851':np.uint8, 'VAR_0852':np.uint8, 'VAR_0853':np.uint8, 'VAR_0854':np.uint8,
'VAR_0855':np.uint8, 'VAR_0856':np.uint8, 'VAR_0857':np.uint8, 'VAR_0859':np.uint8, 'VAR_0877':np.uint8, 'VAR_0878':np.uint8, 'VAR_0879':np.uint8,
'VAR_0885':np.uint8, 'VAR_0886':np.uint8, 'VAR_0911':np.uint8, 'VAR_0914':np.uint8, 'VAR_0915':np.uint8, 'VAR_0916':np.uint8, 'VAR_0923':np.uint8,
'VAR_0924':np.uint8, 'VAR_0925':np.uint8, 'VAR_0926':np.uint8, 'VAR_0927':np.uint8, 'VAR_0940':np.uint8, 'VAR_0945':np.uint8, 'VAR_0947':np.uint8,
'VAR_0952':np.uint8, 'VAR_0954':np.uint8, 'VAR_0959':np.uint8, 'VAR_0962':np.uint8, 'VAR_0963':np.uint8, 'VAR_0969':np.uint8, 'VAR_0973':np.uint8,
'VAR_0974':np.uint8, 'VAR_0975':np.uint8, 'VAR_0983':np.uint8, 'VAR_0984':np.uint8, 'VAR_0985':np.uint8, 'VAR_0986':np.uint8, 'VAR_0987':np.uint8,
'VAR_0988':np.uint8, 'VAR_0989':np.uint8, 'VAR_0990':np.uint8, 'VAR_0991':np.uint8, 'VAR_0992':np.uint8, 'VAR_0993':np.uint8, 'VAR_0994':np.uint8,
'VAR_0995':np.uint8, 'VAR_0996':np.uint8, 'VAR_0997':np.uint8, 'VAR_0998':np.uint8, 'VAR_0999':np.uint8, 'VAR_1000':np.uint8, 'VAR_1001':np.uint8,
'VAR_1002':np.uint8, 'VAR_1003':np.uint8, 'VAR_1004':np.uint8, 'VAR_1005':np.uint8, 'VAR_1006':np.uint8, 'VAR_1007':np.uint8, 'VAR_1008':np.uint8,
'VAR_1009':np.uint8, 'VAR_1010':np.uint8, 'VAR_1011':np.uint8, 'VAR_1012':np.uint8, 'VAR_1013':np.uint8, 'VAR_1014':np.uint8, 'VAR_1015':np.uint8,
'VAR_1016':np.uint8, 'VAR_1017':np.uint8, 'VAR_1018':np.uint8, 'VAR_1019':np.uint8, 'VAR_1020':np.uint8, 'VAR_1021':np.uint8, 'VAR_1022':np.uint8,
'VAR_1023':np.uint8, 'VAR_1024':np.uint8, 'VAR_1025':np.uint8, 'VAR_1026':np.uint8, 'VAR_1027':np.uint8, 'VAR_1028':np.uint8, 'VAR_1029':np.uint8,
'VAR_1030':np.uint8, 'VAR_1031':np.uint8, 'VAR_1032':np.uint8, 'VAR_1033':np.uint8, 'VAR_1034':np.uint8, 'VAR_1035':np.uint8, 'VAR_1036':np.uint8,
'VAR_1037':np.uint8, 'VAR_1038':np.uint8, 'VAR_1039':np.uint8, 'VAR_1040':np.uint8, 'VAR_1041':np.uint8, 'VAR_1042':np.uint8, 'VAR_1043':np.uint8,
'VAR_1044':np.uint8, 'VAR_1045':np.uint8, 'VAR_1046':np.uint8, 'VAR_1047':np.uint8, 'VAR_1048':np.uint8, 'VAR_1049':np.uint8, 'VAR_1050':np.uint8,
'VAR_1051':np.uint8, 'VAR_1052':np.uint8, 'VAR_1053':np.uint8, 'VAR_1054':np.uint8, 'VAR_1055':np.uint8, 'VAR_1056':np.uint8, 'VAR_1057':np.uint8,
'VAR_1058':np.uint8, 'VAR_1059':np.uint8, 'VAR_1060':np.uint8, 'VAR_1061':np.uint8, 'VAR_1062':np.uint8, 'VAR_1063':np.uint8, 'VAR_1064':np.uint8,
'VAR_1065':np.uint8, 'VAR_1066':np.uint8, 'VAR_1067':np.uint8, 'VAR_1068':np.uint8, 'VAR_1069':np.uint8, 'VAR_1070':np.uint8, 'VAR_1071':np.uint8,
'VAR_1072':np.uint8, 'VAR_1073':np.uint8, 'VAR_1080':np.uint8, 'VAR_1108':np.uint8, 'VAR_1109':np.uint8, 'VAR_1161':np.uint8, 'VAR_1162':np.uint8,
'VAR_1163':np.uint8, 'VAR_1164':np.uint8, 'VAR_1165':np.uint8, 'VAR_1166':np.uint8, 'VAR_1167':np.uint8, 'VAR_1168':np.uint8, 'VAR_1175':np.uint8,
'VAR_1176':np.uint8, 'VAR_1177':np.uint8, 'VAR_1178':np.uint8, 'VAR_1185':np.uint8, 'VAR_1186':np.uint8, 'VAR_1187':np.uint8, 'VAR_1188':np.uint8,
'VAR_1189':np.uint8, 'VAR_1190':np.uint8, 'VAR_1191':np.uint8, 'VAR_1192':np.uint8, 'VAR_1193':np.uint8, 'VAR_1194':np.uint8, 'VAR_1195':np.uint8,
'VAR_1196':np.uint8, 'VAR_1197':np.uint8, 'VAR_1198':np.uint8, 'VAR_1212':np.uint8, 'VAR_1213':np.uint8, 'VAR_1217':np.uint8, 'VAR_1218':np.uint8,
'VAR_1224':np.uint8, 'VAR_1225':np.uint8, 'VAR_1226':np.uint8, 'VAR_1229':np.uint8, 'VAR_1230':np.uint8, 'VAR_1231':np.uint8, 'VAR_1232':np.uint8,
'VAR_1233':np.uint8, 'VAR_1234':np.uint8, 'VAR_1235':np.uint8, 'VAR_1236':np.uint8, 'VAR_1237':np.uint8, 'VAR_1238':np.uint8, 'VAR_1239':np.uint8,
'VAR_1267':np.uint8, 'VAR_1268':np.uint8, 'VAR_1269':np.uint8, 'VAR_1270':np.uint8, 'VAR_1271':np.uint8, 'VAR_1272':np.uint8, 'VAR_1273':np.uint8,
'VAR_1274':np.uint8, 'VAR_1275':np.uint8, 'VAR_1276':np.uint8, 'VAR_1277':np.uint8, 'VAR_1278':np.uint8, 'VAR_1279':np.uint8, 'VAR_1280':np.uint8,
'VAR_1281':np.uint8, 'VAR_1282':np.uint8, 'VAR_1283':np.uint8, 'VAR_1284':np.uint8, 'VAR_1285':np.uint8, 'VAR_1286':np.uint8, 'VAR_1287':np.uint8,
'VAR_1288':np.uint8, 'VAR_1289':np.uint8, 'VAR_1290':np.uint8, 'VAR_1291':np.uint8, 'VAR_1292':np.uint8, 'VAR_1293':np.uint8, 'VAR_1294':np.uint8,
'VAR_1295':np.uint8, 'VAR_1296':np.uint8, 'VAR_1297':np.uint8, 'VAR_1298':np.uint8, 'VAR_1299':np.uint8, 'VAR_1300':np.uint8, 'VAR_1301':np.uint8,
'VAR_1302':np.uint8, 'VAR_1303':np.uint8, 'VAR_1304':np.uint8, 'VAR_1305':np.uint8, 'VAR_1306':np.uint8, 'VAR_1307':np.uint8, 'VAR_1338':np.uint8,
'VAR_1339':np.uint8, 'VAR_1340':np.uint8, 'VAR_1345':np.uint8, 'VAR_1346':np.uint8, 'VAR_1347':np.uint8, 'VAR_1348':np.uint8, 'VAR_1349':np.uint8,
'VAR_1350':np.uint8, 'VAR_1351':np.uint8, 'VAR_1352':np.uint8, 'VAR_1359':np.uint8, 'VAR_1360':np.uint8, 'VAR_1361':np.uint8, 'VAR_1362':np.uint8,
'VAR_1363':np.uint8, 'VAR_1364':np.uint8, 'VAR_1365':np.uint8, 'VAR_1366':np.uint8, 'VAR_1367':np.uint8, 'VAR_1368':np.uint8, 'VAR_1369':np.uint8,
'VAR_1386':np.uint8, 'VAR_1387':np.uint8, 'VAR_1388':np.uint8, 'VAR_1389':np.uint8, 'VAR_1392':np.uint8, 'VAR_1393':np.uint8, 'VAR_1394':np.uint8,
'VAR_1395':np.uint8, 'VAR_1396':np.uint8, 'VAR_1404':np.uint8, 'VAR_1405':np.uint8, 'VAR_1406':np.uint8, 'VAR_1407':np.uint8, 'VAR_1408':np.uint8,
'VAR_1409':np.uint8, 'VAR_1410':np.uint8, 'VAR_1411':np.uint8, 'VAR_1412':np.uint8, 'VAR_1413':np.uint8, 'VAR_1414':np.uint8, 'VAR_1415':np.uint8,
'VAR_1416':np.uint8, 'VAR_1417':np.uint8, 'VAR_1427':np.uint8, 'VAR_1428':np.uint8, 'VAR_1429':np.uint8, 'VAR_1430':np.uint8, 'VAR_1431':np.uint8,
'VAR_1432':np.uint8, 'VAR_1433':np.uint8, 'VAR_1434':np.uint8, 'VAR_1435':np.uint8, 'VAR_1449':np.uint8, 'VAR_1450':np.uint8, 'VAR_1456':np.uint8,
'VAR_1457':np.uint8, 'VAR_1458':np.uint8, 'VAR_1459':np.uint8, 'VAR_1460':np.uint8, 'VAR_1461':np.uint8, 'VAR_1462':np.uint8, 'VAR_1463':np.uint8,
'VAR_1464':np.uint8, 'VAR_1465':np.uint8, 'VAR_1466':np.uint8, 'VAR_1467':np.uint8, 'VAR_1468':np.uint8, 'VAR_1469':np.uint8, 'VAR_1470':np.uint8,
'VAR_1471':np.uint8, 'VAR_1472':np.uint8, 'VAR_1473':np.uint8, 'VAR_1474':np.uint8, 'VAR_1475':np.uint8, 'VAR_1476':np.uint8, 'VAR_1477':np.uint8,
'VAR_1478':np.uint8, 'VAR_1479':np.uint8, 'VAR_1480':np.uint8, 'VAR_1481':np.uint8, 'VAR_1482':np.uint8, 'VAR_1532':np.uint8, 'VAR_1533':np.uint8,
'VAR_1534':np.uint8, 'VAR_1535':np.uint8, 'VAR_1537':np.uint8, 'VAR_1538':np.uint8, 'VAR_1539':np.uint8, 'VAR_1540':np.uint8, 'VAR_1542':np.uint8,
'VAR_1543':np.uint8, 'VAR_1544':np.uint8, 'VAR_1545':np.uint8, 'VAR_1546':np.uint8, 'VAR_1547':np.uint8, 'VAR_1548':np.uint8, 'VAR_1549':np.uint8,
'VAR_1551':np.uint8, 'VAR_1552':np.uint8, 'VAR_1553':np.uint8, 'VAR_1554':np.uint8, 'VAR_1556':np.uint8, 'VAR_1557':np.uint8, 'VAR_1558':np.uint8,
'VAR_1559':np.uint8, 'VAR_1561':np.uint8, 'VAR_1562':np.uint8, 'VAR_1563':np.uint8, 'VAR_1564':np.uint8, 'VAR_1565':np.uint8, 'VAR_1566':np.uint8,
'VAR_1567':np.uint8, 'VAR_1568':np.uint8, 'VAR_1569':np.uint8, 'VAR_1570':np.uint8, 'VAR_1571':np.uint8, 'VAR_1572':np.uint8, 'VAR_1574':np.uint8,
'VAR_1575':np.uint8, 'VAR_1576':np.uint8, 'VAR_1577':np.uint8, 'VAR_1578':np.uint8, 'VAR_1579':np.uint8, 'VAR_1583':np.uint8, 'VAR_1584':np.uint8,
'VAR_1585':np.uint8, 'VAR_1586':np.uint8, 'VAR_1587':np.uint8, 'VAR_1588':np.uint8, 'VAR_1589':np.uint8, 'VAR_1590':np.uint8, 'VAR_1591':np.uint8,
'VAR_1592':np.uint8, 'VAR_1593':np.uint8, 'VAR_1594':np.uint8, 'VAR_1595':np.uint8, 'VAR_1596':np.uint8, 'VAR_1597':np.uint8, 'VAR_1598':np.uint8,
'VAR_1599':np.uint8, 'VAR_1600':np.uint8, 'VAR_1601':np.uint8, 'VAR_1602':np.uint8, 'VAR_1603':np.uint8, 'VAR_1604':np.uint8, 'VAR_1605':np.uint8,
'VAR_1606':np.uint8, 'VAR_1607':np.uint8, 'VAR_1608':np.uint8, 'VAR_1609':np.uint8, 'VAR_1610':np.uint8, 'VAR_1656':np.uint8, 'VAR_1657':np.uint8,
'VAR_1658':np.uint8, 'VAR_1659':np.uint8, 'VAR_1660':np.uint8, 'VAR_1661':np.uint8, 'VAR_1662':np.uint8, 'VAR_1663':np.uint8, 'VAR_1664':np.uint8,
'VAR_1665':np.uint8, 'VAR_1666':np.uint8, 'VAR_1667':np.uint8, 'VAR_1668':np.uint8, 'VAR_1669':np.uint8, 'VAR_1670':np.uint8, 'VAR_1671':np.uint8,
'VAR_1672':np.uint8, 'VAR_1673':np.uint8, 'VAR_1674':np.uint8, 'VAR_1675':np.uint8, 'VAR_1676':np.uint8, 'VAR_1677':np.uint8, 'VAR_1678':np.uint8,
'VAR_1679':np.uint8, 'VAR_1680':np.uint8, 'VAR_1681':np.uint8, 'VAR_1682':np.uint8, 'VAR_1683':np.uint8, 'VAR_1713':np.uint8, 'VAR_1714':np.uint8,
'VAR_1721':np.uint8, 'VAR_1722':np.uint8, 'VAR_1723':np.uint8, 'VAR_1724':np.uint8, 'VAR_1725':np.uint8, 'VAR_1726':np.uint8, 'VAR_1727':np.uint8,
'VAR_1728':np.uint8, 'VAR_1740':np.uint8, 'VAR_1741':np.uint8, 'VAR_1742':np.uint8, 'VAR_1743':np.uint8, 'VAR_1744':np.uint8, 'VAR_1745':np.uint8,
'VAR_1746':np.uint8, 'VAR_1752':np.uint8, 'VAR_1753':np.uint8, 'VAR_1760':np.uint8, 'VAR_1761':np.uint8, 'VAR_1762':np.uint8, 'VAR_1763':np.uint8,
'VAR_1764':np.uint8, 'VAR_1765':np.uint8, 'VAR_1766':np.uint8, 'VAR_1767':np.uint8, 'VAR_1768':np.uint8, 'VAR_1769':np.uint8, 'VAR_1770':np.uint8,
'VAR_1771':np.uint8, 'VAR_1772':np.uint8, 'VAR_1773':np.uint8, 'VAR_1774':np.uint8, 'VAR_1775':np.uint8, 'VAR_1776':np.uint8, 'VAR_1777':np.uint8,
'VAR_1778':np.uint8, 'VAR_1779':np.uint8, 'VAR_1780':np.uint8, 'VAR_1781':np.uint8, 'VAR_1782':np.uint8, 'VAR_1783':np.uint8, 'VAR_1784':np.uint8,
'VAR_1785':np.uint8, 'VAR_1786':np.uint8, 'VAR_1787':np.uint8, 'VAR_1788':np.uint8, 'VAR_1789':np.uint8, 'VAR_1790':np.uint8, 'VAR_1791':np.uint8,
'VAR_1792':np.uint8, 'VAR_1793':np.uint8, 'VAR_1794':np.uint8, 'VAR_1843':np.uint8, 'VAR_1844':np.uint8, 'VAR_1853':np.uint8, 'VAR_1854':np.uint8,
'VAR_1855':np.uint8, 'VAR_1856':np.uint8, 'VAR_1857':np.uint8, 'VAR_1866':np.uint8, 'VAR_1867':np.uint8, 'VAR_1872':np.uint8, 'VAR_1873':np.uint8,
'VAR_1874':np.uint8, 'VAR_1875':np.uint8, 'VAR_1876':np.uint8, 'VAR_1877':np.uint8, 'VAR_1878':np.uint8, 'VAR_1879':np.uint8, 'VAR_1880':np.uint8,
'VAR_1881':np.uint8, 'VAR_1882':np.uint8, 'VAR_1883':np.uint8, 'VAR_1884':np.uint8, 'VAR_1885':np.uint8, 'VAR_1886':np.uint8, 'VAR_1887':np.uint8,
'VAR_1888':np.uint8, 'VAR_1903':np.uint8, 'VAR_1904':np.uint8, 'VAR_1905':np.uint8, 'VAR_1906':np.uint8, 'VAR_1907':np.uint8, 'VAR_1908':np.uint8,
'VAR_1909':np.uint8, 'VAR_1910':np.uint8, 'VAR_1920':np.uint8, 'VAR_1921':np.uint8, 'VAR_1925':np.uint8, 'VAR_1926':np.uint8, 'VAR_1927':np.uint8,
'VAR_1928':np.uint16, 'VAR_1930':np.uint16}<load_from_csv> | def predict_on_video(model, model2, model3, model4, model5, model6, model7, model8, model9, model10, model11, video_path):
try:
x, x_sqr, x_299_sqr, x_lstm, frame_skip = extract_frames(video_path)
if x is None or x_sqr is None: return 0.5
else:
with torch.no_grad() :
y_pred = model(x.to(device))
y_pred = torch.sigmoid(y_pred.squeeze().mean())
y_pred2 = model2(x_sqr.to(device))
y_pred2 = torch.sigmoid(y_pred2.squeeze().mean())
y_pred3 = model3(x_sqr.to(device))
y_pred3 = torch.sigmoid(y_pred3.squeeze().mean())
y_pred4 = model4(x_299_sqr.to(device))
y_pred4 = torch.sigmoid(y_pred4.squeeze().mean())
y_pred5 = model5(x.to(device))
y_pred5 = torch.sigmoid(y_pred5.squeeze().mean())
y_pred6 = model6(x_sqr.to(device))
y_pred6 = torch.sigmoid(y_pred6.squeeze().mean())
seq_len = len(x_lstm)//2
x_sqr_1, x_sqr_2 = x_lstm[:seq_len], x_lstm[seq_len:]
skip = max(1, seq_len//15)
start_indices = list(range(0, seq_len, skip))
gap = max(1, int(np.round(20/frame_skip)))
for ss, start in enumerate(start_indices):
indices = list(range(start, 500, gap))
indices = [i%seq_len for i in indices]
indices = indices[:CFG.seq_len]
seqs1, seqs2 = x_sqr_1[indices].unsqueeze(0), x_sqr_2[indices].unsqueeze(0)
if ss == 0:
batch_seqs_1 = seqs1
batch_seqs_2 = seqs2
else:
batch_seqs_1 = torch.cat(( batch_seqs_1, seqs1), 0)
batch_seqs_2 = torch.cat(( batch_seqs_2, seqs2), 0)
batch_seqs = torch.cat(( batch_seqs_1, batch_seqs_2), 0)
y_pred7 = model7(batch_seqs.to(device))
y_pred7 = torch.sigmoid(y_pred7.squeeze().mean())
y_pred8 = model8(batch_seqs.to(device))
y_pred8 = torch.sigmoid(y_pred8.squeeze().mean())
y_pred9 = model9(x_sqr.to(device))
y_pred9 = torch.sigmoid(y_pred9.squeeze().mean())
y_pred10 = model10(x_299_sqr.to(device))
y_pred10 = torch.sigmoid(y_pred10.squeeze().mean())
y_pred11 = model11(x.to(device))
y_pred11 = torch.sigmoid(y_pred11.squeeze().mean())
w = [3, 2, 8, 2, 2, 2, 4, 4, 2, 2, 3]
return(w[0]*y_pred.item() + w[1]*y_pred2.item() + w[2]*y_pred3.item() + w[3]*y_pred4.item()
+ w[4]*y_pred5.item() + w[5]*y_pred6.item() +
w[6]*y_pred7.item() + w[7]*y_pred8.item() +
w[8]*y_pred9.item() + w[9]*y_pred10.item() + w[10]*y_pred11.item())/sum(w)
except Exception as e:
print("Prediction error on video", e)
return 0.5
return 0.5 | Deepfake Detection Challenge |
# Springleaf: load the full training table, plus hand-curated lists of column
# indices to exclude before modeling. (The garbled ".. /input" path from the
# extraction is restored to "../input".)
train = pd.read_csv("../input/springleaf-marketing-response/train.csv.zip")
# Columns with mixed / inconsistent content.
mixCol = [8,9,10,11,12,18,19,20,21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 38, 39, 40, 41, 42, 43, 44, 45,
          73, 74, 98, 99, 100, 106, 107, 108, 156, 157, 158, 159, 166, 167, 168, 169, 176, 177, 178, 179, 180,
          181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 202, 205, 206, 207,
          208, 209, 210, 211, 212, 213, 214, 215, 216, 218, 219, 220, 221, 222, 223, 224, 225, 240, 371, 372, 373, 374,
          375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395,
          396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436,
          437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457,
          458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
          479, 480, 481, 492, 493, 494, 495, 496, 497, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509,
          510, 511, 512, 513, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 840]
# Alphabetic, place-name and date columns.
alphaCol = [283, 305, 325, 352, 353, 354, 1934]
placeCol = [200, 274, 342]
dtCol = [75, 204, 217]
# Union of all hand-curated exclusion lists; membership is tested against a
# set (O(1)) instead of scanning the ~200-element list 1934 times.
rmCol = mixCol + alphaCol + placeCol + dtCol
_rmSet = set(rmCol)
# Keep every VAR index 1..1934 that is not excluded.
selectColumns = [i for i in range(1, 1935) if i not in _rmSet]
# Springleaf column names are 'VAR_' + a zero-padded 4-digit index, e.g. "0008".
cols = [str(n).zfill(4) for n in selectColumns]
strColName = ['VAR_' + strNum for strNum in cols]

class FastMTCNN(object):
    """Thin wrapper around facenet_pytorch's MTCNN.

    Optionally downscales frames before detection (resize < 1 trades accuracy
    for speed) and converts the detector's per-frame ndarray results into
    plain Python lists, with [] for frames where no face was found.
    """

    def __init__(self, resize=1, *args, **kwargs):
        self.resize = resize
        self.mtcnn = MTCNN(*args, **kwargs)

    def __call__(self, frames):
        if self.resize != 1:
            frames = [f.resize([int(d * self.resize) for d in f.size]) for f in frames]
        boxes, probs = self.mtcnn.detect(frames)
        # isinstance also rejects None, so the old `b is not None and
        # type(b)==np.ndarray` double test collapses to one check.
        boxes = [b.astype(int).tolist() if isinstance(b, np.ndarray) else [] for b in boxes]
        probs = [p.tolist() if isinstance(p, np.ndarray) else [] for p in probs]
        return boxes, probs

fast_mtcnn = FastMTCNN(
    resize=SCALE,
    margin=14,
    keep_all=True,
    device=device
)
8,522,656 | nrows = 500
trainData = pd.read_csv(".. /input/springleaf-marketing-response/train.csv.zip", skiprows=[107], usecols=strColName, nrows=nrows, dtype=types)
label = pd.read_csv(".. /input/springleaf-marketing-response/train.csv.zip", skiprows=[107], usecols=['target'], nrows=nrows)
testData = pd.read_csv(".. /input/springleaf-marketing-response/test.csv.zip", skiprows=[107], usecols=strColName, nrows=nrows)
numericFeatures = trainData._get_numeric_data()
removeNA = numericFeatures.fillna(0)
sel = VarianceThreshold(threshold= (.8 *(1 -.8)))
features = sel.fit_transform(removeNA)
y = np.array(label ).ravel()
X_scaled = preprocessing.scale(features)
normalizer = preprocessing.Normalizer().fit(X_scaled)
X_norm = normalizer.transform(X_scaled)
X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.33, random_state=42)
y_train = np.array(y_train)
<train_model> | test_videos = sorted([x for x in os.listdir(TEST_DIR)if x[-4:] == ".mp4"])
len(test_videos ) | Deepfake Detection Challenge |
8,522,656 | clf = svm.SVC(C=1.0, kernel='linear', degree=10, gamma=1.00, coef0=0.0, shrinking=True, probability=False,
tol=0.001, cache_size=1000, class_weight=None, verbose=False, max_iter=-1, random_state=None)
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print('roc_auc_score', roc_auc_score(y_test, predictions))
print('RMSE', mean_squared_error(y_test, predictions))<train_on_grid> | def predict_on_video_set(model, model2, model3, model4, model5, model6, model7, model8, model9, model10, model11, videos, num_workers):
def process_file(i):
filename = videos[i]
y_pred = predict_on_video(model, model2, model3, model4, model5, model6, model7, model8, model9, model10, model11, os.path.join(TEST_DIR, filename))
print(i, y_pred)
return y_pred
with ThreadPoolExecutor(max_workers=num_workers)as ex:
predictions = ex.map(process_file, range(len(videos)))
return list(predictions)
predictions = predict_on_video_set(net, net2, net3, net4, net5, net6, net7, net8, net9, net10, net11, test_videos, num_workers=NUM_WORKERS ) | Deepfake Detection Challenge |
8,522,656 | testData = pd.read_csv(".. /input/springleaf-marketing-response/test.csv.zip", usecols=strColName, engine='python', dtype=types)
numericFeatures = testData._get_numeric_data()
removeNA = numericFeatures.fillna(0)
features = sel.transform(removeNA)
y = np.array(label ).ravel()
X_scaled = preprocessing.scale(features)
normalizer = preprocessing.Normalizer().fit(X_scaled)
X_norm = normalizer.transform(X_scaled)
<predict_on_test> | predictions = np.clip(predictions, 0.005, 0.995)
submission_df = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df.to_csv("submission.csv", index=False ) | Deepfake Detection Challenge |
8,681,162 | predictions = clf.predict(X_norm )<load_from_csv> | import random
import re
from copy import deepcopy
from typing import Union, List, Tuple, Optional, Callable
from collections import OrderedDict, defaultdict
import math
import cv2
import torch
import torch.nn as nn
from torch.utils.data import Dataset,DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
from torchvision import transforms, models
from torchvision.transforms import Normalize
from tqdm import tqdm
from sklearn.cluster import DBSCAN | Deepfake Detection Challenge |
8,681,162 | df_submit = pd.read_csv('.. /input/springleaf-marketing-response/sample_submission.csv.zip')
df_submit['target'] = predictions<save_to_csv> | TARGET_H, TARGET_W = 224, 224
FRAMES_PER_VIDEO = 30
TEST_VIDEOS_PATH = '.. /input/deepfake-detection-challenge/test_videos'
NN_MODEL_PATHS = [
'.. /input/kdold-deepfake-effb2/fold0-effb2-000epoch.pt',
'.. /input/kdold-deepfake-effb2/fold0-effb2-001epoch.pt',
'.. /input/kdold-deepfake-effb2/fold0-effb2-002epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold0-flip-effb2-000epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold0-flip-effb2-001epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold0-flip-effb2-002epoch.pt',
'.. /input/kdold-deepfake-effb2/fold1-effb2-000epoch.pt',
'.. /input/kdold-deepfake-effb2/fold1-effb2-001epoch.pt',
'.. /input/kdold-deepfake-effb2/fold1-effb2-002epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold1-flip-effb2-000epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold1-flip-effb2-001epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold1-flip-effb2-002epoch.pt',
'.. /input/kdold-deepfake-effb2/fold2-effb2-000epoch.pt',
'.. /input/kdold-deepfake-effb2/fold2-effb2-001epoch.pt',
'.. /input/kdold-deepfake-effb2/fold2-effb2-002epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold2-flip-effb2-000epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold2-flip-effb2-001epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold2-flip-effb2-002epoch.pt',
'.. /input/kdold-deepfake-effb2/fold3-effb2-000epoch.pt',
'.. /input/kdold-deepfake-effb2/fold3-effb2-001epoch.pt',
'.. /input/kdold-deepfake-effb2/fold3-effb2-002epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold3-flip-effb2-000epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold3-flip-effb2-001epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold3-flip-effb2-002epoch.pt',
'.. /input/kdold-deepfake-effb2/fold4-effb2-000epoch.pt',
'.. /input/kdold-deepfake-effb2/fold4-effb2-001epoch.pt',
'.. /input/kdold-deepfake-effb2/fold4-effb2-002epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold4-flip-effb2-000epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold4-flip-effb2-001epoch.pt',
'.. /input/kfolddeepfakeeffb2-flip/fold4-flip-effb2-002epoch.pt',
]
| Deepfake Detection Challenge |
8,681,162 | df_submit.to_csv('submission.csv', index=False )<set_options> | SEED = 42
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
seed_everything(SEED ) | Deepfake Detection Challenge |
8,681,162 | %matplotlib inline<import_modules> | !pip install.. /input/pytorchefficientnet/EfficientNet-PyTorch-master > /dev/null
def get_net() :
net = EfficientNet.from_name('efficientnet-b2')
net._fc = nn.Linear(in_features=net._fc.in_features, out_features=2, bias=True)
return net | Deepfake Detection Challenge |
8,681,162 | from xgboost import XGBRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import RFECV
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, make_scorer
from mlxtend.preprocessing import minmax_scaling<set_options> | class DatasetRetriever(Dataset):
def __init__(self, df):
self.video_paths = df['video_path']
self.filenames = df.index
self.face_dr = FaceDetector(frames_per_video=FRAMES_PER_VIDEO)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.normalize_transform = Normalize(mean, std)
self.video_reader = VideoReader()
self.video_read_fn = lambda x: self.video_reader.read_frames(x, num_frames=FRAMES_PER_VIDEO)
def __len__(self):
return self.filenames.shape[0]
def __getitem__(self, idx):
video_path = self.video_paths[idx]
filename = self.filenames[idx]
my_frames, my_idxs = self.video_read_fn(video_path)
faces = self.face_dr.get_faces(
my_frames, my_idxs,
0.7, 0.7, 0.7, 0.6
)
n = len(faces)
video = torch.zeros(( n, 3, TARGET_H, TARGET_W))
for i, face in enumerate(faces[:n]):
face = 255 - face
face = face.astype(np.float32)/255.
face = torch.tensor(face)
face = face.permute(2,0,1)
face = self.normalize_transform(face)
video[i] = face
return filename, video | Deepfake Detection Challenge |
8,681,162 | warnings.filterwarnings("ignore" )<count_unique_values> | class DeepFakePredictor:
def __init__(self):
self.models = [self.prepare_model(get_net() , path)for path in NN_MODEL_PATHS]
self.models_count = len(self.models)
def predict(self, dataset):
result = []
with torch.no_grad() :
for filename, video in dataset:
video = video.to(self.device, dtype=torch.float32)
try:
label = self.predict_ensemble(video)
except Exception as e:
print(f'Warning! {e}, {type(e)}')
label = 0.5
result.append({
'filename': filename,
'label': label,
})
return pd.DataFrame(result ).set_index('filename')
def prepare_model(self, model, path):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model.to(self.device);
if torch.cuda.is_available() :
model = model.cuda()
if torch.cuda.is_available() :
checkpoint = torch.load(path)
else:
checkpoint = torch.load(path, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state_dict'])
model.eval()
print(f'Model prepared.Device is {self.device}')
return model
@staticmethod
def net_forward(net, inputs):
bs = inputs.size(0)
x = net.extract_features(inputs)
x = net._avg_pooling(x)
emb = x.view(bs, -1)
x = net._dropout(emb)
x = net._fc(x)
return emb, x
def postprocess(self, embs, predictions):
clusters = defaultdict(list)
for prediction, cluster_id in zip(predictions, DBSCAN(eps=1.2, min_samples=1 ).fit_predict(embs)) :
clusters[cluster_id].append(prediction)
sorted_clusters = sorted(clusters.items() , key=lambda x: -len(x[1]))
if len(sorted_clusters)< 2:
return sorted_clusters[0][1]
if len(sorted_clusters[1][1])/ len(predictions)> 0.25:
return sorted_clusters[0][1] + sorted_clusters[1][1]
return sorted_clusters[0][1]
def predict_ensemble(self, video):
embs, predictions = 0, 0
for model in self.models:
emb, prediction = self.net_forward(model, video)
predictions += prediction / self.models_count
embs += emb / self.models_count
predictions = nn.functional.softmax(predictions, dim=1 ).data.cpu().numpy() [:,1]
embs = embs.cpu().numpy()
predictions = self.postprocess(embs, predictions)
return np.mean(predictions ) | Deepfake Detection Challenge |
8,681,162 | def show_uniqs(cols):
for col in cols:
print(col)
show_uniq(col)
print('=======================================' )<load_from_csv> | deep_fake_predictor = DeepFakePredictor() | Deepfake Detection Challenge |
8,681,162 | train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv' )<count_missing_values> | def process_dfs(df, num_workers=2):
def process_df(sub_df):
dataset = DatasetRetriever(sub_df)
result = deep_fake_predictor.predict(dataset)
return result
with ThreadPoolExecutor(max_workers=num_workers)as ex:
results = ex.map(process_df, np.split(df, num_workers))
return results | Deepfake Detection Challenge |
8,681,162 | <define_variables><EOS> | result.to_csv('submission.csv' ) | Deepfake Detection Challenge |
8,236,131 | <SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<load_from_csv> | !pip install.. /input/pytorchcv/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet | Deepfake Detection Challenge |
8,236,131 | test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv' )<count_missing_values> | device = 'cuda' if torch.cuda.is_available() else 'cpu' | Deepfake Detection Challenge |
8,236,131 | count_empty_columns_test = test[empty_columns_test].isnull().sum(axis = 0)
count_empty_columns_test<define_variables> | def gem(x, p=3, eps=1e-6):
return F.avg_pool2d(x.clamp(min=eps ).pow(p),(x.size(-2), x.size(-1)) ).pow(1./p)
class GeM(nn.Module):
def __init__(self, p=3, eps=1e-6):
super(GeM,self ).__init__()
self.p = Parameter(torch.ones(1)*p)
self.eps = eps
def forward(self, x):
return gem(x, p=self.p, eps=self.eps)
def __repr__(self):
return self.__class__.__name__ + '(' + 'p=' + '{:.4f}'.format(self.p.data.tolist() [0])+ ', ' + 'eps=' + str(self.eps)+ ')'
class MishFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x * torch.tanh(F.softplus(x))
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_variables[0]
sigmoid = torch.sigmoid(x)
tanh_sp = torch.tanh(F.softplus(x))
return grad_output *(tanh_sp + x * sigmoid *(1 - tanh_sp * tanh_sp))
class Mish(nn.Module):
def forward(self, x):
return MishFunction.apply(x)
def to_Mish(model):
for child_name, child in model.named_children() :
if isinstance(child, nn.ReLU):
setattr(model, child_name, Mish())
else:
to_Mish(child)
class Head(torch.nn.Module):
def __init__(self, in_f, out_f, hidden):
super(Head, self ).__init__()
self.f = nn.Flatten()
self.l = nn.Linear(in_f, hidden)
self.m = Mish()
self.d = nn.Dropout(0.75)
self.o = nn.Linear(hidden, out_f)
self.b1 = nn.BatchNorm1d(in_f)
self.b2 = nn.BatchNorm1d(hidden)
self.r = nn.ReLU()
def forward(self, x):
x = self.f(x)
x = self.b1(x)
x = self.d(x)
x = self.l(x)
x = self.r(x)
x = self.b2(x)
x = self.d(x)
out = self.o(x)
return out
class FCN(torch.nn.Module):
def __init__(self, base, in_f, hidden):
super(FCN, self ).__init__()
self.base = base
self.h1 = Head(in_f, 1, hidden)
def forward(self, x):
x = self.base(x)
return self.h1(x)
net = []
model = get_model("xception", pretrained=False)
model = nn.Sequential(*list(model.children())[:-1])
model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
model = FCN(model, 2048, 512)
model = model.cuda()
model.load_state_dict(torch.load('.. /input/deepfake-models/model2.pth'))
net.append(model)
model = get_model("efficientnet_b1", pretrained=False)
model = nn.Sequential(*list(model.children())[:-1])
model[0].final_pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
model = FCN(model, 1280, 512)
model = model.cuda()
model.load_state_dict(torch.load('.. /input/deepfake-models/model (.259 ).pth'))
net.append(model)
model = get_model("efficientnet_b1", pretrained=False)
model = nn.Sequential(*list(model.children())[:-1])
model[0].final_pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
model = FCN(model, 1280, 512)
model = model.cuda()
model.load_state_dict(torch.load('.. /input/deepfake-models/model_16 (.2755 ).pth'))
net.append(model)
| Deepfake Detection Challenge |
8,236,131 | for col in count_empty_columns_test.index:
if col not in count_empty_columns_train:
print(col )<define_variables> | Deepfake Detection Challenge | |
8,236,131 | garage_colums = []
regexp = re.compile(r"([-a-zA-Z]+)?"+r"Garage"+r"([-a-zA-Z]+)?")
for col in train.columns:
if regexp.search(col):
garage_colums.append(col)
garage_colums = np.array(garage_colums)
garage_colums<data_type_conversions> | mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std ) | Deepfake Detection Challenge |
8,236,131 | for col in garage_num_cols:
train[col] = train[col].fillna(0)
test[col] = test[col].fillna(0 )<count_unique_values> | detection_graph = tf.Graph()
with detection_graph.as_default() :
od_graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile('.. /input/mobilenet-face/frozen_inference_graph_face.pb', 'rb')as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess=tf.compat.v1.Session(graph=detection_graph, config=config)
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
boxes_tensor = detection_graph.get_tensor_by_name('detection_boxes:0')
scores_tensor = detection_graph.get_tensor_by_name('detection_scores:0')
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
def get_mobilenet_face(image):
global boxes,scores,num_detections
(im_height,im_width)=image.shape[:-1]
imgs=np.array([image])
(boxes, scores)= sess.run(
[boxes_tensor, scores_tensor],
feed_dict={image_tensor: imgs})
max_=np.where(scores==scores.max())[0][0]
box=boxes[0][max_]
ymin, xmin, ymax, xmax = box
(left, right, top, bottom)=(xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
left, right, top, bottom = int(left), int(right), int(top), int(bottom)
return(left, right, top, bottom)
def crop_image(frame,bbox):
left, right, top, bottom=bbox
return frame[top:bottom,left:right]
def get_img(frame):
return cv2.resize(crop_image(frame,get_mobilenet_face(frame)) ,(160,160))
def detect_video(video):
capture = cv2.VideoCapture(video)
v_len = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
frame_idxs = np.linspace(0,v_len,frame_count, endpoint=False, dtype=np.int)
imgs=[]
i=0
for frame_idx in range(int(v_len)) :
ret = capture.grab()
if not ret:
pass
if frame_idx >= frame_idxs[i]:
ret, frame = capture.retrieve()
if not ret or frame is None:
pass
else:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
try:
face=get_img(frame)
except Exception as err:
print(err)
continue
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
imgs.append(face)
i += 1
if i >= len(frame_idxs):
break
if len(imgs)<frame_count:
return None
return imgs | Deepfake Detection Challenge |
8,236,131 | <define_variables><EOS> | probs = np.asarray(probs)
probs[probs!=probs] = 0.5
plt.hist(probs, 40)
filenames = [os.path.basename(f)for f in filenames]
submission = pd.DataFrame({'filename': filenames, 'label': probs})
submission.to_csv('submission.csv', index=False)
submission | Deepfake Detection Challenge |
8,474,447 | <SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<data_type_conversions> | from fastai.vision import * | Deepfake Detection Challenge |
8,474,447 | for col in basement_num_cols:
train[col] = train[col].fillna(0)
test[col] = test[col].fillna(0 )<define_variables> | train_sample_metadata = pd.read_json('.. /input/deepfake-detection-challenge/train_sample_videos/metadata.json' ).T.reset_index()
train_sample_metadata.columns = ['fname','label','split','original']
train_sample_metadata.head() | Deepfake Detection Challenge |
8,474,447 | masvnr_colums = []
regexp = re.compile(r"([-a-zA-Z]+)?"+r"MasVnr"+r"([-a-zA-Z]+)?")
for col in train.columns:
if regexp.search(col):
masvnr_colums.append(col)
masvnr_colums = np.array(masvnr_colums)
masvnr_colums<filter> | fake_sample_df = train_sample_metadata[train_sample_metadata.label == 'FAKE']
real_sample_df = train_sample_metadata[train_sample_metadata.label == 'REAL'] | Deepfake Detection Challenge |
8,474,447 | all(train.loc[train['MasVnrArea'].isnull() ].index == train.loc[train['MasVnrType'].isnull() ].index )<data_type_conversions> | train_dir = Path('/kaggle/input/deepfake-detection-challenge/train_sample_videos/')
test_dir = Path('/kaggle/input/deepfake-detection-challenge/test_videos/')
train_video_files = get_files(train_dir, extensions=['.mp4'])
test_video_files = get_files(test_dir, extensions=['.mp4'] ) | Deepfake Detection Challenge |
8,474,447 | train['MasVnrArea'] = train['MasVnrArea'].fillna(0)
test['MasVnrArea'] = test['MasVnrArea'].fillna(0 )<data_type_conversions> | dummy_video_file = train_video_files[0] | Deepfake Detection Challenge |
8,474,447 | train['MasVnrType'] = train['MasVnrType'].fillna('None')
test['MasVnrType'] = test['MasVnrType'].fillna('None' )<define_variables> | sys.path.insert(0,'/kaggle/working/reader/python')
set_bridge('torch')
device = torch.device("cuda" ) | Deepfake Detection Challenge |
8,474,447 | pool_colums = []
regexp = re.compile(r"([-a-zA-Z]+)?"+r"Pool"+r"([-a-zA-Z]+)?")
for col in train.columns:
if regexp.search(col):
pool_colums.append(col)
pool_colums = np.array(pool_colums)
pool_colums<data_type_conversions> | retinaface_stats = tensor([123,117,104] ).to(device)
def decord_cpu_video_reader(path, freq=None):
video = VideoReader(str(path), ctx=cpu())
len_video = len(video)
if freq: t = video.get_batch(range(0, len(video), freq)).permute(0,3,1,2)
else: t = video.get_batch(range(len_video))
return t, len_video
def get_decord_video_batch_cpu(path, freq=10, sz=640, stats:Tensor=None, device=defaults.device):
"get resized and mean substracted batch tensor of a sampled video(scale of 255)"
t_raw, len_video = decord_cpu_video_reader(path, freq)
H,W = t_raw.size(2), t_raw.size(3)
t = F.interpolate(t_raw.to(device ).to(torch.float32),(sz,sz))
if stats is not None: t -= stats[...,None,None]
return t, t_raw,(H, W ) | Deepfake Detection Challenge |
8,474,447 | train['PoolQC'] = train['PoolQC'].fillna('NA')
test['PoolQC'] = test['PoolQC'].fillna('NA' )<data_type_conversions> | sys.path.insert(0,"/kaggle/input/retina-face-2/Pytorch_Retinaface_2/" ) | Deepfake Detection Challenge |
8,474,447 | train['Alley'] = train['Alley'].fillna('NA')
test['Alley'] = test['Alley'].fillna('NA' )<define_variables> | import os
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
import time | Deepfake Detection Challenge |
8,474,447 | fireplace_colums = []
regexp = re.compile(r"([-a-zA-Z]+)?"+r"Fireplace"+r"([-a-zA-Z]+)?")
for col in train.columns:
if regexp.search(col):
fireplace_colums.append(col)
fireplace_colums = np.array(fireplace_colums)
fireplace_colums<data_type_conversions> | def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys)> 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
print('remove prefix '{}''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix)else x
return {f(key): value for key, value in state_dict.items() }
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys() :
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model | Deepfake Detection Challenge |
8,474,447 | train['FireplaceQu'] = train['FireplaceQu'].fillna('NA')
test['FireplaceQu'] = test['FireplaceQu'].fillna('NA' )<data_type_conversions> | cudnn.benchmark = True | Deepfake Detection Challenge |
8,474,447 | train['Fence'] = train['Fence'].fillna('NA')
test['Fence'] = test['Fence'].fillna('NA' )<define_variables> | def get_model(modelname="mobilenet"):
torch.set_grad_enabled(False)
cfg = None
cfg_mnet['pretrain'] = False
cfg_re50['pretrain'] = False
if modelname == "mobilenet":
pretrained_path = ".. /input/retina-face-2/Pytorch_Retinaface_2/weights/mobilenet0.25_Final.pth"
cfg = cfg_mnet
if modelname == "resnet50":
pretrained_path = ".. /input/retina-face/Pytorch_Retinaface/weights/Resnet50_Final.pth"
cfg = cfg_re50
net = RetinaFace(cfg=cfg, phase='test')
net = load_model(net, pretrained_path, False)
net.eval().to(device)
return net, cfg | Deepfake Detection Challenge |
8,474,447 | misc_colums = []
regexp = re.compile(r"([-a-zA-Z]+)?"+r"Misc"+r"([-a-zA-Z]+)?")
for col in train.columns:
if regexp.search(col):
misc_colums.append(col)
misc_colums = np.array(misc_colums)
misc_colums<data_type_conversions> | def predict(model, t, sz, cfg,
confidence_threshold = 0.5, top_k = 5, nms_threshold = 0.5, keep_top_k = 5):
"get prediction for a batch t by model with image sz"
resize = 1
scale_rate = 1
im_height, im_width = sz, sz
scale = torch.Tensor([sz, sz, sz, sz])
scale = scale.to(device)
locs, confs, landmss = torch.Tensor([]), torch.Tensor([]), torch.Tensor([])
locs = locs.to(device)
confs = confs.to(device)
landmss = landmss.to(device)
locs_, confs_, landmss_ = model(t)
locs = torch.cat(( locs, locs_), 0)
confs = torch.cat(( confs, confs_), 0)
landmss = torch.cat(( landmss, landmss_), 0)
bbox_result, landms_result = [], []
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
for idx in range(t.size(0)) :
loc = locs[idx]
conf = confs[idx]
landms = landmss[idx]
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0 ).data.cpu().numpy() [:, 1]
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
scale1 = torch.Tensor([t.shape[3], t.shape[2], t.shape[3], t.shape[2],
t.shape[3], t.shape[2], t.shape[3], t.shape[2],
t.shape[3], t.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
inds = np.where(scores > confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
order = scores.argsort() [::-1][:top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
dets = np.hstack(( boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(dets, nms_threshold)
dets = dets[keep, :]
landms = landms[keep]
dets = dets[:keep_top_k, :]
landms = landms[:keep_top_k, :]
bbox_result.append(dets[:,:-1])
landms_result.append(landms)
return bbox_result, landms_result | Deepfake Detection Challenge |
8,474,447 | train['MiscFeature'] = train['MiscFeature'].fillna('NA')
test['MiscFeature'] = test['MiscFeature'].fillna('NA' )<drop_column> | %%time
model, cfg = get_model("mobilenet" ) | Deepfake Detection Challenge |
8,474,447 | train = train.drop(train.loc[train['Electrical'].isnull() ].index )<feature_engineering> | def bboxes_to_original_scale(bboxes, H, W, sz):
res = []
for bb in bboxes:
h_scale, w_scale = H/sz, W/sz
orig_bboxes =(bb*array([w_scale, h_scale, w_scale, h_scale])[None,...] ).astype(int)
res.append(orig_bboxes)
return res | Deepfake Detection Challenge |
8,474,447 | for i in test['Neighborhood'].unique() :
if test.MSZoning[test['Neighborhood'] == i].isnull().sum() > 0:
test.loc[test['Neighborhood'] == i,'MSZoning'] = \
test.loc[test['Neighborhood'] == i,'MSZoning'].fillna(test.loc[test['Neighborhood'] == i,'MSZoning'].mode() [0] )<data_type_conversions> | def landmarks_to_original_scale(landmarks, H, W, sz):
res = []
for landms in landmarks:
h_scale, w_scale = H/sz, W/sz
orig_landms =(landms*array([w_scale, h_scale]*5)[None,...] ).astype(int)
res.append(orig_landms)
return res | Deepfake Detection Challenge |
8,474,447 | train['LotFrontage'].fillna(train['LotFrontage'].median() , inplace=True)
test['LotFrontage'].fillna(test['LotFrontage'].median() , inplace=True )<prepare_x_and_y> | from tqdm import tqdm | Deepfake Detection Challenge |
8,474,447 | y_train = train['SalePrice']
train = train.drop('SalePrice',axis=1)
train = train.drop('Id',axis=1 )<drop_column> | freq = 5
model_args = dict(confidence_threshold = 0.5, top_k = 5, nms_threshold = 0.5, keep_top_k = 5)
sz = cfg['image_size']
imgnet_stats = [tensor(o)for o in imagenet_stats]
rescale_param = 1.3 | Deepfake Detection Challenge |
8,474,447 | test_id = test['Id']
test = test.drop('Id', axis=1 )<concatenate> | !pip install -q.. /input/efficientnetpytorchpip/efficientnet_pytorch-0.6.3/ | Deepfake Detection Challenge |
8,474,447 | df = train.append(test )<categorify> | from fastai.vision.models.efficientnet import * | Deepfake Detection Challenge |
8,474,447 | df['GarageCond'] = df['GarageCond'].map({'NA':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['GarageQual'] = df['GarageQual'].map({'NA':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['BsmtCond'] = df['BsmtCond'].map({'NA':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['BsmtExposure'] = df['BsmtExposure'].map({'NA':0, 'No':1, 'Mn':2, 'Av':3, 'Gd':4})
df['BsmtFinType1'] = df['BsmtFinType1'].map({'NA':0, 'Unf':1, 'LwQ':2, 'Rec':3, 'BLQ':4, 'ALQ':5, 'GLQ':6})
df['BsmtFinType2'] = df['BsmtFinType2'].map({'NA':0, 'Unf':1, 'LwQ':2, 'Rec':3, 'BLQ':4, 'ALQ':5, 'GLQ':6})
df['BsmtQual'] = df['BsmtQual'].map({'NA':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['PoolQC'] = df['PoolQC'].map({'NA':0, 'Fa':1, 'TA':2, 'Gd':3, 'Ex':4})
df['Alley'] = df['Alley'].map({'NA':0, 'Grvl':1, 'Pave':2})
df['FireplaceQu'] = df['FireplaceQu'].map({'NA':0, 'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['ExterCond'] = df['ExterCond'].map({'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['ExterQual'] = df['ExterQual'].map({'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['KitchenQual'] = df['KitchenQual'].map({'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['LandSlope'] = df['LandSlope'].map({'Sev':1, 'Mod':2, 'Gtl':3})
df['PavedDrive'] = df['PavedDrive'].map({'N':1, 'P':2, 'Y':3})
df['Functional'] = df['Functional'].map({'Sal':1, 'Sev':2, 'Maj2':3, 'Maj1':4, 'Mod':5, 'Min2':6, 'Min1':7, 'Typ':8})
df['HeatingQC'] = df['HeatingQC'].map({'Po':1, 'Fa':2, 'TA':3, 'Gd':4, 'Ex':5})
df['Street'] = df['Street'].map({'Grvl':1, 'Pave':2})
df['Utilities'] = df['Utilities'].map({'ELO':1, 'NoSeWa':2, 'NoSewr':3, 'AllPub':4})
df=df.drop('MoSold',axis=1)
df['MSSubClass'] = df['MSSubClass'].map({20:'class1', 30:'class2', 40:'class3', 45:'class4',
50:'class5', 60:'class6', 70:'class7', 75:'class8',
80:'class9', 85:'class10', 90:'class11', 120:'class12',
150:'class13', 160:'class14', 180:'class15', 190:'class16'} )<drop_column> | class DummyDatabunch:
c = 2
path = '.'
device = defaults.device
loss_func = None
data = DummyDatabunch() | Deepfake Detection Challenge |
8,474,447 | df = df.reset_index()
df = df.drop('index',axis = 1 )<filter> | effnet_model = EfficientNet.from_name("efficientnet-b5", override_params={'num_classes': 2})
learner = Learner(data, effnet_model); learner.model_dir = '.'
learner.load('.. /input/deepfakerandmergeaugmodels/single_frame_effnetb5_randmerge')
effnetb5_inference_model = learner.model.eval() | Deepfake Detection Challenge |
8,474,447 | drop_id = df[df['LotArea'] > 100000].index<drop_column> | effnet_model = EfficientNet.from_name("efficientnet-b7", override_params={'num_classes': 2})
learner = Learner(data, effnet_model); learner.model_dir = '.'
learner.load('.. /input/deepfakerandmergeaugmodels/single_frame_effnetb7_randmerge_fp16')
effnetb7_inference_model = learner.model.float().eval() | Deepfake Detection Challenge |
8,474,447 | drop_id = drop_id[drop_id < 1459]<feature_engineering> | learner = cnn_learner(data, models.resnet34, pretrained=False); learner.model_dir = '.'
learner.load('.. /input/deepfakerandmergeaugmodels/single_frame_resnet34_randmerge')
resnet_inference_model = learner.model.eval() | Deepfake Detection Challenge |
8,474,447 | df['MasVnrArea'][df[df['MasVnrArea'] > 1500].index] = df['MasVnrArea'].mean()
df['Utilities'][df[df['Utilities']==2].index] = df['Utilities'].mean()<drop_column> | predictions = []
video_fnames = [] | Deepfake Detection Challenge |
8,474,447 | df = df.drop(drop_id )<drop_column> | fname2pred = dict(zip(video_fnames, predictions)) | Deepfake Detection Challenge |
8,474,447 | y_train = y_train.drop(drop_id )<categorify> | submission_df = pd.read_csv("/kaggle/input/deepfake-detection-challenge/sample_submission.csv" ) | Deepfake Detection Challenge |
8,474,447 | dummy_drop = []
for i in cat_columns:
dummy_drop += [ i+'_'+str(df[i].unique() [-1])]
df = pd.get_dummies(df,columns=cat_columns)
df = df.drop(dummy_drop,axis=1 )<normalization> | submission_df.label = submission_df.filename.map(fname2pred ) | Deepfake Detection Challenge |
8,474,447 | X_train = df[:-1459].drop(['index'], axis=1)
X_test = df[-1459:].drop(['index'], axis=1)
scaler = StandardScaler()
X_train[num_columns]= scaler.fit_transform(X_train[num_columns])
X_test[num_columns]= scaler.transform(X_test[num_columns])
X_train.shape, X_test.shape<feature_engineering> | submission_df['label'] = np.clip(submission_df['label'], 0.01, 0.99 ) | Deepfake Detection Challenge |
8,474,447 | <train_model><EOS> | submission_df.to_csv("submission.csv",index=False ) | Deepfake Detection Challenge |
8,212,093 | <SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<train_on_grid> | %matplotlib inline
| Deepfake Detection Challenge |
8,212,093 | ans = {}
for i in range(1, 222):
imp_col = imp_feature.iloc[:i].index
ridge = KernelRidge(alpha = 0.5263157894736842, coef0 = 3.5, degree = 2, kernel ='polynomial')
ridge = ridge.fit(X_train[imp_col], y_train_log)
ans[i] = np.sqrt(mean_squared_error(y_train_log,ridge.predict(X_train[imp_col])) )<define_variables> | test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"])
len(test_videos ) | Deepfake Detection Challenge |
8,212,093 | minimum = ans[1]
ind_min = 1
for ind in range(1,len(ans.values())) :
if ans[ind] < minimum:
minimum = ans[ind]
ind_min = ind<features_selection> | print("PyTorch version:", torch.__version__)
print("CUDA version:", torch.version.cuda)
print("cuDNN version:", torch.backends.cudnn.version() ) | Deepfake Detection Challenge |
8,212,093 | imp_col = imp_feature.iloc[:ind_min+1].index<compute_test_metric> | gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu | Deepfake Detection Challenge |
8,212,093 | def neg_rmse(y_true, y_pred):
return -1.0*np.sqrt(mean_squared_error(y_true,y_pred))
neg_rmse = make_scorer(neg_rmse )<train_model> | facedet = BlazeFace().to(gpu)
facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth")
facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy")
_ = facedet.train(False ) | Deepfake Detection Challenge |
8,212,093 | model = KernelRidge(alpha = 0.6842105263157894, coef0 = 3.5, degree = 2, kernel = 'polynomial')
model.fit(X_train[imp_col], y_train_log)
print("RMSE of the whole training set: {}".format(np.sqrt(mean_squared_error(y_train_log,model.predict(X_train[imp_col])))) )<predict_on_test> | frames_per_video = 17
video_reader = VideoReader()
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet ) | Deepfake Detection Challenge |
8,212,093 | y_pred = np.exp(model.predict(X_test[imp_col]))<save_to_csv> | input_size = 224 | Deepfake Detection Challenge |
8,212,093 | def save_ans(ans, pasanger_id, name_alg):
submission = pd.DataFrame({'Id':pasanger_id,'SalePrice':ans})
print(submission.shape)
filename = r'./{}.csv'.format(name_alg)
submission.to_csv(filename,index=False)
print('Saved file: ' + filename )<compute_test_metric> | mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std ) | Deepfake Detection Challenge |
8,212,093 | save_ans(y_pred, test_id,'submission' )<load_from_csv> | class MyResNeXt(models.resnet.ResNet):
def __init__(self, training=True):
super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck,
layers=[3, 4, 6, 3],
groups=32,
width_per_group=4)
self.fc = nn.Linear(2048, 1 ) | Deepfake Detection Challenge |
8,212,093 | train_df = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/train.csv')
test_df = pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv' )<drop_column> | checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu)
model = MyResNeXt().to(gpu)
model.load_state_dict(checkpoint)
_ = model.eval()
del checkpoint | Deepfake Detection Challenge |
8,212,093 | def preprocessing_null(data_df):
data_df.drop(['Alley', 'PoolQC', 'Fence', 'MiscFeature', 'Id'], axis=1, inplace=True)
Bsmtlist = ['BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']
Bsmtlist2=['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF']
Garagelist = ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']
Bathlist = ['BsmtFullBath', 'BsmtHalfBath']
Extlist = ['Exterior1st', 'Exterior2nd']
data_df.loc[:, Bsmtlist]=data_df.loc[:, Bsmtlist].fillna('TA')
data_df['Electrical']=data_df['Electrical'].fillna('SBrkr')
data_df['LotFrontage']=data_df['LotFrontage'].fillna(data_df['LotFrontage'].mean())
data_df['FireplaceQu'] = data_df['FireplaceQu'].fillna('NA')
data_df.loc[:, Garagelist] = data_df.loc[:, Garagelist].fillna('NA')
data_df['GarageYrBlt']=data_df['GarageYrBlt'].fillna(2005)
data_df.loc[:, 'MasVnrType'] = data_df.loc[:, 'MasVnrType'].fillna('None')
data_df['MasVnrArea']=data_df['MasVnrArea'].fillna(0)
data_df.loc[:, Bsmtlist2]=data_df[Bsmtlist2].fillna(0)
data_df['TotalBsmtSF']=data_df['TotalBsmtSF'].fillna(0)
data_df['GarageArea']=data_df['GarageArea'].fillna(data_df['GarageArea'].median())
data_df['GarageCars']=data_df['GarageCars'].fillna(data_df['GarageCars'].median())
data_df[Bathlist]=data_df[Bathlist].fillna(0)
data_df[Extlist]=data_df[Extlist].fillna('VinylSd')
data_df['MSZoning']=data_df['MSZoning'].fillna('TA')
data_df['Utilities']=data_df['Utilities'].fillna('AllPub')
data_df['KitchenQual']=data_df['KitchenQual'].fillna('TA')
data_df['Functional']=data_df['Functional'].fillna('Typ')
data_df['SaleType']=data_df['SaleType'].fillna('WD')
return data_df<prepare_x_and_y> | def predict_on_video(video_path, batch_size):
try:
faces = face_extractor.process_video(video_path)
face_extractor.keep_only_best_face(faces)
if len(faces)> 0:
x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8)
n = 0
for frame_data in faces:
for face in frame_data["faces"]:
resized_face = isotropically_resize_image(face, input_size)
resized_face = make_square_image(resized_face)
if n < batch_size:
x[n] = resized_face
n += 1
else:
print("WARNING: have %d faces but batch size is %d" %(n, batch_size))
if n > 0:
x = torch.tensor(x, device=gpu ).float()
x = x.permute(( 0, 3, 1, 2))
for i in range(len(x)) :
x[i] = normalize_transform(x[i] / 255.)
with torch.no_grad() :
y_pred = model(x)
y_pred = torch.sigmoid(y_pred.squeeze())
return y_pred[:n].mean().item()
except Exception as e:
print("Prediction error on video %s: %s" %(video_path, str(e)))
return 0.5 | Deepfake Detection Challenge |
8,212,093 | train_target = train_df['SalePrice']
train_feature = train_df.drop('SalePrice', axis=1 )<categorify> | def predict_on_video_set(videos, num_workers):
def process_file(i):
filename = videos[i]
y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video)
return y_pred
with ThreadPoolExecutor(max_workers=num_workers)as ex:
predictions = ex.map(process_file, range(len(videos)))
return list(predictions ) | Deepfake Detection Challenge |
8,212,093 | train_feature = preprocessing_null(train_feature)
test_feature = preprocessing_null(test_df )<set_options> | speed_test = False | Deepfake Detection Challenge |
8,212,093 | %matplotlib inline<set_options> | if speed_test:
start_time = time.time()
speedtest_videos = test_videos[:5]
predictions = predict_on_video_set(speedtest_videos, num_workers=4)
elapsed = time.time() - start_time
print("Elapsed %f sec.Average per video: %f sec." %(elapsed, elapsed / len(speedtest_videos)) ) | Deepfake Detection Challenge |
8,212,093 | pd.set_option('display.max_columns', 500)
corr[corr>0.7]<drop_column> | predictions = predict_on_video_set(test_videos, num_workers=4 ) | Deepfake Detection Challenge |
8,212,093 | <split><EOS> | submission_df = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df.to_csv("submission.csv", index=False ) | Deepfake Detection Challenge |
8,069,381 | <SOS> metric: LogLoss Kaggle data source: deepfake-detection-challenge<split> | %matplotlib inline
| Deepfake Detection Challenge |
8,069,381 | train_feature_num, train_feature_obj = split_num_obj(train_feature)
test_feature_num, test_feature_obj = split_num_obj(test_feature )<categorify> | test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"])
frame_h = 5
frame_l = 5
len(test_videos ) | Deepfake Detection Challenge |
8,069,381 | train_dummies = pd.get_dummies(train_feature_obj)
test_dummies = pd.get_dummies(test_feature_obj)
not_in_train = [column for column in train_dummies.columns if column not in test_dummies.columns]
not_in_test = [column for column in test_dummies.columns if column not in train_dummies.columns]
print('
',train_dummies.shape, test_dummies.shape, '
')
print('
', not_in_train, '
')
print('
', not_in_test )<define_search_space> | print("PyTorch version:", torch.__version__)
print("CUDA version:", torch.version.cuda)
print("cuDNN version:", torch.backends.cudnn.version() ) | Deepfake Detection Challenge |
8,069,381 | df_num_cat_col = df_num_col[[0, 3, 4, 15, 16, 17, 18, 19, 20, 21, 22, 28, 29, 30, 31]]<split> | gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu | Deepfake Detection Challenge |
8,069,381 | def split_num_cat(data_df_num):
data_df_num_cat = data_df_num[df_num_cat_col]
data_df_num_non_cat = data_df_num.drop(df_num_cat_col, axis=1)
return data_df_num_cat, data_df_num_non_cat
train_feature_num_cat, train_feature_num_non_cat = split_num_cat(train_feature_num)
test_feature_num_cat, test_feature_num_non_cat = split_num_cat(test_feature_num)
print(train_feature_num_cat.shape,test_feature_num_cat.shape)
print(train_feature_num_non_cat.shape, test_feature_num_non_cat.shape )<split> | facedet = BlazeFace().to(gpu)
facedet.load_weights("/kaggle/input/blazeface-pytorch/blazeface.pth")
facedet.load_anchors("/kaggle/input/blazeface-pytorch/anchors.npy")
_ = facedet.train(False ) | Deepfake Detection Challenge |
8,069,381 | all_data = pd.concat(( train_df, test_df)).reset_index(drop=True)
all_data.drop('SalePrice', axis=1, inplace=True)
print(all_data.shape)
all_data = preprocessing_null(all_data)
all_data = drop_corr_ftr(all_data)
print(all_data.shape)
all_data_num, all_data_obj = split_num_obj(all_data)
print(all_data_obj.shape )<categorify> | frames_per_video = 64
video_reader = VideoReader()
video_read_fn = lambda x: video_reader.read_frames(x, num_frames=frames_per_video)
face_extractor = FaceExtractor(video_read_fn, facedet ) | Deepfake Detection Challenge |
8,069,381 | warnings.filterwarnings(action='ignore')
label = LabelEncoder()
for col in all_data_obj.columns:
label.fit(all_data_obj.loc[:, col])
train_feature_obj.loc[:, col] = label.transform(train_feature_obj.loc[:, col])
test_feature_obj.loc[:, col] = label.transform(test_feature_obj.loc[:, col] )<feature_engineering> | input_size = 224 | Deepfake Detection Challenge |
8,069,381 | train_target = np.log1p(train_target)
train_feature_num_non_cat = np.log1p(train_feature_num_non_cat)
test_feature_num_non_cat = np.log1p(test_feature_num_non_cat )<concatenate> | mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
normalize_transform = Normalize(mean, std ) | Deepfake Detection Challenge |
8,069,381 | train_feature_fin = pd.concat([train_feature_num_cat, train_feature_num_non_cat, train_feature_obj], axis=1)
test_feature_fin = pd.concat([test_feature_num_cat, test_feature_num_non_cat, test_feature_obj], axis=1 )<define_variables> | class MyResNeXt(models.resnet.ResNet):
def __init__(self, training=True):
super(MyResNeXt, self ).__init__(block=models.resnet.Bottleneck,
layers=[3, 4, 6, 3],
groups=32,
width_per_group=4)
self.fc = nn.Linear(2048, 1 ) | Deepfake Detection Challenge |
8,069,381 | cond1 = train_target>500000
cond2 = train_feature_num_non_cat['GrLivArea']>4000<drop_column> | checkpoint = torch.load("/kaggle/input/deepfakes-inference-demo/resnext.pth", map_location=gpu)
model = MyResNeXt().to(gpu)
model.load_state_dict(checkpoint)
_ = model.eval()
del checkpoint | Deepfake Detection Challenge |
8,069,381 | train_feature_fin = train_feature_fin.drop(train_feature_num_non_cat[cond1|cond2].index, axis=0 )<train_on_grid> | def predict_on_video(video_path, batch_size):
try:
faces = face_extractor.process_video(video_path)
face_extractor.keep_only_best_face(faces)
if len(faces)> 0:
x = np.zeros(( batch_size, input_size, input_size, 3), dtype=np.uint8)
n = 0
for frame_data in faces:
for face in frame_data["faces"]:
resized_face = isotropically_resize_image(face, input_size)
resized_face = make_square_image(resized_face)
if n < batch_size:
x[n] = resized_face
n += 1
else:
print("WARNING: have %d faces but batch size is %d" %(n, batch_size))
if n > 0:
x = torch.tensor(x, device=gpu ).float()
x = x.permute(( 0, 3, 1, 2))
for i in range(len(x)) :
x[i] = normalize_transform(x[i] / 255.)
with torch.no_grad() :
y_pred = model(x)
y_pred = torch.sigmoid(y_pred.squeeze())
return y_pred[:n].mean().item()
except Exception as e:
print("Prediction error on video %s: %s" %(video_path, str(e)))
return 0.481 | Deepfake Detection Challenge |
8,069,381 | def get_best_estimator(model, params):
grid_model = GridSearchCV(model, param_grid=params, scoring="neg_mean_squared_error", cv=5)
grid_model.fit(train_feature_fin, train_target)
rmse = np.sqrt(-1*grid_model.best_score_)
print('{0}, param:{1}, rmse:{2}'.format(model.__class__.__name__, grid_model.best_params_,\
np.round(rmse, 4)))
return grid_model.best_estimator_
ridge_params = {'alpha':[0.05, 0.1, 1, 5, 8, 10, 15]}
lasso_params = {'alpha':[0.001, 0.005, 0.008, 0.05, 0.1, 0.3, 0.5, 1, 5, 10]}
elastic_params = {'alpha':[0.05, 0.1, 0.5, 1, 3, 5, 8]}
ridge_reg = Ridge()
lasso_reg = Lasso()
elastic_reg = ElasticNet(l1_ratio=0.7)
lasso_be = get_best_estimator(lasso_reg, lasso_params)
ridge_be = get_best_estimator(ridge_reg, ridge_params)
elastic_be = get_best_estimator(elastic_reg, elastic_params )<choose_model_class> | def predict_on_video_set(videos, num_workers):
def process_file(i):
filename = videos[i]
y_pred = predict_on_video(os.path.join(test_dir, filename), batch_size=frames_per_video)
return y_pred
with ThreadPoolExecutor(max_workers=num_workers)as ex:
predictions = ex.map(process_file, range(len(videos)))
return list(predictions ) | Deepfake Detection Challenge |
8,069,381 | lgbm_params = {
'max_depth':[5, 10, 15, 20, 25, 30],
'learning_rate':[0.01, 0.05, 0.1, 0.5, 1],
}
lgbm_reg = LGBMRegressor(n_estimators=1000)
lgbm_be = get_best_estimator(lgbm_reg, lgbm_params )<predict_on_test> | predictions = predict_on_video_set(test_videos, num_workers=4 ) | Deepfake Detection Challenge |
8,069,381 | preds = np.expm1(lgbm_be.predict(test_feature_fin))<save_to_csv> | submission_df_resnext = pd.DataFrame({"filename": test_videos, "label": predictions})
submission_df_resnext.to_csv("submission_resnext.csv", index=False ) | Deepfake Detection Challenge |
8,069,381 | test=pd.read_csv('.. /input/house-prices-advanced-regression-techniques/test.csv')
my_submission = pd.DataFrame({'Id': test.Id,
'SalePrice': preds})
my_submission.to_csv('submission.csv', index=False )<load_from_csv> | !pip install.. /input/deepfake-xception-trained-model/pytorchcv-0.0.55-py2.py3-none-any.whl --quiet | Deepfake Detection Challenge |
8,069,381 | train=pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv',index_col=0)
test=pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv',index_col=0)
submission=pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv')
Id_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv' ).values[:,0]
data=train
print(train.shape, test.shape, submission.shape, Id_test.shape )<set_options> | %matplotlib inline
warnings.filterwarnings("ignore" ) | Deepfake Detection Challenge |
8,069,381 | %matplotlib inline
<drop_column> | test_dir = "/kaggle/input/deepfake-detection-challenge/test_videos/"
test_videos = sorted([x for x in os.listdir(test_dir)if x[-4:] == ".mp4"])
len(test_videos ) | Deepfake Detection Challenge |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.