| id (int64, 0–190k) | prompt (string, length 21–13.4M) | docstring (string, length 1–12k, nullable) |
|---|---|---|
166,012 | import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Dict
import itertools
import numpy as np
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
EfficientFormer_width = {
'L': [40, 80, 192, 384], # 26m 83.3% 6attn
'S2': [32, 64, 144, 288], # 12m 81.6% 4attn dp0.02
'S1': [32, 48, 120, 224], # 6.1m 79.0
'S0': [32, 48, 96, 176], # 75.0 75.7
}
EfficientFormer_depth = {
'L': [5, 5, 15, 10], # 26m 83.3%
'S2': [4, 4, 12, 8], # 12m
'S1': [3, 3, 9, 6], # 79.0
'S0': [2, 2, 6, 4], # 75.7
}
expansion_ratios_S0 = {
'0': [4, 4],
'1': [4, 4],
'2': [4, 3, 3, 3, 4, 4],
'3': [4, 3, 3, 4],
}
class EfficientFormerV2(nn.Module):
def __init__(self, layers, embed_dims=None,
mlp_ratios=4, downsamples=None,
pool_size=3,
norm_layer=nn.BatchNorm2d, act_layer=nn.GELU,
num_classes=1000,
down_patch_size=3, down_stride=2, down_pad=1,
drop_rate=0., drop_path_rate=0.,
use_layer_scale=True, layer_scale_init_value=1e-5,
fork_feat=True,
vit_num=0,
resolution=640,
e_ratios=expansion_ratios_S0,  # this snippet only defines the S0 table; the full source also defines expansion_ratios_L
**kwargs):
super().__init__()
if not fork_feat:
self.num_classes = num_classes
self.fork_feat = fork_feat
self.patch_embed = stem(3, embed_dims[0], act_layer=act_layer)
network = []
for i in range(len(layers)):
stage = eformer_block(embed_dims[i], i, layers,
pool_size=pool_size, mlp_ratio=mlp_ratios,
act_layer=act_layer, norm_layer=norm_layer,
drop_rate=drop_rate,
drop_path_rate=drop_path_rate,
use_layer_scale=use_layer_scale,
layer_scale_init_value=layer_scale_init_value,
resolution=math.ceil(resolution / (2 ** (i + 2))),
vit_num=vit_num,
e_ratios=e_ratios)
network.append(stage)
if i >= len(layers) - 1:
break
if downsamples[i] or embed_dims[i] != embed_dims[i + 1]:
# downsampling between two stages
if i >= 2:
asub = True
else:
asub = False
network.append(
Embedding(
patch_size=down_patch_size, stride=down_stride,
padding=down_pad,
in_chans=embed_dims[i], embed_dim=embed_dims[i + 1],
resolution=math.ceil(resolution / (2 ** (i + 2))),
asub=asub,
act_layer=act_layer, norm_layer=norm_layer,
)
)
self.network = nn.ModuleList(network)
if self.fork_feat:
# add a norm layer for each output
self.out_indices = [0, 2, 4, 6]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get('FORK_LAST3', None):
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, resolution, resolution))]
def forward_tokens(self, x):
outs = []
for idx, block in enumerate(self.network):
x = block(x)
if self.fork_feat and idx in self.out_indices:
norm_layer = getattr(self, f'norm{idx}')
x_out = norm_layer(x)
outs.append(x_out)
return outs
def forward(self, x):
x = self.patch_embed(x)
x = self.forward_tokens(x)
return x
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def efficientformerv2_s0(weights='', **kwargs):
model = EfficientFormerV2(
layers=EfficientFormer_depth['S0'],
embed_dims=EfficientFormer_width['S0'],
downsamples=[True, True, True, True, True],
vit_num=2,
drop_path_rate=0.0,
e_ratios=expansion_ratios_S0,
**kwargs)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
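The update_weight helper above copies only those checkpoint entries whose key exists in the target state_dict and whose tensor shape matches, so mismatched layers are silently skipped. A minimal standalone sketch of that behaviour (the two toy Linear modules are illustrative and not part of this row):

import numpy as np
import torch
import torch.nn as nn

def update_weight(model_dict, weight_dict):
    # keep only checkpoint entries whose key exists in the model and whose shape matches
    idx, temp_dict = 0, {}
    for k, v in weight_dict.items():
        if k in model_dict and np.shape(model_dict[k]) == np.shape(v):
            temp_dict[k] = v
            idx += 1
    model_dict.update(temp_dict)
    print(f'loading weights... {idx}/{len(model_dict)} items')
    return model_dict

# toy target: same layer name as the "checkpoint" but a different output width
model = nn.Sequential(nn.Linear(8, 4))                     # '0.weight' has shape (4, 8)
checkpoint = nn.Sequential(nn.Linear(8, 6)).state_dict()   # '0.weight' shape (6, 8): filtered out
checkpoint['0.bias'] = torch.zeros(4)                      # matching shape: copied over

model.load_state_dict(update_weight(model.state_dict(), checkpoint))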
166,013 | import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Dict
import itertools
import numpy as np
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
EfficientFormer_width = {
'L': [40, 80, 192, 384], # 26m 83.3% 6attn
'S2': [32, 64, 144, 288], # 12m 81.6% 4attn dp0.02
'S1': [32, 48, 120, 224], # 6.1m 79.0
'S0': [32, 48, 96, 176], # 75.0 75.7
}
EfficientFormer_depth = {
'L': [5, 5, 15, 10], # 26m 83.3%
'S2': [4, 4, 12, 8], # 12m
'S1': [3, 3, 9, 6], # 79.0
'S0': [2, 2, 6, 4], # 75.7
}
expansion_ratios_S1 = {
'0': [4, 4, 4],
'1': [4, 4, 4],
'2': [4, 4, 3, 3, 3, 3, 4, 4, 4],
'3': [4, 4, 3, 3, 4, 4],
}
class EfficientFormerV2(nn.Module):
def __init__(self, layers, embed_dims=None,
mlp_ratios=4, downsamples=None,
pool_size=3,
norm_layer=nn.BatchNorm2d, act_layer=nn.GELU,
num_classes=1000,
down_patch_size=3, down_stride=2, down_pad=1,
drop_rate=0., drop_path_rate=0.,
use_layer_scale=True, layer_scale_init_value=1e-5,
fork_feat=True,
vit_num=0,
resolution=640,
e_ratios=expansion_ratios_S1,  # this snippet only defines the S1 table; the full source also defines expansion_ratios_L
**kwargs):
super().__init__()
if not fork_feat:
self.num_classes = num_classes
self.fork_feat = fork_feat
self.patch_embed = stem(3, embed_dims[0], act_layer=act_layer)
network = []
for i in range(len(layers)):
stage = eformer_block(embed_dims[i], i, layers,
pool_size=pool_size, mlp_ratio=mlp_ratios,
act_layer=act_layer, norm_layer=norm_layer,
drop_rate=drop_rate,
drop_path_rate=drop_path_rate,
use_layer_scale=use_layer_scale,
layer_scale_init_value=layer_scale_init_value,
resolution=math.ceil(resolution / (2 ** (i + 2))),
vit_num=vit_num,
e_ratios=e_ratios)
network.append(stage)
if i >= len(layers) - 1:
break
if downsamples[i] or embed_dims[i] != embed_dims[i + 1]:
# downsampling between two stages
if i >= 2:
asub = True
else:
asub = False
network.append(
Embedding(
patch_size=down_patch_size, stride=down_stride,
padding=down_pad,
in_chans=embed_dims[i], embed_dim=embed_dims[i + 1],
resolution=math.ceil(resolution / (2 ** (i + 2))),
asub=asub,
act_layer=act_layer, norm_layer=norm_layer,
)
)
self.network = nn.ModuleList(network)
if self.fork_feat:
# add a norm layer for each output
self.out_indices = [0, 2, 4, 6]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get('FORK_LAST3', None):
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, resolution, resolution))]
def forward_tokens(self, x):
outs = []
for idx, block in enumerate(self.network):
x = block(x)
if self.fork_feat and idx in self.out_indices:
norm_layer = getattr(self, f'norm{idx}')
x_out = norm_layer(x)
outs.append(x_out)
return outs
def forward(self, x):
x = self.patch_embed(x)
x = self.forward_tokens(x)
return x
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def efficientformerv2_s1(weights='', **kwargs):
model = EfficientFormerV2(
layers=EfficientFormer_depth['S1'],
embed_dims=EfficientFormer_width['S1'],
downsamples=[True, True, True, True],
vit_num=2,
drop_path_rate=0.0,
e_ratios=expansion_ratios_S1,
**kwargs)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
166,014 | import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Dict
import itertools
import numpy as np
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
EfficientFormer_width = {
'L': [40, 80, 192, 384], # 26m 83.3% 6attn
'S2': [32, 64, 144, 288], # 12m 81.6% 4attn dp0.02
'S1': [32, 48, 120, 224], # 6.1m 79.0
'S0': [32, 48, 96, 176], # 75.0 75.7
}
EfficientFormer_depth = {
'L': [5, 5, 15, 10], # 26m 83.3%
'S2': [4, 4, 12, 8], # 12m
'S1': [3, 3, 9, 6], # 79.0
'S0': [2, 2, 6, 4], # 75.7
}
expansion_ratios_S2 = {
'0': [4, 4, 4, 4],
'1': [4, 4, 4, 4],
'2': [4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4],
'3': [4, 4, 3, 3, 3, 3, 4, 4],
}
class EfficientFormerV2(nn.Module):
def __init__(self, layers, embed_dims=None,
mlp_ratios=4, downsamples=None,
pool_size=3,
norm_layer=nn.BatchNorm2d, act_layer=nn.GELU,
num_classes=1000,
down_patch_size=3, down_stride=2, down_pad=1,
drop_rate=0., drop_path_rate=0.,
use_layer_scale=True, layer_scale_init_value=1e-5,
fork_feat=True,
vit_num=0,
resolution=640,
e_ratios=expansion_ratios_S2,  # this snippet only defines the S2 table; the full source also defines expansion_ratios_L
**kwargs):
super().__init__()
if not fork_feat:
self.num_classes = num_classes
self.fork_feat = fork_feat
self.patch_embed = stem(3, embed_dims[0], act_layer=act_layer)
network = []
for i in range(len(layers)):
stage = eformer_block(embed_dims[i], i, layers,
pool_size=pool_size, mlp_ratio=mlp_ratios,
act_layer=act_layer, norm_layer=norm_layer,
drop_rate=drop_rate,
drop_path_rate=drop_path_rate,
use_layer_scale=use_layer_scale,
layer_scale_init_value=layer_scale_init_value,
resolution=math.ceil(resolution / (2 ** (i + 2))),
vit_num=vit_num,
e_ratios=e_ratios)
network.append(stage)
if i >= len(layers) - 1:
break
if downsamples[i] or embed_dims[i] != embed_dims[i + 1]:
# downsampling between two stages
if i >= 2:
asub = True
else:
asub = False
network.append(
Embedding(
patch_size=down_patch_size, stride=down_stride,
padding=down_pad,
in_chans=embed_dims[i], embed_dim=embed_dims[i + 1],
resolution=math.ceil(resolution / (2 ** (i + 2))),
asub=asub,
act_layer=act_layer, norm_layer=norm_layer,
)
)
self.network = nn.ModuleList(network)
if self.fork_feat:
# add a norm layer for each output
self.out_indices = [0, 2, 4, 6]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get('FORK_LAST3', None):
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, resolution, resolution))]
def forward_tokens(self, x):
outs = []
for idx, block in enumerate(self.network):
x = block(x)
if self.fork_feat and idx in self.out_indices:
norm_layer = getattr(self, f'norm{idx}')
x_out = norm_layer(x)
outs.append(x_out)
return outs
def forward(self, x):
x = self.patch_embed(x)
x = self.forward_tokens(x)
return x
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def efficientformerv2_s2(weights='', **kwargs):
model = EfficientFormerV2(
layers=EfficientFormer_depth['S2'],
embed_dims=EfficientFormer_width['S2'],
downsamples=[True, True, True, True],
vit_num=4,
drop_path_rate=0.02,
e_ratios=expansion_ratios_S2,
**kwargs)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
166,015 | import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Dict
import itertools
import numpy as np
from timm.models.layers import DropPath, trunc_normal_, to_2tuple
EfficientFormer_width = {
'L': [40, 80, 192, 384], # 26m 83.3% 6attn
'S2': [32, 64, 144, 288], # 12m 81.6% 4attn dp0.02
'S1': [32, 48, 120, 224], # 6.1m 79.0
'S0': [32, 48, 96, 176], # 75.0 75.7
}
EfficientFormer_depth = {
'L': [5, 5, 15, 10], # 26m 83.3%
'S2': [4, 4, 12, 8], # 12m
'S1': [3, 3, 9, 6], # 79.0
'S0': [2, 2, 6, 4], # 75.7
}
expansion_ratios_L = {
'0': [4, 4, 4, 4, 4],
'1': [4, 4, 4, 4, 4],
'2': [4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4],
'3': [4, 4, 4, 3, 3, 3, 3, 4, 4, 4],
}
class EfficientFormerV2(nn.Module):
def __init__(self, layers, embed_dims=None,
mlp_ratios=4, downsamples=None,
pool_size=3,
norm_layer=nn.BatchNorm2d, act_layer=nn.GELU,
num_classes=1000,
down_patch_size=3, down_stride=2, down_pad=1,
drop_rate=0., drop_path_rate=0.,
use_layer_scale=True, layer_scale_init_value=1e-5,
fork_feat=True,
vit_num=0,
resolution=640,
e_ratios=expansion_ratios_L,
**kwargs):
super().__init__()
if not fork_feat:
self.num_classes = num_classes
self.fork_feat = fork_feat
self.patch_embed = stem(3, embed_dims[0], act_layer=act_layer)
network = []
for i in range(len(layers)):
stage = eformer_block(embed_dims[i], i, layers,
pool_size=pool_size, mlp_ratio=mlp_ratios,
act_layer=act_layer, norm_layer=norm_layer,
drop_rate=drop_rate,
drop_path_rate=drop_path_rate,
use_layer_scale=use_layer_scale,
layer_scale_init_value=layer_scale_init_value,
resolution=math.ceil(resolution / (2 ** (i + 2))),
vit_num=vit_num,
e_ratios=e_ratios)
network.append(stage)
if i >= len(layers) - 1:
break
if downsamples[i] or embed_dims[i] != embed_dims[i + 1]:
# downsampling between two stages
if i >= 2:
asub = True
else:
asub = False
network.append(
Embedding(
patch_size=down_patch_size, stride=down_stride,
padding=down_pad,
in_chans=embed_dims[i], embed_dim=embed_dims[i + 1],
resolution=math.ceil(resolution / (2 ** (i + 2))),
asub=asub,
act_layer=act_layer, norm_layer=norm_layer,
)
)
self.network = nn.ModuleList(network)
if self.fork_feat:
# add a norm layer for each output
self.out_indices = [0, 2, 4, 6]
for i_emb, i_layer in enumerate(self.out_indices):
if i_emb == 0 and os.environ.get('FORK_LAST3', None):
layer = nn.Identity()
else:
layer = norm_layer(embed_dims[i_emb])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, resolution, resolution))]
def forward_tokens(self, x):
outs = []
for idx, block in enumerate(self.network):
x = block(x)
if self.fork_feat and idx in self.out_indices:
norm_layer = getattr(self, f'norm{idx}')
x_out = norm_layer(x)
outs.append(x_out)
return outs
def forward(self, x):
x = self.patch_embed(x)
x = self.forward_tokens(x)
return x
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def efficientformerv2_l(weights='', **kwargs):
model = EfficientFormerV2(
layers=EfficientFormer_depth['L'],
embed_dims=EfficientFormer_width['L'],
downsamples=[True, True, True, True],
vit_num=6,
drop_path_rate=0.1,
e_ratios=expansion_ratios_L,
**kwargs)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
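Each stage above is built for a feature map of roughly resolution / 2**(i + 2): the stem downsamples by 4 and every Embedding between stages halves the map again. A small standalone sketch of that bookkeeping for the default 640-pixel input, using the widths from EfficientFormer_width['L']:

import math

resolution = 640  # default input size used by the constructors above
for i, width in enumerate([40, 80, 192, 384]):     # EfficientFormer_width['L']
    side = math.ceil(resolution / (2 ** (i + 2)))  # stride-4 stem, then /2 per stage
    print(f'stage {i}: {width} channels at {side}x{side}')
# stage 0: 40 channels at 160x160
# stage 1: 80 channels at 80x80
# stage 2: 192 channels at 40x40
# stage 3: 384 channels at 20x20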
166,016 | from functools import partial
import numpy as np
import torch
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from torch import nn
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v | null |
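_make_divisible rounds a channel count to the nearest multiple of divisor and, if that rounding would drop more than 10% of the requested value, bumps the result up by one step. A few illustrative calls, assuming they run in the same module as the definition above:

print(_make_divisible(30, 8))    # 32 -> rounded to the nearest multiple of 8
print(_make_divisible(36, 32))   # 64 -> 32 would be more than 10% below 36, so bump up a step
print(_make_divisible(8, 8))     # 8  -> already divisible, unchanged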
166,017 | from functools import partial
import numpy as np
import torch
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from torch import nn
class NextViT(nn.Module):
def __init__(self, stem_chs, depths, path_dropout, attn_drop=0, drop=0, num_classes=1000,
strides=[1, 2, 2, 2], sr_ratios=[8, 4, 2, 1], head_dim=32, mix_block_ratio=0.75,
use_checkpoint=False):
super(NextViT, self).__init__()
self.use_checkpoint = use_checkpoint
self.stage_out_channels = [[96] * (depths[0]),
[192] * (depths[1] - 1) + [256],
[384, 384, 384, 384, 512] * (depths[2] // 5),
[768] * (depths[3] - 1) + [1024]]
# Next Hybrid Strategy
self.stage_block_types = [[NCB] * depths[0],
[NCB] * (depths[1] - 1) + [NTB],
[NCB, NCB, NCB, NCB, NTB] * (depths[2] // 5),
[NCB] * (depths[3] - 1) + [NTB]]
self.stem = nn.Sequential(
ConvBNReLU(3, stem_chs[0], kernel_size=3, stride=2),
ConvBNReLU(stem_chs[0], stem_chs[1], kernel_size=3, stride=1),
ConvBNReLU(stem_chs[1], stem_chs[2], kernel_size=3, stride=1),
ConvBNReLU(stem_chs[2], stem_chs[2], kernel_size=3, stride=2),
)
input_channel = stem_chs[-1]
features = []
idx = 0
dpr = [x.item() for x in torch.linspace(0, path_dropout, sum(depths))] # stochastic depth decay rule
for stage_id in range(len(depths)):
numrepeat = depths[stage_id]
output_channels = self.stage_out_channels[stage_id]
block_types = self.stage_block_types[stage_id]
for block_id in range(numrepeat):
if strides[stage_id] == 2 and block_id == 0:
stride = 2
else:
stride = 1
output_channel = output_channels[block_id]
block_type = block_types[block_id]
if block_type is NCB:
layer = NCB(input_channel, output_channel, stride=stride, path_dropout=dpr[idx + block_id],
drop=drop, head_dim=head_dim)
features.append(layer)
elif block_type is NTB:
layer = NTB(input_channel, output_channel, path_dropout=dpr[idx + block_id], stride=stride,
sr_ratio=sr_ratios[stage_id], head_dim=head_dim, mix_block_ratio=mix_block_ratio,
attn_drop=attn_drop, drop=drop)
features.append(layer)
input_channel = output_channel
idx += numrepeat
self.features = nn.Sequential(*features)
self.norm = nn.BatchNorm2d(output_channel, eps=NORM_EPS)
self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))]
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self._initialize_weights()
def _initialize_weights(self):
for n, m in self.named_modules():
if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, nn.LayerNorm, nn.BatchNorm1d)):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
x = self.stem(x)
for idx, layer in enumerate(self.features):
if self.use_checkpoint:
x = checkpoint.checkpoint(layer, x)
else:
x = layer(x)
if idx in self.stage_out_idx:
res.append(x)
res[-1] = self.norm(res[-1])
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def nextvit_small(weights=''):
model = NextViT(stem_chs=[64, 32, 64], depths=[3, 4, 10, 3], path_dropout=0.1)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
166,018 | from functools import partial
import numpy as np
import torch
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from torch import nn
class NextViT(nn.Module):
def __init__(self, stem_chs, depths, path_dropout, attn_drop=0, drop=0, num_classes=1000,
strides=[1, 2, 2, 2], sr_ratios=[8, 4, 2, 1], head_dim=32, mix_block_ratio=0.75,
use_checkpoint=False):
super(NextViT, self).__init__()
self.use_checkpoint = use_checkpoint
self.stage_out_channels = [[96] * (depths[0]),
[192] * (depths[1] - 1) + [256],
[384, 384, 384, 384, 512] * (depths[2] // 5),
[768] * (depths[3] - 1) + [1024]]
# Next Hybrid Strategy
self.stage_block_types = [[NCB] * depths[0],
[NCB] * (depths[1] - 1) + [NTB],
[NCB, NCB, NCB, NCB, NTB] * (depths[2] // 5),
[NCB] * (depths[3] - 1) + [NTB]]
self.stem = nn.Sequential(
ConvBNReLU(3, stem_chs[0], kernel_size=3, stride=2),
ConvBNReLU(stem_chs[0], stem_chs[1], kernel_size=3, stride=1),
ConvBNReLU(stem_chs[1], stem_chs[2], kernel_size=3, stride=1),
ConvBNReLU(stem_chs[2], stem_chs[2], kernel_size=3, stride=2),
)
input_channel = stem_chs[-1]
features = []
idx = 0
dpr = [x.item() for x in torch.linspace(0, path_dropout, sum(depths))] # stochastic depth decay rule
for stage_id in range(len(depths)):
numrepeat = depths[stage_id]
output_channels = self.stage_out_channels[stage_id]
block_types = self.stage_block_types[stage_id]
for block_id in range(numrepeat):
if strides[stage_id] == 2 and block_id == 0:
stride = 2
else:
stride = 1
output_channel = output_channels[block_id]
block_type = block_types[block_id]
if block_type is NCB:
layer = NCB(input_channel, output_channel, stride=stride, path_dropout=dpr[idx + block_id],
drop=drop, head_dim=head_dim)
features.append(layer)
elif block_type is NTB:
layer = NTB(input_channel, output_channel, path_dropout=dpr[idx + block_id], stride=stride,
sr_ratio=sr_ratios[stage_id], head_dim=head_dim, mix_block_ratio=mix_block_ratio,
attn_drop=attn_drop, drop=drop)
features.append(layer)
input_channel = output_channel
idx += numrepeat
self.features = nn.Sequential(*features)
self.norm = nn.BatchNorm2d(output_channel, eps=NORM_EPS)
self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))]
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self._initialize_weights()
def _initialize_weights(self):
for n, m in self.named_modules():
if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, nn.LayerNorm, nn.BatchNorm1d)):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
x = self.stem(x)
for idx, layer in enumerate(self.features):
if self.use_checkpoint:
x = checkpoint.checkpoint(layer, x)
else:
x = layer(x)
if idx in self.stage_out_idx:
res.append(x)
res[-1] = self.norm(res[-1])
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def nextvit_base(weights=''):
model = NextViT(stem_chs=[64, 32, 64], depths=[3, 4, 20, 3], path_dropout=0.2)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
166,019 | from functools import partial
import numpy as np
import torch
import torch.utils.checkpoint as checkpoint
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from torch import nn
class NextViT(nn.Module):
def __init__(self, stem_chs, depths, path_dropout, attn_drop=0, drop=0, num_classes=1000,
strides=[1, 2, 2, 2], sr_ratios=[8, 4, 2, 1], head_dim=32, mix_block_ratio=0.75,
use_checkpoint=False):
super(NextViT, self).__init__()
self.use_checkpoint = use_checkpoint
self.stage_out_channels = [[96] * (depths[0]),
[192] * (depths[1] - 1) + [256],
[384, 384, 384, 384, 512] * (depths[2] // 5),
[768] * (depths[3] - 1) + [1024]]
# Next Hybrid Strategy
self.stage_block_types = [[NCB] * depths[0],
[NCB] * (depths[1] - 1) + [NTB],
[NCB, NCB, NCB, NCB, NTB] * (depths[2] // 5),
[NCB] * (depths[3] - 1) + [NTB]]
self.stem = nn.Sequential(
ConvBNReLU(3, stem_chs[0], kernel_size=3, stride=2),
ConvBNReLU(stem_chs[0], stem_chs[1], kernel_size=3, stride=1),
ConvBNReLU(stem_chs[1], stem_chs[2], kernel_size=3, stride=1),
ConvBNReLU(stem_chs[2], stem_chs[2], kernel_size=3, stride=2),
)
input_channel = stem_chs[-1]
features = []
idx = 0
dpr = [x.item() for x in torch.linspace(0, path_dropout, sum(depths))] # stochastic depth decay rule
for stage_id in range(len(depths)):
numrepeat = depths[stage_id]
output_channels = self.stage_out_channels[stage_id]
block_types = self.stage_block_types[stage_id]
for block_id in range(numrepeat):
if strides[stage_id] == 2 and block_id == 0:
stride = 2
else:
stride = 1
output_channel = output_channels[block_id]
block_type = block_types[block_id]
if block_type is NCB:
layer = NCB(input_channel, output_channel, stride=stride, path_dropout=dpr[idx + block_id],
drop=drop, head_dim=head_dim)
features.append(layer)
elif block_type is NTB:
layer = NTB(input_channel, output_channel, path_dropout=dpr[idx + block_id], stride=stride,
sr_ratio=sr_ratios[stage_id], head_dim=head_dim, mix_block_ratio=mix_block_ratio,
attn_drop=attn_drop, drop=drop)
features.append(layer)
input_channel = output_channel
idx += numrepeat
self.features = nn.Sequential(*features)
self.norm = nn.BatchNorm2d(output_channel, eps=NORM_EPS)
self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))]
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self._initialize_weights()
def _initialize_weights(self):
for n, m in self.named_modules():
if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, nn.LayerNorm, nn.BatchNorm1d)):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
x = self.stem(x)
for idx, layer in enumerate(self.features):
if self.use_checkpoint:
x = checkpoint.checkpoint(layer, x)
else:
x = layer(x)
if idx in self.stage_out_idx:
res.append(x)
res[-1] = self.norm(res[-1])
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def nextvit_large(weights=''):
model = NextViT(stem_chs=[64, 32, 64], depths=[3, 4, 30, 3], path_dropout=0.2)
if weights:
pretrained_weight = torch.load(weights)['model']
model.load_state_dict(update_weight(model.state_dict(), pretrained_weight))
return model | null |
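The NextViT constructor ramps the per-block DropPath rate linearly from 0 to path_dropout across all blocks (the "stochastic depth decay rule") and records the index of the last block of each stage as a feature-pyramid tap. A standalone sketch of both computations for the nextvit_small configuration:

import torch

depths, path_dropout = [3, 4, 10, 3], 0.1   # nextvit_small settings above

# stochastic depth decay rule: block k gets drop rate k / (N - 1) * path_dropout
dpr = [x.item() for x in torch.linspace(0, path_dropout, sum(depths))]
print([round(r, 3) for r in dpr[:4]], '...', round(dpr[-1], 3))

# indices (into self.features) of the last block of every stage -> outputs collected in forward()
stage_out_idx = [sum(depths[:i + 1]) - 1 for i in range(len(depths))]
print(stage_out_idx)   # [2, 6, 16, 19]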
166,020 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
res.append(x)
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def convnextv2_atto(weights='', **kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[40, 80, 160, 320], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,021 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
res.append(x)
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def convnextv2_femto(weights='', **kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[48, 96, 192, 384], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,022 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
res.append(x)
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def convnextv2_pico(weights='', **kwargs):
model = ConvNeXtV2(depths=[2, 2, 6, 2], dims=[64, 128, 256, 512], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,023 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
def _init_weights(self, m):
def forward(self, x):
def update_weight(model_dict, weight_dict):
def convnextv2_nano(weights='', **kwargs):
model = ConvNeXtV2(depths=[2, 2, 8, 2], dims=[80, 160, 320, 640], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,024 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
def _init_weights(self, m):
def forward(self, x):
def update_weight(model_dict, weight_dict):
def convnextv2_tiny(weights='', **kwargs):
model = ConvNeXtV2(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,025 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
res.append(x)
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def convnextv2_base(weights='', **kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,026 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
res.append(x)
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def convnextv2_large(weights='', **kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
166,027 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from timm.models.layers import trunc_normal_, DropPath
class ConvNeXtV2(nn.Module):
""" ConvNeXt V2
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
dims (tuple(int)): Feature dimension at each stage. Default: [96, 192, 384, 768]
drop_path_rate (float): Stochastic depth rate. Default: 0.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
"""
def __init__(self, in_chans=3, num_classes=1000,
depths=[3, 3, 9, 3], dims=[96, 192, 384, 768],
drop_path_rate=0., head_init_scale=1.
):
super().__init__()
self.depths = depths
self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers
stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")
)
self.downsample_layers.append(stem)
for i in range(3):
downsample_layer = nn.Sequential(
LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),
)
self.downsample_layers.append(downsample_layer)
self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks
dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
cur = 0
for i in range(4):
stage = nn.Sequential(
*[Block(dim=dims[i], drop_path=dp_rates[cur + j]) for j in range(depths[i])]
)
self.stages.append(stage)
cur += depths[i]
self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer
self.head = nn.Linear(dims[-1], num_classes)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
nn.init.constant_(m.bias, 0)
def forward(self, x):
res = []
for i in range(4):
x = self.downsample_layers[i](x)
x = self.stages[i](x)
res.append(x)
return res
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def convnextv2_huge(weights='', **kwargs):
model = ConvNeXtV2(depths=[3, 3, 27, 3], dims=[352, 704, 1408, 2816], **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['model']))
return model | null |
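All of these backbones discover their per-stage output channels by pushing a dummy batch through forward() once at construction time and reading size(1) of each returned map. A standalone toy illustration of that probing pattern (TinyBackbone is hypothetical and only stands in for the real ConvNeXtV2 stages):

import torch
import torch.nn as nn

class TinyBackbone(nn.Module):
    """Hypothetical 3-stage backbone that returns one feature map per stage."""
    def __init__(self, dims=(16, 32, 64)):
        super().__init__()
        chs = (3,) + tuple(dims)
        self.stages = nn.ModuleList(
            nn.Conv2d(chs[i], chs[i + 1], kernel_size=3, stride=2, padding=1)
            for i in range(len(dims))
        )
        # same trick as the backbones above: probe channels with a dummy forward pass
        self.channel = [f.size(1) for f in self.forward(torch.randn(1, 3, 64, 64))]

    def forward(self, x):
        feats = []
        for stage in self.stages:
            x = stage(x)
            feats.append(x)
        return feats

print(TinyBackbone().channel)   # [16, 32, 64]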
166,028 |
def parse_model(d, ch): # model_dict, input_channels(3)
# Parse a YOLOv5 model.yaml dictionary
LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
if act:
Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU()
LOGGER.info(f"{colorstr('activation:')} {act}") # print
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
is_backbone = False
layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
try:
t = m
m = eval(m) if isinstance(m, str) else m # eval strings
except:
pass
for j, a in enumerate(args):
with contextlib.suppress(NameError):
try:
args[j] = eval(a) if isinstance(a, str) else a # eval strings
except:
args[j] = a
n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain
if m in {
Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}:
c1, c2 = ch[f], args[0]
if c2 != no: # if not output
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}:
args.insert(2, n) # number of repeats
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum(ch[x] for x in f)
# TODO: channel, gw, gd
elif m in {Detect, Segment}:
args.append([ch[x] for x in f])
if isinstance(args[1], int): # number of anchors
args[1] = [list(range(args[1] * 2))] * len(f)
if m is Segment:
args[3] = make_divisible(args[3] * gw, 8)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
elif isinstance(m, str):
t = m
m = timm.create_model(m, pretrained=args[0], features_only=True)
c2 = m.feature_info.channels()
# elif m in {}:
# m = m(*args)
# c2 = m.channel
else:
c2 = ch[f]
if isinstance(c2, list):
is_backbone = True
m_ = m
m_.backbone = True
else:
m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
t = str(m)[8:-2].replace('__main__.', '') # module type
np = sum(x.numel() for x in m_.parameters()) # number params
m_.i, m_.f, m_.type, m_.np = i + 4 if is_backbone else i, f, t, np # attach index, 'from' index, type, number params
LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print
save.extend(x % (i + 4 if is_backbone else i) for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
layers.append(m_)
if i == 0:
ch = []
if isinstance(c2, list):
ch.extend(c2)
for _ in range(5 - len(ch)):
ch.insert(0, 0)
else:
ch.append(c2)
return nn.Sequential(*layers), sorted(save) | null |
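When parse_model encounters a module given as a plain string, it asks timm for a features-only backbone and reads the per-stage channel counts back from feature_info. A minimal sketch of that lookup (timm must be installed; 'resnet18' is only an example name):

import timm

# features_only=True wraps the classifier-free backbone so it returns one tensor per stage
m = timm.create_model('resnet18', pretrained=False, features_only=True)
print(m.feature_info.channels())    # e.g. [64, 64, 128, 256, 512]
print(m.feature_info.reduction())   # strides of those feature maps, e.g. [2, 4, 8, 16, 32]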
166,029 |
def _forward_once(self, x, profile=False, visualize=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
self._profile_one_layer(m, x, dt)
if hasattr(m, 'backbone'):
x = m(x)
for _ in range(5 - len(x)):
x.insert(0, None)
for i_idx, i in enumerate(x):
if i_idx in self.save:
y.append(i)
else:
y.append(None)
x = x[-1]
else:
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
return x | null |
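For a module flagged as a backbone, _forward_once left-pads its list of feature maps to five entries with None, keeps only the indices listed in self.save, and carries the deepest map forward. A plain-Python sketch of that routing with toy values in place of the real tensors:

# toy stand-ins: a 3-scale backbone output and a save set asking for the last three slots
x = ['P3', 'P4', 'P5']           # what a 3-stage backbone would return
save = {2, 3, 4}                 # indices of outputs a later head wants to reuse

for _ in range(5 - len(x)):      # left-pad to a fixed length of 5, as in _forward_once
    x.insert(0, None)

y = [feat if i in save else None for i, feat in enumerate(x)]
x = x[-1]                        # the deepest feature map keeps flowing forward

print(y)   # [None, None, 'P3', 'P4', 'P5']
print(x)   # 'P5'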
166,030 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import itertools
from timm.models.layers import SqueezeExcite
import numpy as np
import itertools
def replace_batchnorm(net):
class EfficientViT(torch.nn.Module):
def __init__(self, img_size=400,
patch_size=16,
frozen_stages=0,
in_chans=3,
stages=['s', 's', 's'],
embed_dim=[64, 128, 192],
key_dim=[16, 16, 16],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5],
down_ops=[['subsample', 2], ['subsample', 2], ['']],
pretrained=None,
distillation=False,):
def forward(self, x):
EfficientViT_m0 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [64, 128, 192],
'depth': [1, 2, 3],
'num_heads': [4, 4, 4],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
def update_weight(model_dict, weight_dict):
def EfficientViT_M0(pretrained='', frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m0):
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg)
if pretrained:
model.load_state_dict(update_weight(model.state_dict(), torch.load(pretrained)['model']))
if fuse:
replace_batchnorm(model)
return model | null |
166,031 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import itertools
from timm.models.layers import SqueezeExcite
import numpy as np
import itertools
def replace_batchnorm(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
setattr(net, child_name, child.fuse())
elif isinstance(child, torch.nn.BatchNorm2d):
setattr(net, child_name, torch.nn.Identity())
else:
replace_batchnorm(child)
class EfficientViT(torch.nn.Module):
def __init__(self, img_size=400,
patch_size=16,
frozen_stages=0,
in_chans=3,
stages=['s', 's', 's'],
embed_dim=[64, 128, 192],
key_dim=[16, 16, 16],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5],
down_ops=[['subsample', 2], ['subsample', 2], ['']],
pretrained=None,
distillation=False,):
super().__init__()
resolution = img_size
self.patch_embed = torch.nn.Sequential(Conv2d_BN(in_chans, embed_dim[0] // 8, 3, 2, 1, resolution=resolution), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 8, embed_dim[0] // 4, 3, 2, 1, resolution=resolution // 2), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1, resolution=resolution // 4), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 1, 1, resolution=resolution // 8))
resolution = img_size // patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
self.blocks1 = []
self.blocks2 = []
self.blocks3 = []
for i, (stg, ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(stages, embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
for d in range(dpth):
eval('self.blocks' + str(i+1)).append(EfficientViTBlock(stg, ed, kd, nh, ar, resolution, wd, kernels))
if do[0] == 'subsample':
#('Subsample' stride)
blk = eval('self.blocks' + str(i+2))
resolution_ = (resolution - 1) // do[1] + 1
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i], embed_dim[i], 3, 1, 1, groups=embed_dim[i], resolution=resolution)),
Residual(FFN(embed_dim[i], int(embed_dim[i] * 2), resolution)),))
blk.append(PatchMerging(*embed_dim[i:i + 2], resolution))
resolution = resolution_
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i + 1], embed_dim[i + 1], 3, 1, 1, groups=embed_dim[i + 1], resolution=resolution)),
Residual(FFN(embed_dim[i + 1], int(embed_dim[i + 1] * 2), resolution)),))
self.blocks1 = torch.nn.Sequential(*self.blocks1)
self.blocks2 = torch.nn.Sequential(*self.blocks2)
self.blocks3 = torch.nn.Sequential(*self.blocks3)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def forward(self, x):
outs = []
x = self.patch_embed(x)
x = self.blocks1(x)
outs.append(x)
x = self.blocks2(x)
outs.append(x)
x = self.blocks3(x)
outs.append(x)
return outs
EfficientViT_m1 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 144, 192],
'depth': [1, 2, 3],
'num_heads': [2, 3, 3],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
# k = k[9:]
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def EfficientViT_M1(pretrained='', frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m1):
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg)
if pretrained:
model.load_state_dict(update_weight(model.state_dict(), torch.load(pretrained)['model']))
if fuse:
replace_batchnorm(model)
return model | null |
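Every ['subsample', 2] entry in down_ops halves the attention resolution with a ceiling division, (resolution - 1) // stride + 1. A tiny standalone sketch of that bookkeeping using the 224 / 16 default of the M-series configs:

resolution = 224 // 16            # img_size // patch_size used by the blocks above
for stride in (2, 2):             # the two ['subsample', 2] entries in down_ops
    resolution = (resolution - 1) // stride + 1   # ceil(resolution / stride)
    print(resolution)             # 7, then 4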
166,032 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import itertools
from timm.models.layers import SqueezeExcite
import numpy as np
import itertools
def replace_batchnorm(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
setattr(net, child_name, child.fuse())
elif isinstance(child, torch.nn.BatchNorm2d):
setattr(net, child_name, torch.nn.Identity())
else:
replace_batchnorm(child)
class EfficientViT(torch.nn.Module):
def __init__(self, img_size=400,
patch_size=16,
frozen_stages=0,
in_chans=3,
stages=['s', 's', 's'],
embed_dim=[64, 128, 192],
key_dim=[16, 16, 16],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5],
down_ops=[['subsample', 2], ['subsample', 2], ['']],
pretrained=None,
distillation=False,):
super().__init__()
resolution = img_size
self.patch_embed = torch.nn.Sequential(Conv2d_BN(in_chans, embed_dim[0] // 8, 3, 2, 1, resolution=resolution), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 8, embed_dim[0] // 4, 3, 2, 1, resolution=resolution // 2), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1, resolution=resolution // 4), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 1, 1, resolution=resolution // 8))
resolution = img_size // patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
self.blocks1 = []
self.blocks2 = []
self.blocks3 = []
for i, (stg, ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(stages, embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
for d in range(dpth):
eval('self.blocks' + str(i+1)).append(EfficientViTBlock(stg, ed, kd, nh, ar, resolution, wd, kernels))
if do[0] == 'subsample':
#('Subsample' stride)
blk = eval('self.blocks' + str(i+2))
resolution_ = (resolution - 1) // do[1] + 1
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i], embed_dim[i], 3, 1, 1, groups=embed_dim[i], resolution=resolution)),
Residual(FFN(embed_dim[i], int(embed_dim[i] * 2), resolution)),))
blk.append(PatchMerging(*embed_dim[i:i + 2], resolution))
resolution = resolution_
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i + 1], embed_dim[i + 1], 3, 1, 1, groups=embed_dim[i + 1], resolution=resolution)),
Residual(FFN(embed_dim[i + 1], int(embed_dim[i + 1] * 2), resolution)),))
self.blocks1 = torch.nn.Sequential(*self.blocks1)
self.blocks2 = torch.nn.Sequential(*self.blocks2)
self.blocks3 = torch.nn.Sequential(*self.blocks3)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def forward(self, x):
outs = []
x = self.patch_embed(x)
x = self.blocks1(x)
outs.append(x)
x = self.blocks2(x)
outs.append(x)
x = self.blocks3(x)
outs.append(x)
return outs
EfficientViT_m2 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 192, 224],
'depth': [1, 2, 3],
'num_heads': [4, 3, 2],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
# k = k[9:]
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def EfficientViT_M2(pretrained='', frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m2):
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg)
if pretrained:
model.load_state_dict(update_weight(model.state_dict(), torch.load(pretrained)['model']))
if fuse:
replace_batchnorm(model)
return model | null |
166,033 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import itertools
from timm.models.layers import SqueezeExcite
import numpy as np
import itertools
def replace_batchnorm(net):
class EfficientViT(torch.nn.Module):
def __init__(self, img_size=400,
patch_size=16,
frozen_stages=0,
in_chans=3,
stages=['s', 's', 's'],
embed_dim=[64, 128, 192],
key_dim=[16, 16, 16],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5],
down_ops=[['subsample', 2], ['subsample', 2], ['']],
pretrained=None,
distillation=False,):
def forward(self, x):
EfficientViT_m3 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 240, 320],
'depth': [1, 2, 3],
'num_heads': [4, 3, 4],
'window_size': [7, 7, 7],
'kernels': [5, 5, 5, 5],
}
def update_weight(model_dict, weight_dict):
def EfficientViT_M3(pretrained='', frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m3):
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg)
if pretrained:
model.load_state_dict(update_weight(model.state_dict(), torch.load(pretrained)['model']))
if fuse:
replace_batchnorm(model)
return model | null |
166,034 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import itertools
from timm.models.layers import SqueezeExcite
import numpy as np
import itertools
def replace_batchnorm(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
setattr(net, child_name, child.fuse())
elif isinstance(child, torch.nn.BatchNorm2d):
setattr(net, child_name, torch.nn.Identity())
else:
replace_batchnorm(child)
class EfficientViT(torch.nn.Module):
def __init__(self, img_size=400,
patch_size=16,
frozen_stages=0,
in_chans=3,
stages=['s', 's', 's'],
embed_dim=[64, 128, 192],
key_dim=[16, 16, 16],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5],
down_ops=[['subsample', 2], ['subsample', 2], ['']],
pretrained=None,
distillation=False,):
super().__init__()
resolution = img_size
self.patch_embed = torch.nn.Sequential(Conv2d_BN(in_chans, embed_dim[0] // 8, 3, 2, 1, resolution=resolution), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 8, embed_dim[0] // 4, 3, 2, 1, resolution=resolution // 2), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1, resolution=resolution // 4), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 1, 1, resolution=resolution // 8))
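        # the stem stacks three stride-2 convs and one stride-1 conv, so stage-1 features sit at 1/8 of the input resolution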
resolution = img_size // patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
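        # e.g. with the m4 config (embed_dim=[128, 256, 384], key_dim=[16, 16, 16], num_heads=[4, 4, 4])
        # this evaluates to attn_ratio = [2.0, 4.0, 6.0]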
self.blocks1 = []
self.blocks2 = []
self.blocks3 = []
for i, (stg, ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(stages, embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
for d in range(dpth):
eval('self.blocks' + str(i+1)).append(EfficientViTBlock(stg, ed, kd, nh, ar, resolution, wd, kernels))
if do[0] == 'subsample':
                # ('subsample', stride): add a downsampling block between this stage and the next
blk = eval('self.blocks' + str(i+2))
resolution_ = (resolution - 1) // do[1] + 1
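                # ceil division: e.g. a 14x14 token grid subsampled with stride 2 becomes 7x7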
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i], embed_dim[i], 3, 1, 1, groups=embed_dim[i], resolution=resolution)),
Residual(FFN(embed_dim[i], int(embed_dim[i] * 2), resolution)),))
blk.append(PatchMerging(*embed_dim[i:i + 2], resolution))
resolution = resolution_
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i + 1], embed_dim[i + 1], 3, 1, 1, groups=embed_dim[i + 1], resolution=resolution)),
Residual(FFN(embed_dim[i + 1], int(embed_dim[i + 1] * 2), resolution)),))
self.blocks1 = torch.nn.Sequential(*self.blocks1)
self.blocks2 = torch.nn.Sequential(*self.blocks2)
self.blocks3 = torch.nn.Sequential(*self.blocks3)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def forward(self, x):
outs = []
x = self.patch_embed(x)
x = self.blocks1(x)
outs.append(x)
x = self.blocks2(x)
outs.append(x)
x = self.blocks3(x)
outs.append(x)
return outs
EfficientViT_m4 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 256, 384],
'depth': [1, 2, 3],
'num_heads': [4, 4, 4],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
# k = k[9:]
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def EfficientViT_M4(pretrained='', frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m4):
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg)
if pretrained:
model.load_state_dict(update_weight(model.state_dict(), torch.load(pretrained)['model']))
if fuse:
replace_batchnorm(model)
return model | null |
166,035 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
import itertools
from timm.models.layers import SqueezeExcite
import numpy as np
import itertools
def replace_batchnorm(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
setattr(net, child_name, child.fuse())
elif isinstance(child, torch.nn.BatchNorm2d):
setattr(net, child_name, torch.nn.Identity())
else:
replace_batchnorm(child)
class EfficientViT(torch.nn.Module):
def __init__(self, img_size=400,
patch_size=16,
frozen_stages=0,
in_chans=3,
stages=['s', 's', 's'],
embed_dim=[64, 128, 192],
key_dim=[16, 16, 16],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5],
down_ops=[['subsample', 2], ['subsample', 2], ['']],
pretrained=None,
distillation=False,):
super().__init__()
resolution = img_size
self.patch_embed = torch.nn.Sequential(Conv2d_BN(in_chans, embed_dim[0] // 8, 3, 2, 1, resolution=resolution), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 8, embed_dim[0] // 4, 3, 2, 1, resolution=resolution // 2), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1, resolution=resolution // 4), torch.nn.ReLU(),
Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 1, 1, resolution=resolution // 8))
resolution = img_size // patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
self.blocks1 = []
self.blocks2 = []
self.blocks3 = []
for i, (stg, ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(stages, embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
for d in range(dpth):
eval('self.blocks' + str(i+1)).append(EfficientViTBlock(stg, ed, kd, nh, ar, resolution, wd, kernels))
if do[0] == 'subsample':
                # ('subsample', stride): add a downsampling block between this stage and the next
blk = eval('self.blocks' + str(i+2))
resolution_ = (resolution - 1) // do[1] + 1
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i], embed_dim[i], 3, 1, 1, groups=embed_dim[i], resolution=resolution)),
Residual(FFN(embed_dim[i], int(embed_dim[i] * 2), resolution)),))
blk.append(PatchMerging(*embed_dim[i:i + 2], resolution))
resolution = resolution_
blk.append(torch.nn.Sequential(Residual(Conv2d_BN(embed_dim[i + 1], embed_dim[i + 1], 3, 1, 1, groups=embed_dim[i + 1], resolution=resolution)),
Residual(FFN(embed_dim[i + 1], int(embed_dim[i + 1] * 2), resolution)),))
self.blocks1 = torch.nn.Sequential(*self.blocks1)
self.blocks2 = torch.nn.Sequential(*self.blocks2)
self.blocks3 = torch.nn.Sequential(*self.blocks3)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def forward(self, x):
outs = []
x = self.patch_embed(x)
x = self.blocks1(x)
outs.append(x)
x = self.blocks2(x)
outs.append(x)
x = self.blocks3(x)
outs.append(x)
return outs
EfficientViT_m5 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [192, 288, 384],
'depth': [1, 3, 4],
'num_heads': [3, 3, 4],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
# k = k[9:]
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def EfficientViT_M5(pretrained='', frozen_stages=0, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m5):
model = EfficientViT(frozen_stages=frozen_stages, distillation=distillation, pretrained=pretrained, **model_cfg)
if pretrained:
model.load_state_dict(update_weight(model.state_dict(), torch.load(pretrained)['model']))
if fuse:
replace_batchnorm(model)
return model | null |
166,036 | import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair as to_2tuple
from timm.layers import DropPath, to_2tuple
from functools import partial
import numpy as np
class LSKNet(nn.Module):
def __init__(self, img_size=224, in_chans=3, embed_dims=[64, 128, 256, 512],
mlp_ratios=[8, 8, 4, 4], drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
depths=[3, 4, 6, 3], num_stages=4,
norm_cfg=None):
super().__init__()
self.depths = depths
self.num_stages = num_stages
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
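        # dpr ramps the per-block drop-path probability linearly from 0 up to drop_path_rate across all blocks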
cur = 0
for i in range(num_stages):
patch_embed = OverlapPatchEmbed(img_size=img_size if i == 0 else img_size // (2 ** (i + 1)),
patch_size=7 if i == 0 else 3,
stride=4 if i == 0 else 2,
in_chans=in_chans if i == 0 else embed_dims[i - 1],
embed_dim=embed_dims[i], norm_cfg=norm_cfg)
block = nn.ModuleList([Block(
dim=embed_dims[i], mlp_ratio=mlp_ratios[i], drop=drop_rate, drop_path=dpr[cur + j],norm_cfg=norm_cfg)
for j in range(depths[i])])
norm = norm_layer(embed_dims[i])
cur += depths[i]
setattr(self, f"patch_embed{i + 1}", patch_embed)
setattr(self, f"block{i + 1}", block)
setattr(self, f"norm{i + 1}", norm)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def forward(self, x):
B = x.shape[0]
outs = []
for i in range(self.num_stages):
patch_embed = getattr(self, f"patch_embed{i + 1}")
block = getattr(self, f"block{i + 1}")
norm = getattr(self, f"norm{i + 1}")
x, H, W = patch_embed(x)
for blk in block:
x = blk(x)
x = x.flatten(2).transpose(1, 2)
x = norm(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
return outs
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def lsknet_t(weights=''):
model = LSKNet(embed_dims=[32, 64, 160, 256], depths=[3, 3, 5, 2], drop_rate=0.1, drop_path_rate=0.1)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['state_dict']))
return model | null |
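A minimal usage sketch (hypothetical; it assumes OverlapPatchEmbed and Block are defined alongside LSKNet): lsknet_t builds a four-stage backbone that returns one feature map per stage.
model = lsknet_t()                                # or lsknet_t('path/to/weights.pth') to load a checkpoint
outs = model(torch.randn(1, 3, 640, 640))         # four feature maps at strides 4, 8, 16 and 32
print(model.channel)                              # [32, 64, 160, 256] for the tiny config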
166,037 | import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair as to_2tuple
from timm.layers import DropPath, to_2tuple
from functools import partial
import numpy as np
class LSKNet(nn.Module):
def __init__(self, img_size=224, in_chans=3, embed_dims=[64, 128, 256, 512],
mlp_ratios=[8, 8, 4, 4], drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
depths=[3, 4, 6, 3], num_stages=4,
norm_cfg=None):
super().__init__()
self.depths = depths
self.num_stages = num_stages
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
for i in range(num_stages):
patch_embed = OverlapPatchEmbed(img_size=img_size if i == 0 else img_size // (2 ** (i + 1)),
patch_size=7 if i == 0 else 3,
stride=4 if i == 0 else 2,
in_chans=in_chans if i == 0 else embed_dims[i - 1],
embed_dim=embed_dims[i], norm_cfg=norm_cfg)
block = nn.ModuleList([Block(
dim=embed_dims[i], mlp_ratio=mlp_ratios[i], drop=drop_rate, drop_path=dpr[cur + j],norm_cfg=norm_cfg)
for j in range(depths[i])])
norm = norm_layer(embed_dims[i])
cur += depths[i]
setattr(self, f"patch_embed{i + 1}", patch_embed)
setattr(self, f"block{i + 1}", block)
setattr(self, f"norm{i + 1}", norm)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def forward(self, x):
B = x.shape[0]
outs = []
for i in range(self.num_stages):
patch_embed = getattr(self, f"patch_embed{i + 1}")
block = getattr(self, f"block{i + 1}")
norm = getattr(self, f"norm{i + 1}")
x, H, W = patch_embed(x)
for blk in block:
x = blk(x)
x = x.flatten(2).transpose(1, 2)
x = norm(x)
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
outs.append(x)
return outs
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def lsknet_s(weights=''):
model = LSKNet(embed_dims=[64, 128, 256, 512], depths=[2, 2, 4, 2], drop_rate=0.1, drop_path_rate=0.1)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)['state_dict']))
return model | null |
166,038 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
focal_windows (list): The focal window size at all stages. Default: [7, 5, 3, 1]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use layer scale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use layernorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
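        # e.g. embed_dim=96 with four stages expands to [96, 192, 384, 768]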
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
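        # candidate output strides; a stage's output is kept when input_size // H matches one of them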
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
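        # reshape each (B, L, C) token sequence back to (B, C, H, W); int(L ** 0.5) assumes square feature maps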
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_tiny_srf(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 6, 2], embed_dim=96, **kwargs)
if pretrained:
url = model_urls['focalnet_tiny_srf']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu", check_hash=True)
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
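A minimal usage sketch (hypothetical; it assumes PatchEmbed and BasicLayer are defined in the same module): the builder returns a four-stage FocalNet backbone whose forward pass yields features at strides 4, 8, 16 and 32.
model = focalnet_tiny_srf(pretrained=False)
feats = model(torch.randn(1, 3, 640, 640))
print(model.channel)                              # [96, 192, 384, 768] with embed_dim=96
print([f.shape for f in feats])                   # from (1, 96, 160, 160) down to (1, 768, 20, 20) for a 640x640 input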
166,039 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
focal_windows (list): The focal window size at all stages. Default: [7, 5, 3, 1]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use layer scale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use layernorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_small_srf(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=96, **kwargs)
if pretrained:
url = model_urls['focalnet_small_srf']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,040 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
class FocalNet(nn.Module):
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
def _init_weights(self, m):
def no_weight_decay(self):
def no_weight_decay_keywords(self):
def forward(self, x):
def flops(self):
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_base_srf(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=128, **kwargs)
if pretrained:
url = model_urls['focalnet_base_srf']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,041 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
focal_windows (list): The focal window size at all stages. Default: [7, 5, 3, 1]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use layer scale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use layernorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_tiny_lrf(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 6, 2], embed_dim=96, **kwargs)
if pretrained:
url = model_urls['focalnet_tiny_lrf']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu", check_hash=True)
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,042 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
focal_windows (list): The focal window size at all stages. Default: [7, 5, 3, 1]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use layer scale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use layernorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_small_lrf(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=96, **kwargs)
if pretrained:
url = model_urls['focalnet_small_lrf']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,043 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
focal_windows (list): The focal window size at all stages. Default: [7, 5, 3, 1]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use layer scale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use layernorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_base_lrf(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=128, **kwargs)
if pretrained:
url = model_urls['focalnet_base_lrf']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,044 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
focal_levels (list): How many focal levels at all stages. Note that this excludes the finest-grain level. Default: [1, 1, 1, 1]
focal_windows (list): The focal window size at all stages. Default: [7, 5, 3, 1]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use layer scale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use layernorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_tiny_iso(pretrained=False, **kwargs):
model = FocalNet(depths=[12], patch_size=16, embed_dim=192, **kwargs)
if pretrained:
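        # note: model_urls above defines no 'focalnet_tiny_iso' entry, so pretrained=True raises a KeyError here;
        # also, forward() fills a 4-level pyramid, so this isotropic config (depths=[12]) leaves some levels as None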
url = model_urls['focalnet_tiny_iso']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu", check_hash=True)
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
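# Illustration (not part of the original snippet): update_weight, as defined in these entries,
# copies only the tensors whose names and shapes match, which is what lets a backbone load
# partially matching checkpoints. The toy donor/recipient modules below are hypothetical.
import torch.nn as nn
donor = nn.Sequential(nn.Conv2d(3, 16, 3), nn.Conv2d(16, 32, 3))
recipient = nn.Sequential(nn.Conv2d(3, 16, 3), nn.Conv2d(16, 64, 3))
# only '0.weight' and '0.bias' match in shape, so 2 of 4 items are reported as loaded
recipient.load_state_dict(update_weight(recipient.state_dict(), donor.state_dict()))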
166,045 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        focal_levels (list): Number of focal levels at each stage. Note that this excludes the finest-grain level. Default: [2, 2, 2, 2]
        focal_windows (list): Focal window size at each stage. Default: [3, 3, 3, 3]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use LayerScale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use LayerNorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_small_iso(pretrained=False, **kwargs):
model = FocalNet(depths=[12], patch_size=16, embed_dim=384, **kwargs)
if pretrained:
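        # note: model_urls above defines no 'focalnet_small_iso' entry, so pretrained=True raises a KeyError here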
url = model_urls['focalnet_small_iso']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
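# Quick sketch of the stochastic depth decay rule used in the constructor above: drop-path
# rates grow linearly from 0 to drop_path_rate across all blocks, and each stage receives
# its contiguous slice. The depths and rate below are illustrative.
import torch
depths, drop_path_rate = [2, 2, 6, 2], 0.1
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
stage_dpr = [dpr[sum(depths[:i]):sum(depths[:i + 1])] for i in range(len(depths))]
print([len(s) for s in stage_dpr])  # [2, 2, 6, 2]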
166,046 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        focal_levels (list): Number of focal levels at each stage. Note that this excludes the finest-grain level. Default: [2, 2, 2, 2]
        focal_windows (list): Focal window size at each stage. Default: [3, 3, 3, 3]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use LayerScale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use LayerNorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_base_iso(pretrained=False, **kwargs):
model = FocalNet(depths=[12], patch_size=16, embed_dim=768, focal_levels=[3], focal_windows=[3], use_layerscale=True, use_postln=True, **kwargs)
if pretrained:
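        # note: model_urls above defines no 'focalnet_base_iso' entry, so pretrained=True raises a KeyError here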
url = model_urls['focalnet_base_iso']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
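# Sketch of the (B, L, C) -> (B, C, H, W) reshape done at the end of forward above. It
# recovers H and W as sqrt(L), so it assumes a square token grid (square input images);
# the sizes below are illustrative.
import torch
B, H, W, C = 1, 40, 40, 384
tokens = torch.randn(B, H * W, C)  # B, L, C as produced by a stage
fmap = torch.transpose(tokens, dim0=2, dim1=1).view(-1, C, int((H * W) ** 0.5), int((H * W) ** 0.5))
print(fmap.shape)  # torch.Size([1, 384, 40, 40])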
166,047 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        focal_levels (list): Number of focal levels at each stage. Note that this excludes the finest-grain level. Default: [2, 2, 2, 2]
        focal_windows (list): Focal window size at each stage. Default: [3, 3, 3, 3]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use LayerScale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use LayerNorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_large_fl3(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=192, **kwargs)
if pretrained:
url = model_urls['focalnet_large_fl3']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
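# Hedged usage sketch: building the backbone above requires the PatchEmbed and BasicLayer
# modules defined elsewhere in this collection; with those in scope, the model returns a
# 4-level feature pyramid and records the per-level channel counts at construction time.
import torch
backbone = focalnet_large_fl3(pretrained=False)
feats = backbone(torch.randn(1, 3, 640, 640))
print(backbone.channel)              # per-level channels, e.g. [192, 384, 768, 1536]
print([f.shape[-1] for f in feats])  # per-level spatial sizes, e.g. [160, 80, 40, 20]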
166,048 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
    idx, temp_dict = 0, {}
    for k, v in weight_dict.items():
        if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
            temp_dict[k] = v
            idx += 1
    model_dict.update(temp_dict)
    print(f'loading weights... {idx}/{len(model_dict)} items')
    return model_dict
class FocalNet(nn.Module):
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
        super().__init__()
        # the rest of the constructor and the method bodies are omitted in this entry;
        # see the full FocalNet definition earlier in this collection
        ...
    def _init_weights(self, m):
        ...
    def no_weight_decay(self):
        ...
    def no_weight_decay_keywords(self):
        ...
    def forward(self, x):
        ...
    def flops(self):
        ...
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_large_fl4(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=192, **kwargs)
if pretrained:
url = model_urls['focalnet_large_fl4']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,049 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
    idx, temp_dict = 0, {}
    for k, v in weight_dict.items():
        if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
            temp_dict[k] = v
            idx += 1
    model_dict.update(temp_dict)
    print(f'loading weights... {idx}/{len(model_dict)} items')
    return model_dict
class FocalNet(nn.Module):
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
        super().__init__()
        # the rest of the constructor and the method bodies are omitted in this entry;
        # see the full FocalNet definition earlier in this collection
        ...
    def _init_weights(self, m):
        ...
    def no_weight_decay(self):
        ...
    def no_weight_decay_keywords(self):
        ...
    def forward(self, x):
        ...
    def flops(self):
        ...
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_xlarge_fl3(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=256, **kwargs)
if pretrained:
url = model_urls['focalnet_xlarge_fl3']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,050 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        focal_levels (list): Number of focal levels at each stage. Note that this excludes the finest-grain level. Default: [2, 2, 2, 2]
        focal_windows (list): Focal window size at each stage. Default: [3, 3, 3, 3]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use LayerScale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use LayerNorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_xlarge_fl4(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=256, **kwargs)
if pretrained:
url = model_urls['focalnet_xlarge_fl4']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,051 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        focal_levels (list): Number of focal levels at each stage. Note that this excludes the finest-grain level. Default: [2, 2, 2, 2]
        focal_windows (list): Focal window size at each stage. Default: [3, 3, 3, 3]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use LayerScale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use LayerNorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_huge_fl3(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=352, **kwargs)
if pretrained:
url = model_urls['focalnet_huge_fl3']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,052 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
class FocalNet(nn.Module):
r""" Focal Modulation Networks (FocalNets)
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Focal Transformer layer.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
drop_rate (float): Dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        focal_levels (list): Number of focal levels at each stage. Note that this excludes the finest-grain level. Default: [2, 2, 2, 2]
        focal_windows (list): Focal window size at each stage. Default: [3, 3, 3, 3]
        use_conv_embed (bool): Whether to use convolutional embedding. We noted that using convolutional embedding usually improves the performance, but we do not use it by default. Default: False
        use_layerscale (bool): Whether to use LayerScale as proposed in CaiT. Default: False
        layerscale_value (float): Value for layer scale. Default: 1e-4
        use_postln (bool): Whether to use LayerNorm after modulation (it helps stabilize training of large models)
"""
def __init__(self,
img_size=224,
patch_size=4,
in_chans=3,
num_classes=1000,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
patch_norm=True,
use_checkpoint=False,
focal_levels=[2, 2, 2, 2],
focal_windows=[3, 3, 3, 3],
use_conv_embed=False,
use_layerscale=False,
layerscale_value=1e-4,
use_postln=False,
use_postln_in_modulation=False,
normalize_modulator=False,
**kwargs):
super().__init__()
self.num_layers = len(depths)
embed_dim = [embed_dim * (2 ** i) for i in range(self.num_layers)]
self.num_classes = num_classes
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.num_features = embed_dim[-1]
self.mlp_ratio = mlp_ratio
# split image into patches using either non-overlapped embedding or overlapped embedding
self.patch_embed = PatchEmbed(
img_size=to_2tuple(img_size),
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim[0],
use_conv_embed=use_conv_embed,
norm_layer=norm_layer if self.patch_norm else None,
is_stem=True)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=embed_dim[i_layer],
out_dim=embed_dim[i_layer+1] if (i_layer < self.num_layers - 1) else None,
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
mlp_ratio=self.mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_level=focal_levels[i_layer],
focal_window=focal_windows[i_layer],
use_conv_embed=use_conv_embed,
use_checkpoint=use_checkpoint,
use_layerscale=use_layerscale,
layerscale_value=layerscale_value,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
normalize_modulator=normalize_modulator
)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.apply(self._init_weights)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def no_weight_decay(self):
return {''}
def no_weight_decay_keywords(self):
return {''}
def forward(self, x):
input_size = x.size(2)
scale = [4, 8, 16, 32]
x, H, W = self.patch_embed(x)
x = self.pos_drop(x)
features = [x, None, None, None]
for layer in self.layers:
x, H, W = layer(x, H, W)
if input_size // H in scale:
features[scale.index(input_size // H)] = x
# features[-1] = self.norm(features[-1]) # B L C
for i in range(len(features)):
features[i] = torch.transpose(features[i], dim0=2, dim1=1).view(-1,features[i].size(2), int(features[i].size(1) ** 0.5), int(features[i].size(1) ** 0.5))
return features
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
model_urls = {
"focalnet_tiny_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet_tiny_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet_small_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet_small_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet_base_srf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet_base_lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet_large_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet_large_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet_xlarge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet_xlarge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
"focalnet_huge_fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224.pth",
"focalnet_huge_fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_huge_lrf_224_fl4.pth",
}
def focalnet_huge_fl4(pretrained=False, **kwargs):
model = FocalNet(depths=[2, 2, 18, 2], embed_dim=352, **kwargs)
if pretrained:
url = model_urls['focalnet_huge_fl4']
checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location="cpu")
model.load_state_dict(update_weight(model.state_dict(), checkpoint["model"]))
return model | null |
166,053 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
def get_conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias,
attempt_use_lk_impl=True):
kernel_size = to_2tuple(kernel_size)
if padding is None:
padding = (kernel_size[0] // 2, kernel_size[1] // 2)
else:
padding = to_2tuple(padding)
need_large_impl = kernel_size[0] == kernel_size[1] and kernel_size[0] > 5 and padding == (kernel_size[0] // 2, kernel_size[1] // 2)
# if attempt_use_lk_impl and need_large_impl:
# print('---------------- trying to import iGEMM implementation for large-kernel conv')
# try:
# from depthwise_conv2d_implicit_gemm import DepthWiseConv2dImplicitGEMM
# print('---------------- found iGEMM implementation ')
# except:
# DepthWiseConv2dImplicitGEMM = None
# print('---------------- found no iGEMM. use original conv. follow https://github.com/AILab-CVC/UniRepLKNet to install it.')
# if DepthWiseConv2dImplicitGEMM is not None and need_large_impl and in_channels == out_channels \
# and out_channels == groups and stride == 1 and dilation == 1:
# print(f'===== iGEMM Efficient Conv Impl, channels {in_channels}, kernel size {kernel_size} =====')
# return DepthWiseConv2dImplicitGEMM(in_channels, kernel_size, bias=bias)
return nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias) | null |
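# With the iGEMM branch commented out above, get_conv2d simply builds an nn.Conv2d and,
# when padding is None, falls back to 'same'-style padding of kernel_size // 2. The
# depth-wise 13x13 example below is illustrative.
dw13 = get_conv2d(64, 64, kernel_size=13, stride=1, padding=None, dilation=1, groups=64, bias=False)
print(dw13.padding)  # (6, 6)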
166,054 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
def get_bn(dim, use_sync_bn=False):
if use_sync_bn:
return nn.SyncBatchNorm(dim)
else:
return nn.BatchNorm2d(dim) | null |
166,055 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
def fuse_bn(conv, bn):
conv_bias = 0 if conv.bias is None else conv.bias
std = (bn.running_var + bn.eps).sqrt()
return conv.weight * (bn.weight / std).reshape(-1, 1, 1, 1), bn.bias + (conv_bias - bn.running_mean) * bn.weight / std | null |
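# Sketch verifying fuse_bn: folding an eval-mode BatchNorm into the preceding conv
# reproduces the conv+BN output. The layer sizes and running statistics are arbitrary.
import torch
import torch.nn as nn
conv, bn = nn.Conv2d(8, 8, 3, padding=1, bias=False), nn.BatchNorm2d(8)
bn.running_mean.normal_()
bn.running_var.uniform_(0.5, 1.5)
conv.eval(); bn.eval()
w, b = fuse_bn(conv, bn)
fused = nn.Conv2d(8, 8, 3, padding=1, bias=True)
with torch.no_grad():
    fused.weight.copy_(w)
    fused.bias.copy_(b)
x = torch.randn(2, 8, 16, 16)
print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # True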
166,056 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
def convert_dilated_to_nondilated(kernel, dilate_rate):
identity_kernel = torch.ones((1, 1, 1, 1)).to(kernel.device)
if kernel.size(1) == 1:
# This is a DW kernel
dilated = F.conv_transpose2d(kernel, identity_kernel, stride=dilate_rate)
return dilated
else:
# This is a dense or group-wise (but not DW) kernel
slices = []
for i in range(kernel.size(1)):
dilated = F.conv_transpose2d(kernel[:,i:i+1,:,:], identity_kernel, stride=dilate_rate)
slices.append(dilated)
return torch.cat(slices, dim=1)
def merge_dilated_into_large_kernel(large_kernel, dilated_kernel, dilated_r):
large_k = large_kernel.size(2)
dilated_k = dilated_kernel.size(2)
equivalent_kernel_size = dilated_r * (dilated_k - 1) + 1
equivalent_kernel = convert_dilated_to_nondilated(dilated_kernel, dilated_r)
rows_to_pad = large_k // 2 - equivalent_kernel_size // 2
merged_kernel = large_kernel + F.pad(equivalent_kernel, [rows_to_pad] * 4)
return merged_kernel | null |
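# Sketch checking the re-parameterisation above for a depth-wise case: a 3x3 kernel applied
# with dilation 2 can be folded into a 13x13 kernel so that a single convolution with the
# merged kernel equals the sum of the two branches. Shapes below are illustrative.
import torch
import torch.nn.functional as F
x = torch.randn(1, 4, 32, 32)
large = torch.randn(4, 1, 13, 13)   # depth-wise 13x13 kernel
small = torch.randn(4, 1, 3, 3)     # depth-wise 3x3 kernel, used with dilation 2
two_branches = F.conv2d(x, large, padding=6, groups=4) + \
               F.conv2d(x, small, padding=2, dilation=2, groups=4)
merged = merge_dilated_into_large_kernel(large, small, dilated_r=2)
print(torch.allclose(two_branches, F.conv2d(x, merged, padding=6, groups=4), atol=1e-4))  # True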
166,057 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_A_F_P_depths = (2, 2, 6, 2)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_a(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_A_F_P_depths, dims=(40, 80, 160, 320), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
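A usage sketch, assuming LayerNorm, UniRepLKNetBlock and default_depths_to_kernel_sizes from the full UniRepLKNet implementation are available in scope (they are referenced above but not shown in this snippet):
backbone = unireplknet_a()                        # optionally pass weights='path/to/ckpt.pt'
feats = backbone(torch.randn(1, 3, 640, 640))
for f in feats:
    print(f.shape)                                # four feature maps at strides 4, 8, 16 and 32
print(backbone.channel)                           # per-stage channels, e.g. [40, 80, 160, 320]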
166,058 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_A_F_P_depths = (2, 2, 6, 2)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_f(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_A_F_P_depths, dims=(48, 96, 192, 384), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,059 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_A_F_P_depths = (2, 2, 6, 2)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_p(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_A_F_P_depths, dims=(64, 128, 256, 512), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,060 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_N_depths = (2, 2, 8, 2)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_n(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_N_depths, dims=(80, 160, 320, 640), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,061 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_T_depths = (3, 3, 18, 3)
class UniRepLKNet(nn.Module):
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
def _init_weights(self, m):
def forward(self, x):
def switch_to_deploy(self):
def update_weight(model_dict, weight_dict):
def unireplknet_t(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_T_depths, dims=(80, 160, 320, 640), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,062 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_S_B_L_XL_depths = (3, 3, 27, 3)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_s(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_S_B_L_XL_depths, dims=(96, 192, 384, 768), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,063 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_S_B_L_XL_depths = (3, 3, 27, 3)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_b(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_S_B_L_XL_depths, dims=(128, 256, 512, 1024), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,064 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_S_B_L_XL_depths = (3, 3, 27, 3)
class UniRepLKNet(nn.Module):
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
def _init_weights(self, m):
def forward(self, x):
def switch_to_deploy(self):
def update_weight(model_dict, weight_dict):
def unireplknet_l(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_S_B_L_XL_depths, dims=(192, 384, 768, 1536), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,065 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.layers import trunc_normal_, DropPath, to_2tuple
from functools import partial
import torch.utils.checkpoint as checkpoint
import numpy as np
UniRepLKNet_S_B_L_XL_depths = (3, 3, 27, 3)
class UniRepLKNet(nn.Module):
r""" UniRepLKNet
A PyTorch impl of UniRepLKNet
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 27, 3)
dims (int): Feature dimension at each stage. Default: (96, 192, 384, 768)
drop_path_rate (float): Stochastic depth rate. Default: 0.
layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
kernel_sizes (tuple(tuple(int))): Kernel size for each block. None means using the default settings. Default: None.
deploy (bool): deploy = True means using the inference structure. Default: False
with_cp (bool): with_cp = True means using torch.utils.checkpoint to save GPU memory. Default: False
        init_cfg (dict): weights to load. The easiest way to use UniRepLKNet with the OpenMMLab family. Default: None
        attempt_use_lk_impl (bool): try to load the efficient iGEMM large-kernel impl. Setting it to False disables the iGEMM impl. Default: True
use_sync_bn (bool): use_sync_bn = True means using sync BN. Use it if your batch size is small. Default: False
"""
def __init__(self,
in_chans=3,
num_classes=1000,
depths=(3, 3, 27, 3),
dims=(96, 192, 384, 768),
drop_path_rate=0.,
layer_scale_init_value=1e-6,
head_init_scale=1.,
kernel_sizes=None,
deploy=False,
with_cp=False,
init_cfg=None,
attempt_use_lk_impl=True,
use_sync_bn=False,
**kwargs
):
super().__init__()
depths = tuple(depths)
if kernel_sizes is None:
if depths in default_depths_to_kernel_sizes:
# print('=========== use default kernel size ')
kernel_sizes = default_depths_to_kernel_sizes[depths]
else:
raise ValueError('no default kernel size settings for the given depths, '
'please specify kernel sizes for each block, e.g., '
'((3, 3), (13, 13), (13, 13, 13, 13, 13, 13), (13, 13))')
# print(kernel_sizes)
for i in range(4):
assert len(kernel_sizes[i]) == depths[i], 'kernel sizes do not match the depths'
self.with_cp = with_cp
dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# print('=========== drop path rates: ', dp_rates)
self.downsample_layers = nn.ModuleList()
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(in_chans, dims[0] // 2, kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0] // 2, eps=1e-6, data_format="channels_first"),
nn.GELU(),
nn.Conv2d(dims[0] // 2, dims[0], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[0], eps=1e-6, data_format="channels_first")))
for i in range(3):
self.downsample_layers.append(nn.Sequential(
nn.Conv2d(dims[i], dims[i + 1], kernel_size=3, stride=2, padding=1),
LayerNorm(dims[i + 1], eps=1e-6, data_format="channels_first")))
self.stages = nn.ModuleList()
cur = 0
for i in range(4):
main_stage = nn.Sequential(
*[UniRepLKNetBlock(dim=dims[i], kernel_size=kernel_sizes[i][j], drop_path=dp_rates[cur + j],
layer_scale_init_value=layer_scale_init_value, deploy=deploy,
attempt_use_lk_impl=attempt_use_lk_impl,
with_cp=with_cp, use_sync_bn=use_sync_bn) for j in
range(depths[i])])
self.stages.append(main_stage)
cur += depths[i]
self.output_mode = 'features'
norm_layer = partial(LayerNorm, eps=1e-6, data_format="channels_first")
for i_layer in range(4):
layer = norm_layer(dims[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self.channel = [i.size(1) for i in self.forward(torch.randn(1, 3, 640, 640))]
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
if self.output_mode == 'logits':
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
x = self.norm(x.mean([-2, -1]))
x = self.head(x)
return x
elif self.output_mode == 'features':
outs = []
for stage_idx in range(4):
x = self.downsample_layers[stage_idx](x)
x = self.stages[stage_idx](x)
outs.append(self.__getattr__(f'norm{stage_idx}')(x))
return outs
else:
raise ValueError('Defined new output mode?')
def switch_to_deploy(self):
for m in self.modules():
if hasattr(m, 'reparameterize'):
m.reparameterize()
def update_weight(model_dict, weight_dict):
idx, temp_dict = 0, {}
for k, v in weight_dict.items():
if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
temp_dict[k] = v
idx += 1
model_dict.update(temp_dict)
print(f'loading weights... {idx}/{len(model_dict)} items')
return model_dict
def unireplknet_xl(weights='', **kwargs):
model = UniRepLKNet(depths=UniRepLKNet_S_B_L_XL_depths, dims=(256, 512, 1024, 2048), **kwargs)
if weights:
model.load_state_dict(update_weight(model.state_dict(), torch.load(weights)))
return model | null |
166,066 | import warnings
import cv2, os, shutil
import numpy as np
from ultralytics import YOLO
def get_video_cfg(path):
video = cv2.VideoCapture(path)
size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
fps = int(video.get(cv2.CAP_PROP_FPS))
return cv2.VideoWriter_fourcc(*'XVID'), size, fps | null |
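A minimal write-loop sketch using the returned codec, frame size and fps; the file paths are placeholders.
fourcc, size, fps = get_video_cfg('input.mp4')
writer = cv2.VideoWriter('output.avi', fourcc, fps, size)
video = cv2.VideoCapture('input.mp4')
while True:
    ok, frame = video.read()
    if not ok:
        break
    writer.write(frame)
video.release()
writer.release()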
166,067 | import warnings
import cv2, os, shutil
import numpy as np
from ultralytics import YOLO
def plot_and_counting(result):
image_plot = result.plot()
box_count = result.boxes.shape[0]
cv2.putText(image_plot, f'Object Counts:{box_count}', (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 0, 255), 4)
return image_plot | null |
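A hedged usage sketch with Ultralytics' streaming predict loop; the weight file and video source are placeholders.
model = YOLO('yolov8n.pt')                        # placeholder weights
for result in model.predict(source='demo.mp4', stream=True):
    frame = plot_and_counting(result)
    cv2.imshow('counting', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()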
166,068 | import warnings
import torch, yaml, cv2, os, shutil
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from PIL import Image
from models.yolo import Model
from utils.datasets import letterbox
from utils.general import xywh2xyxy, non_max_suppression
from models.experimental import attempt_load
from pytorch_grad_cam import GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
def get_params():
params = {
'weight': 'runs/train/yolov7_tiny_custom_fasternet_lamp_exp1/weights/best.pt',
'device': 'cuda:0',
'method': 'XGradCAM', # GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
'layer': [11, 14, 17],
'backward_type': 'all', # class, box, all
'conf_threshold': 0.2, # 0.6
'ratio': 0.02, # 0.02-0.1
'show_box': False,
'renormalize': True
}
return params | null |
166,069 | import warnings
import torch, yaml, cv2, os, shutil, sys
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from tqdm import trange
from PIL import Image
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.utils.torch_utils import intersect_dicts
from ultralytics.utils.ops import xywh2xyxy, non_max_suppression
from pytorch_grad_cam import GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return im, ratio, (dw, dh) | null |
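A short sketch of the usual preprocessing that follows letterbox before feeding a detector; the image path is a placeholder.
img = cv2.imread('demo.jpg')
padded, ratio, (dw, dh) = letterbox(img, new_shape=640, auto=False)
blob = padded[:, :, ::-1].transpose(2, 0, 1)      # BGR -> RGB, HWC -> CHW
blob = np.ascontiguousarray(blob, dtype=np.float32) / 255.0
tensor = torch.from_numpy(blob).unsqueeze(0)      # shape (1, 3, 640, 640)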
166,070 | import warnings
import torch, yaml, cv2, os, shutil, sys
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from PIL import Image
from ultralytics.nn.tasks import attempt_load_weights
from ultralytics.utils.torch_utils import intersect_dicts
from ultralytics.utils.ops import xywh2xyxy, non_max_suppression
from pytorch_grad_cam import GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
def get_params():
params = {
        'weight': 'runs/train/exp2/weights/best.pt', # now only the weight file needs to be specified; the cfg is no longer required
'device': 'cuda:0',
'method': 'HiResCAM', # GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
'layer': [10, 12, 14, 16, 18],
'backward_type': 'class', # class, box, all
'conf_threshold': 0.2, # 0.2
'ratio': 0.02, # 0.02-0.1
'show_box': False,
'renormalize': True
}
return params | null |
166,071 | import warnings
import torch, yaml, cv2, os, shutil
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from PIL import Image
from models.yolo import Model
from utils.augmentations import letterbox
from utils.general import xywh2xyxy, non_max_suppression
from models.experimental import attempt_load
from pytorch_grad_cam import GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
def get_params():
params = {
'weight': 'yolov9-c-converted.pt',
'device': 'cuda:0',
'method': 'XGradCAM', # GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
'layer': [11, 14, 17],
'backward_type': 'all', # class, box, all
'conf_threshold': 0.2, # 0.6
'ratio': 0.02, # 0.02-0.1
'show_box': True,
'renormalize': False
}
return params | null |
166,072 | import warnings
import torch, yaml, cv2, os, shutil
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from PIL import Image
from models.yolo import Model
from utils.general import intersect_dicts
from utils.augmentations import letterbox
from utils.general import xywh2xyxy, non_max_suppression
from models.experimental import attempt_load
from pytorch_grad_cam import GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_cam_image
from pytorch_grad_cam.activations_and_gradients import ActivationsAndGradients
def get_params():
params = {
'weight': 'runs/train/yolov5n_lamp_exp3/weights/best.pt',
'device': 'cuda:0',
'method': 'XGradCAM', # GradCAMPlusPlus, GradCAM, XGradCAM, EigenCAM, HiResCAM, LayerCAM, RandomCAM, EigenGradCAM
'layer': [16, 19, 21],
'backward_type': 'all', # class, box, all
'conf_threshold': 0.2, # 0.6
'ratio': 0.02, # 0.02-0.1
'show_box': False,
'renormalize': True
}
return params | null |
166,073 | import os
import glob
import json
import shutil
import numpy as np
import xml.etree.ElementTree as ET
def find_classes(path):
classes = []
for i in os.listdir(path):
try:
in_file = open(os.path.join(path, i), encoding='utf-8')
tree=ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = 0
if obj.find('difficult')!=None:
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes:
classes.append(cls)
except Exception as e:
print(os.path.join(path, i), e)
return classes | null |
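A minimal usage sketch; the annotation directory is a placeholder VOC-style folder.
voc_ann_dir = 'VOCdevkit/VOC2007/Annotations'
classes = find_classes(voc_ann_dir)
print(f'found {len(classes)} classes: {classes}')
with open('classes.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(classes))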
166,074 | import os
import glob
import json
import shutil
import numpy as np
import xml.etree.ElementTree as ET
START_BOUNDING_BOX_ID = 1
def get(root, name):
return root.findall(name)
def get_and_check(root, name, length):
vars = root.findall(name)
if len(vars) == 0:
raise NotImplementedError('Can not find %s in %s.'%(name, root.tag))
if length > 0 and len(vars) != length:
raise NotImplementedError('The size of %s is supposed to be %d, but is %d.'%(name, length, len(vars)))
if length == 1:
vars = vars[0]
return vars
def convert(xml_list, json_file):
json_dict = {"info":['none'], "license":['none'], "images": [], "annotations": [], "categories": []}
categories = pre_define_categories.copy()
bnd_id = START_BOUNDING_BOX_ID
all_categories = {}
for index, line in enumerate(xml_list):
# print("Processing %s"%(line))
xml_f = line
tree = ET.parse(xml_f)
root = tree.getroot()
filename = os.path.basename(xml_f)[:-4] + f".{postfix}"
image_id = index
size = get_and_check(root, 'size', 1)
width = int(get_and_check(size, 'width', 1).text)
height = int(get_and_check(size, 'height', 1).text)
image = {'file_name': filename, 'height': height, 'width': width, 'id':image_id}
json_dict['images'].append(image)
        ## Currently we do not support segmentation
# segmented = get_and_check(root, 'segmented', 1).text
# assert segmented == '0'
for obj in get(root, 'object'):
category = get_and_check(obj, 'name', 1).text
if category in all_categories:
all_categories[category] += 1
else:
all_categories[category] = 1
if category not in categories:
if only_care_pre_define_categories:
continue
new_id = len(categories) + 1
print("[warning] category '{}' not in 'pre_define_categories'({}), create new id: {} automatically".format(category, pre_define_categories, new_id))
categories[category] = new_id
category_id = categories[category]
bndbox = get_and_check(obj, 'bndbox', 1)
xmin = int(float(get_and_check(bndbox, 'xmin', 1).text))
ymin = int(float(get_and_check(bndbox, 'ymin', 1).text))
xmax = int(float(get_and_check(bndbox, 'xmax', 1).text))
ymax = int(float(get_and_check(bndbox, 'ymax', 1).text))
# if (xmax > xmin) or (ymax > ymin):
# continue
# assert(xmax > xmin), "xmax <= xmin, {}".format(line)
# assert(ymax > ymin), "ymax <= ymin, {}".format(line)
o_width = abs(xmax - xmin)
o_height = abs(ymax - ymin)
ann = {'area': o_width*o_height, 'iscrowd': 0, 'image_id':
image_id, 'bbox':[xmin, ymin, o_width, o_height],
'category_id': category_id, 'id': bnd_id, 'ignore': 0,
'segmentation': []}
json_dict['annotations'].append(ann)
bnd_id = bnd_id + 1
for cate, cid in categories.items():
cat = {'supercategory': 'none', 'id': cid, 'name': cate}
json_dict['categories'].append(cat)
json_fp = open(json_file, 'w')
json_str = json.dumps(json_dict)
json_fp.write(json_str)
json_fp.close()
print("------------create {} done--------------".format(json_file))
print("find {} categories: {} -->>> your pre_define_categories {}: {}".format(len(all_categories), all_categories.keys(), len(pre_define_categories), pre_define_categories.keys()))
print("category: id --> {}".format(categories))
print(categories.keys())
print(categories.values()) | null |
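convert() relies on the module-level names postfix, pre_define_categories and only_care_pre_define_categories, which are set elsewhere in the original script; a hedged sketch with assumed values:
postfix = 'jpg'                                              # assumed image extension
pre_define_categories = {'person': 1, 'car': 2}              # assumed class-name -> id mapping
only_care_pre_define_categories = True
xml_list = glob.glob(os.path.join('Annotations', '*.xml'))   # placeholder folder
convert(xml_list, 'annotations_coco.json')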
166,075 | import numpy as np
import torch
from torch import nn
from torch.nn import init
The provided code snippet includes necessary dependencies for implementing the `autopad` function. Write a Python function `def autopad(k, p=None, d=1)` to solve the following problem:
Pad to 'same' shape outputs.
Here is the function:
def autopad(k, p=None, d=1): # kernel, padding, dilation
"""Pad to 'same' shape outputs."""
if d > 1:
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p | Pad to 'same' shape outputs. |
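A quick check of the 'same'-padding behaviour, including the dilated case:
conv = nn.Conv2d(3, 16, kernel_size=5, padding=autopad(5))                        # padding=2
dilated = nn.Conv2d(3, 16, kernel_size=3, dilation=2, padding=autopad(3, d=2))    # padding=2
x = torch.randn(1, 3, 32, 32)
print(conv(x).shape, dilated(x).shape)            # both keep the 32x32 spatial size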
166,076 | import numpy as np
import torch
from torch import nn
from torch.nn import init
def spatial_shift1(x):
b, w, h, c = x.size()
x[:, 1:, :, :c // 4] = x[:, :w - 1, :, :c // 4]
x[:, :w - 1, :, c // 4:c // 2] = x[:, 1:, :, c // 4:c // 2]
x[:, :, 1:, c // 2:c * 3 // 4] = x[:, :, :h - 1, c // 2:c * 3 // 4]
x[:, :, :h - 1, 3 * c // 4:] = x[:, :, 1:, 3 * c // 4:]
return x | null |
166,077 | import numpy as np
import torch
from torch import nn
from torch.nn import init
def spatial_shift2(x):
b, w, h, c = x.size()
x[:, :, 1:, :c // 4] = x[:, :, :h - 1, :c // 4]
x[:, :, :h - 1, c // 4:c // 2] = x[:, :, 1:, c // 4:c // 2]
x[:, 1:, :, c // 2:c * 3 // 4] = x[:, :w - 1, :, c // 2:c * 3 // 4]
x[:, :w - 1, :, 3 * c // 4:] = x[:, 1:, :, 3 * c // 4:]
return x | null |
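Both helpers expect channels-last (B, W, H, C) tensors and shift four channel groups by one pixel in opposite directions, in the style of S2-MLP mixing; a minimal sketch (note the shifts modify the input in place):
x = torch.randn(2, 14, 14, 32)                    # channels-last: (B, W, H, C)
y = spatial_shift1(x.clone())                     # clone() because the shift is in-place
z = spatial_shift2(x.clone())
print(y.shape, z.shape)                           # both stay (2, 14, 14, 32)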
166,078 | from typing import Tuple, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from torch import Tensor, LongTensor
def _grid2seq(x:Tensor, region_size:Tuple[int], num_heads:int):
"""
Args:
x: BCHW tensor
region size: int
num_heads: number of attention heads
Return:
out: rearranged x, has a shape of (bs, nhead, nregion, reg_size, head_dim)
region_h, region_w: number of regions per col/row
"""
B, C, H, W = x.size()
region_h, region_w = H//region_size[0], W//region_size[1]
x = x.view(B, num_heads, C//num_heads, region_h, region_size[0], region_w, region_size[1])
x = torch.einsum('bmdhpwq->bmhwpqd', x).flatten(2, 3).flatten(-3, -2) # (bs, nhead, nregion, reg_size, head_dim)
return x, region_h, region_w
def _seq2grid(x:Tensor, region_h:int, region_w:int, region_size:Tuple[int]):
"""
Args:
x: (bs, nhead, nregion, reg_size^2, head_dim)
Return:
x: (bs, C, H, W)
"""
bs, nhead, nregion, reg_size_square, head_dim = x.size()
x = x.view(bs, nhead, region_h, region_w, region_size[0], region_size[1], head_dim)
x = torch.einsum('bmhwpqd->bmdhpwq', x).reshape(bs, nhead*head_dim,
region_h*region_size[0], region_w*region_size[1])
return x
The provided code snippet includes necessary dependencies for implementing the `regional_routing_attention_torch` function. Write a Python function `def regional_routing_attention_torch( query:Tensor, key:Tensor, value:Tensor, scale:float, region_graph:LongTensor, region_size:Tuple[int], kv_region_size:Optional[Tuple[int]]=None, auto_pad=True)->Tensor` to solve the following problem:
Args: query, key, value: (B, C, H, W) tensor scale: the scale/temperature for dot product attention region_graph: (B, nhead, h_q*w_q, topk) tensor, topk <= h_k*w_k region_size: region/window size for queries, (rh, rw) key_region_size: optional, if None, key_region_size=region_size auto_pad: required to be true if the input sizes are not divisible by the region_size Return: output: (B, C, H, W) tensor attn: (bs, nhead, q_nregion, reg_size, topk*kv_region_size) attention matrix
Here is the function:
def regional_routing_attention_torch(
query:Tensor, key:Tensor, value:Tensor, scale:float,
region_graph:LongTensor, region_size:Tuple[int],
kv_region_size:Optional[Tuple[int]]=None,
auto_pad=True)->Tensor:
"""
Args:
query, key, value: (B, C, H, W) tensor
scale: the scale/temperature for dot product attention
region_graph: (B, nhead, h_q*w_q, topk) tensor, topk <= h_k*w_k
region_size: region/window size for queries, (rh, rw)
key_region_size: optional, if None, key_region_size=region_size
auto_pad: required to be true if the input sizes are not divisible by the region_size
Return:
output: (B, C, H, W) tensor
attn: (bs, nhead, q_nregion, reg_size, topk*kv_region_size) attention matrix
"""
kv_region_size = kv_region_size or region_size
bs, nhead, q_nregion, topk = region_graph.size()
# Auto pad to deal with any input size
q_pad_b, q_pad_r, kv_pad_b, kv_pad_r = 0, 0, 0, 0
if auto_pad:
_, _, Hq, Wq = query.size()
q_pad_b = (region_size[0] - Hq % region_size[0]) % region_size[0]
q_pad_r = (region_size[1] - Wq % region_size[1]) % region_size[1]
if (q_pad_b > 0 or q_pad_r > 0):
query = F.pad(query, (0, q_pad_r, 0, q_pad_b)) # zero padding
_, _, Hk, Wk = key.size()
kv_pad_b = (kv_region_size[0] - Hk % kv_region_size[0]) % kv_region_size[0]
kv_pad_r = (kv_region_size[1] - Wk % kv_region_size[1]) % kv_region_size[1]
if (kv_pad_r > 0 or kv_pad_b > 0):
key = F.pad(key, (0, kv_pad_r, 0, kv_pad_b)) # zero padding
value = F.pad(value, (0, kv_pad_r, 0, kv_pad_b)) # zero padding
# to sequence format, i.e. (bs, nhead, nregion, reg_size, head_dim)
query, q_region_h, q_region_w = _grid2seq(query, region_size=region_size, num_heads=nhead)
key, _, _ = _grid2seq(key, region_size=kv_region_size, num_heads=nhead)
value, _, _ = _grid2seq(value, region_size=kv_region_size, num_heads=nhead)
# gather key and values.
    # TODO: is separate gathering slower than the fused one (our old version)?
# torch.gather does not support broadcasting, hence we do it manually
bs, nhead, kv_nregion, kv_region_size, head_dim = key.size()
broadcasted_region_graph = region_graph.view(bs, nhead, q_nregion, topk, 1, 1).\
expand(-1, -1, -1, -1, kv_region_size, head_dim)
key_g = torch.gather(key.view(bs, nhead, 1, kv_nregion, kv_region_size, head_dim).\
expand(-1, -1, query.size(2), -1, -1, -1), dim=3,
index=broadcasted_region_graph) # (bs, nhead, q_nregion, topk, kv_region_size, head_dim)
value_g = torch.gather(value.view(bs, nhead, 1, kv_nregion, kv_region_size, head_dim).\
expand(-1, -1, query.size(2), -1, -1, -1), dim=3,
index=broadcasted_region_graph) # (bs, nhead, q_nregion, topk, kv_region_size, head_dim)
# token-to-token attention
# (bs, nhead, q_nregion, reg_size, head_dim) @ (bs, nhead, q_nregion, head_dim, topk*kv_region_size)
# -> (bs, nhead, q_nregion, reg_size, topk*kv_region_size)
# TODO: mask padding region
attn = (query * scale) @ key_g.flatten(-3, -2).transpose(-1, -2)
attn = torch.softmax(attn, dim=-1)
# (bs, nhead, q_nregion, reg_size, topk*kv_region_size) @ (bs, nhead, q_nregion, topk*kv_region_size, head_dim)
# -> (bs, nhead, q_nregion, reg_size, head_dim)
output = attn @ value_g.flatten(-3, -2)
# to BCHW format
output = _seq2grid(output, region_h=q_region_h, region_w=q_region_w, region_size=region_size)
# remove paddings if needed
if auto_pad and (q_pad_b > 0 or q_pad_r > 0):
output = output[:, :, :Hq, :Wq]
return output, attn | Args: query, key, value: (B, C, H, W) tensor scale: the scale/temperature for dot product attention region_graph: (B, nhead, h_q*w_q, topk) tensor, topk <= h_k*w_k region_size: region/window size for queries, (rh, rw) key_region_size: optional, if None, key_region_size=region_size auto_pad: required to be true if the input sizes are not divisible by the region_size Return: output: (B, C, H, W) tensor attn: (bs, nhead, q_nregion, reg_size, topk*kv_region_size) attention matrix |
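A shape-check sketch with a random routing graph; in BiFormer the graph would come from the coarse region-to-region attention top-k, so the random indices here are only for illustration.
B, C, H, W, nhead, topk = 1, 64, 32, 32, 4, 4
region_size = (8, 8)                                       # 4 x 4 = 16 query regions
q_nregion = (H // region_size[0]) * (W // region_size[1])
q, k, v = (torch.randn(B, C, H, W) for _ in range(3))
region_graph = torch.randint(0, q_nregion, (B, nhead, q_nregion, topk))
out, attn = regional_routing_attention_torch(q, k, v, scale=(C // nhead) ** -0.5,
                                             region_graph=region_graph,
                                             region_size=region_size)
print(out.shape)                                           # torch.Size([1, 64, 32, 32])
print(attn.shape)                                          # torch.Size([1, 4, 16, 64, 256])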
166,079 | import torch, time, math, thop, tqdm, torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
from prettytable import PrettyTable
from ops_dcnv3.modules import DCNv3
def time_synchronized():
# pytorch-accurate time
if torch.cuda.is_available():
torch.cuda.synchronize()
return time.time() | null |
166,080 | import torch, time, math, thop, tqdm, torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.conv import _ConvNd
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
from prettytable import PrettyTable
from ops_dcnv3.modules import DCNv3
def autopad(k, p=None, d=1): # kernel, padding, dilation
# Pad to 'same' shape outputs
if d > 1:
k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
if p is None:
p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
return p | null |
166,081 | import warnings
import argparse
import logging
import math
import os
import random
import time
import sys
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from tqdm import tqdm
from utils.torch_utils import select_device
from models.common import DetectMultiBackend
def get_weight_size(path):
stats = os.stat(path)
return f'{stats.st_size / 1024 / 1024:.1f}' | null |
166,082 | import warnings
import argparse
import logging
import math
import os
import random
import time
import sys
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from models.experimental import attempt_load
from models.yolo import Model
from utils.torch_utils import select_device
def get_weight_size(path):
stats = os.stat(path)
return f'{stats.st_size / 1024 / 1024:.1f}' | null |
166,083 | import os, cv2, tqdm, shutil
import numpy as np
def xywh2xyxy(box):
box[:, 0] = box[:, 0] - box[:, 2] / 2
box[:, 1] = box[:, 1] - box[:, 3] / 2
box[:, 2] = box[:, 0] + box[:, 2]
box[:, 3] = box[:, 1] + box[:, 3]
return box | null |
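A tiny check (the function modifies the array in place, hence the copy):
boxes = np.array([[50.0, 50.0, 20.0, 10.0]])      # (cx, cy, w, h)
print(xywh2xyxy(boxes.copy()))                    # [[40. 45. 60. 55.]] i.e. (x1, y1, x2, y2)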
166,084 | import os, cv2, tqdm, shutil
import numpy as np
def iou(box1, box2):
x11, y11, x12, y12 = np.split(box1, 4, axis=1)
x21, y21, x22, y22 = np.split(box2, 4, axis=1)
xa = np.maximum(x11, np.transpose(x21))
xb = np.minimum(x12, np.transpose(x22))
ya = np.maximum(y11, np.transpose(y21))
yb = np.minimum(y12, np.transpose(y22))
area_inter = np.maximum(0, (xb - xa + 1)) * np.maximum(0, (yb - ya + 1))
area_1 = (x12 - x11 + 1) * (y12 - y11 + 1)
area_2 = (x22 - x21 + 1) * (y22 - y21 + 1)
area_union = area_1 + np.transpose(area_2) - area_inter
iou = area_inter / area_union
return iou | null |
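A small sanity check; the +1 terms treat coordinates as inclusive pixel indices:
gt = np.array([[0.0, 0.0, 9.0, 9.0]])             # one 10x10 box
pred = np.array([[0.0, 0.0, 9.0, 9.0],
                 [5.0, 5.0, 14.0, 14.0]])
print(iou(gt, pred))                              # approximately [[1.0, 0.143]]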
166,085 | import os, cv2, tqdm, shutil
import numpy as np
def draw_box(img, box, color):
cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, thickness=2)
return img | null |
166,086 | import os
import random
import numpy as np
import torch
import pkg_resources as pkg
def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):
    # Minimal sketch of the version check used below: compare the installed version against the
    # required minimum (exact match when pinned); the full project helper may also log a warning.
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)
    if hard:
        assert result, f'{name}{minimum} is required, but {name}{current} is currently installed'
    return result
def set_seeds(seed=0, deterministic=False):
# Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe
# torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213
torch.use_deterministic_algorithms(True)
torch.backends.cudnn.deterministic = True
os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
os.environ['PYTHONHASHSEED'] = str(seed) | null |
166,087 | import cv2
import numpy as np
import matplotlib.pylab as plt
from segment_anything import SamPredictor, sam_model_registry
def show_mask(mask, ax, random_color=False):
if random_color:
color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
else:
color = np.array([30/255, 144/255, 255/255, 0.6])
h, w = mask.shape[-2:]
mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
ax.imshow(mask_image) | null |
166,088 | import cv2
import numpy as np
import matplotlib.pylab as plt
from segment_anything import SamPredictor, sam_model_registry
def show_points(coords, labels, ax, marker_size=375):
pos_points = coords[labels==1]
neg_points = coords[labels==0]
ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) | null |
166,089 | import cv2
import numpy as np
import matplotlib.pylab as plt
from segment_anything import SamPredictor, sam_model_registry
def show_box(box, ax):
x0, y0 = box[0], box[1]
w, h = box[2] - box[0], box[3] - box[1]
ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0,0,0,0), lw=2)) | null |
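A minimal sketch tying these plotting helpers (show_mask and show_points from the preceding snippets, show_box above for box prompts) to a SamPredictor call; the image path, model type, checkpoint, and point prompt are placeholders:
image = cv2.cvtColor(cv2.imread('image.jpg'), cv2.COLOR_BGR2RGB)
sam = sam_model_registry['vit_b'](checkpoint='sam_vit_b.pth')  # placeholder model type and checkpoint
predictor = SamPredictor(sam)
predictor.set_image(image)
point_coords, point_labels = np.array([[320, 240]]), np.array([1])  # one foreground click
masks, scores, _ = predictor.predict(point_coords=point_coords, point_labels=point_labels)
plt.figure()
plt.imshow(image)
show_mask(masks[np.argmax(scores)], plt.gca())
show_points(point_coords, point_labels, plt.gca())
plt.axis('off')
plt.show()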
166,090 | import os
import cv2
import json
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import argparse
def yolo2coco(arg):
root_path = arg.root_dir
print("Loading data from ",root_path)
assert os.path.exists(root_path)
originLabelsDir = os.path.join(root_path, 'labels/test')
originImagesDir = os.path.join(root_path, 'images/test')
with open(os.path.join(root_path, 'classes.txt')) as f:
classes = list(map(lambda x:x.strip(), f.readlines()))
# images dir name
indexes = os.listdir(originImagesDir)
dataset = {'categories': [], 'annotations': [], 'images': []}
for i, cls in enumerate(classes, 0):
dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
    # annotation id counter
ann_id_cnt = 0
for k, index in enumerate(tqdm(indexes)):
        # supports images in png and jpg format
txtFile = index.replace('images','txt').replace('.jpg','.txt').replace('.png','.txt')
        # read the image width and height
im = cv2.imread(os.path.join(originImagesDir, index))
height, width, _ = im.shape
        # add the image info
if not os.path.exists(os.path.join(originLabelsDir, txtFile)):
            # if there is no label file, skip this image
continue
dataset['images'].append({'file_name': index,
'id': int(index[:-4]) if index[:-4].isnumeric() else index[:-4],
'width': width,
'height': height})
with open(os.path.join(originLabelsDir, txtFile), 'r') as fr:
labelList = fr.readlines()
for label in labelList:
label = label.strip().split()
x = float(label[1])
y = float(label[2])
w = float(label[3])
h = float(label[4])
# convert x,y,w,h to x1,y1,x2,y2
H, W, _ = im.shape
x1 = (x - w / 2) * W
y1 = (y - h / 2) * H
x2 = (x + w / 2) * W
y2 = (y + h / 2) * H
                # class indices start from 0; the coco2017 numbering is messy, so it is ignored here
cls_id = int(label[0])
width = max(0, x2 - x1)
height = max(0, y2 - y1)
dataset['annotations'].append({
'area': width * height,
'bbox': [x1, y1, width, height],
'category_id': cls_id,
'id': ann_id_cnt,
'image_id': int(index[:-4]) if index[:-4].isnumeric() else index[:-4],
'iscrowd': 0,
                    # segmentation: the four rectangle corners, clockwise from the top-left point
'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
})
ann_id_cnt += 1
    # save the result
with open(arg.save_path, 'w') as f:
json.dump(dataset, f)
print('Save annotation to {}'.format(arg.save_path)) | null |
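A hedged sketch of a typical entry point for the converter above; the two argument names match the attributes the function reads (root_dir, save_path), while the defaults are placeholders:
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', default='dataset', type=str, help='dataset root containing images/, labels/ and classes.txt')
    parser.add_argument('--save_path', default='annotations.json', type=str, help='output COCO-format json path')
    args = parser.parse_args()
    yolo2coco(args)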
166,091 | import math
import cv2
import torch
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import logging
LOGGER = logging.getLogger(__name__)  # stand-in for the project logger used below
The provided code snippet includes necessary dependencies for implementing the `feature_visualization` function. Write a Python function `def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp'))` to solve the following problem:
x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot save_dir: Directory to save results
Here is the function:
def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):
"""
x: Features to be visualized
module_type: Module type
stage: Module stage within model
n: Maximum number of feature maps to plot
save_dir: Directory to save results
"""
if 'Detect' not in module_type:
batch, channels, height, width = x.shape # batch, channels, height, width
if height > 1 and width > 1:
f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename
blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels
n = min(n, channels) # number of plots
            fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True)  # n/8 rows x 8 cols
ax = ax.ravel()
plt.subplots_adjust(wspace=0.05, hspace=0.05)
for i in range(n):
block = blocks[i].squeeze().detach().numpy()
block = (block - np.min(block)) / (np.max(block) - np.min(block))
temp = np.array(block * 255.0, dtype=np.uint8)
temp = cv2.applyColorMap(temp, cv2.COLORMAP_JET)
ax[i].imshow(temp, cmap=plt.cm.jet) # cmap='gray'
ax[i].axis('off')
LOGGER.info(f'Saving {f}... ({n}/{channels})')
plt.savefig(f, dpi=300, bbox_inches='tight')
plt.close()
np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save | x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot save_dir: Directory to save results |
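A minimal call sketch for the hook above; the activation tensor, module name, and stage index are illustrative, and save_dir points at the current directory so the output path exists:
feats = torch.randn(1, 64, 80, 80)  # stand-in for an intermediate feature map
feature_visualization(feats, module_type='models.common.C3', stage=2, n=16, save_dir=Path('.'))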
166,092 | import datetime
import os
from typing import List
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import Executor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.airflow.airflow_dag_runner import AirflowDagRunner
from tfx.orchestration.airflow.airflow_dag_runner import AirflowPipelineConfig
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
class Executor(GenericExecutor):
"""Local estimator based trainer executor used by the TFX Trainer component.
How to create a trainer callback function to be used by this Trainer executor:
An estimator can be executed by TFX by first creating a trainer_fn callback
method that returns an estimator and some additional parameters, similar to
https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.
This becomes the basis of the new Executor for Trainer. This Executor will
then train and evaluate this estimator using the
tf.estimator.train_and_evaluate API to train locally.
"""
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Uses a user-supplied tf.estimator to train a TensorFlow model locally.
The Trainer Executor invokes a training_fn callback function provided by
the user via the module_file parameter. With the tf.estimator returned by
this function, the Trainer Executor then builds a TensorFlow model using the
user-provided tf.estimator.
Args:
input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
- examples: Examples used for training, must include 'train' and 'eval'
          if custom splits are not specified in train_args and eval_args.
- transform_graph: Optional input transform graph.
- schema: Schema of the data.
output_dict: Output dict from output key to a list of Artifacts.
- model: Exported model.
- model_run: Model training related outputs (e.g., Tensorboard logs)
exec_properties: A dict of execution properties.
- train_args: JSON string of trainer_pb2.TrainArgs instance, providing
args for training.
- eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
args for eval.
- module_file: Python module file containing UDF model definition.
Exactly one of `module_file`, `module_path` and `trainer_fn` should
be passed.
- module_path: Python module path containing UDF model definition.
Exactly one of `module_file`, `module_path` and `trainer_fn` should
be passed.
- trainer_fn: Python module path to the trainer function.
Exactly one of `module_file`, `module_path` and `trainer_fn` should
be passed.
- warm_starting: Whether or not we need to do warm starting.
- warm_start_from: Optional. If warm_starting is True, this is the
directory to find previous model to warm start on.
- custom_config: Optional. JSON-serialized dict of additional parameters
to pass to trainer function.
Returns:
None
Raises:
ValueError: When not exactly one of `module_file`, `module_path` and
`trainer_fn` are present in `exec_properties`.
"""
self._log_startup(input_dict, output_dict, exec_properties)
fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
trainer_fn = udf_utils.get_fn(exec_properties, 'trainer_fn')
schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
# TODO(b/160795287): Deprecate estimator based executor.
# Provide user with a modified fn_args, with model_run given as
# the working directory. Executor will then copy user models to
# model artifact directory.
serving_dest = fn_args.serving_model_dir
eval_dest = fn_args.eval_model_dir
working_dir = fn_args.model_run_dir
fn_args.serving_model_dir = path_utils.serving_model_dir(working_dir)
fn_args.eval_model_dir = path_utils.eval_model_dir(working_dir)
training_spec = trainer_fn(fn_args, schema)
# Train the model
absl.logging.info('Training model.')
tf_estimator.train_and_evaluate(training_spec['estimator'],
training_spec['train_spec'],
training_spec['eval_spec'])
absl.logging.info(
'Training complete. Model written to %s. ModelRun written to %s',
fn_args.serving_model_dir, fn_args.model_run_dir)
# Export an eval savedmodel for TFMA. If distributed training, it must only
# be written by the chief worker, as would be done for serving savedmodel.
if _is_chief():
absl.logging.info('Exporting eval_savedmodel for TFMA.')
tfma.export.export_eval_savedmodel(
estimator=training_spec['estimator'],
export_dir_base=fn_args.eval_model_dir,
eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])
absl.logging.info('Exported eval_savedmodel to %s.',
fn_args.eval_model_dir)
# TODO(b/160795287): Deprecate estimator based executor.
# Copy serving and eval model from model_run to model artifact directory.
serving_source = path_utils.serving_model_path(fn_args.model_run_dir)
io_utils.copy_dir(serving_source, serving_dest)
absl.logging.info('Serving model copied to: %s.', serving_dest)
eval_source = path_utils.eval_model_path(fn_args.model_run_dir)
io_utils.copy_dir(eval_source, eval_dest)
absl.logging.info('Eval model copied to: %s.', eval_dest)
else:
absl.logging.info(
'Model export is skipped because this is not the chief worker.')
class Model(_TfxArtifact):
"""Artifact that contains the actual persisted model.
Training components stores the trained model like a saved model in this
artifact. A `Model` artifact contains serialization of the trained model in
one or more formats, each suitable for different usage (e.g. serving,
evaluation), and serving environments.
* File structure:
- `{uri}/`
- `Format-Serving/`: Model exported for serving.
- `saved_model.pb`
- Other actual model files.
- `Format-TFMA/`: Model exported for evaluation.
- `saved_model.pb`
- Other actual model files.
* Commonly used custom properties of the Model artifact:
"""
TYPE_NAME = 'Model'
TYPE_ANNOTATION = SystemModel
class ModelBlessing(_TfxArtifact):
"""Artifact that contains the evaluation of a trained model.
This artifact is usually used with
Conditional when determining
whether to push this model on service or not.
```python
# Run pusher if evaluator has blessed the model.
with tfx.dsl.Cond(evaluator.outputs['blessing'].future()
[0].custom_property('blessed') == 1):
pusher = Pusher(...)
```
* File structure:
- `{uri}/`
- `BLESSED`: if the evaluator has blessed the model.
- `NOT_BLESSED`: if the evaluator has not blessed the model.
- See tfx/components/evaluator/executor.py for how to write
ModelBlessing.
* Commonly used custom properties of the ModelBlessing artifact:
- `blessed`: int value that represents whether the evaluator has blessed its
model or not.
"""
TYPE_NAME = 'ModelBlessing'
The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem:
Implements the chicago taxi pipeline with TFX.
Here is the function:
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
module_file: str, serving_model_dir: str,
metadata_path: str,
beam_pipeline_args: List[str]) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
# Parametrize data root so it can be replaced on runtime. See the
# "Passing Parameters when triggering dags" section of
# https://airflow.apache.org/docs/apache-airflow/stable/dag-run.html
# for more details.
data_root_runtime = data_types.RuntimeParameter(
'data_root', ptype=str, default=data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=data_root_runtime)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses user-provided Python function that implements a model.
trainer = Trainer(
module_file=module_file,
custom_executor_spec=executor_spec.ExecutorClassSpec(Executor),
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Get the latest blessed model for model validation.
model_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(
type=ModelBlessing)).with_id('latest_blessed_model_resolver')
  # Uses TFMA to compute evaluation statistics over the features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'accuracy':
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen, statistics_gen, schema_gen, example_validator, transform,
trainer, model_resolver, evaluator, pusher
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
beam_pipeline_args=beam_pipeline_args) | Implements the chicago taxi pipeline with TFX. |
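A hedged sketch of how a pipeline built by the function above is typically handed to Airflow via the runner imported in this snippet; every path and the schedule are placeholders:
_airflow_config = {
    'schedule_interval': None,
    'start_date': datetime.datetime(2023, 1, 1),
}
DAG = AirflowDagRunner(AirflowPipelineConfig(_airflow_config)).run(
    _create_pipeline(
        pipeline_name='chicago_taxi_simple',
        pipeline_root='pipelines/chicago_taxi_simple',
        data_root='data/simple',
        module_file='taxi_utils.py',
        serving_model_dir='serving_model/chicago_taxi_simple',
        metadata_path='metadata/chicago_taxi_simple/metadata.db',
        beam_pipeline_args=['--direct_num_workers=0']))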
166,093 | import argparse
import base64
import csv
import json
import os
import subprocess
import tempfile
from typing import List
from absl import app
from absl.flags import argparse_flags
import requests
import tensorflow_data_validation as tfdv
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2
The provided code snippet includes necessary dependencies for implementing the `_read_schema` function. Write a Python function `def _read_schema(path)` to solve the following problem:
Reads a schema from the provided location. Args: path: The location of the file holding a serialized Schema proto. Returns: An instance of Schema or None if the input argument is None
Here is the function:
def _read_schema(path):
"""Reads a schema from the provided location.
Args:
path: The location of the file holding a serialized Schema proto.
Returns:
An instance of Schema or None if the input argument is None
"""
result = schema_pb2.Schema()
contents = file_io.read_file_to_string(path)
text_format.Parse(contents, result)
return result | Reads a schema from the provided location. Args: path: The location of the file holding a serialized Schema proto. Returns: An instance of Schema or None if the input argument is None |
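A small usage sketch for the reader above; the pbtxt path is a placeholder:
schema = _read_schema('schema.pbtxt')
print([feature.name for feature in schema.feature])  # list the feature names declared in the schema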
166,094 | import argparse
import base64
import csv
import json
import os
import subprocess
import tempfile
from typing import List
from absl import app
from absl.flags import argparse_flags
import requests
import tensorflow_data_validation as tfdv
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2
_LABEL_KEY = 'tips'
def _make_proto_coder(schema):
raw_feature_spec = _get_raw_feature_spec(schema)
raw_schema = schema_utils.schema_from_feature_spec(raw_feature_spec)
return tft_coders.ExampleProtoCoder(raw_schema)
def _do_local_inference(host, port, serialized_examples):
"""Performs inference on a model hosted by the host:port server."""
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the guidelines in:
# https://www.tensorflow.org/tfx/serving/api_rest
example_bytes = base64.b64encode(serialized_example).decode('utf-8')
predict_request = '{ "b64": "%s" }' % example_bytes
json_examples.append(predict_request)
json_request = '{ "instances": [' + ','.join(map(str, json_examples)) + ']}'
server_url = 'http://' + host + ':' + port + '/v1/models/chicago_taxi:predict'
response = requests.post(
server_url, data=json_request, timeout=_LOCAL_INFERENCE_TIMEOUT_SECONDS)
response.raise_for_status()
prediction = response.json()
print(json.dumps(prediction, indent=4))
def _do_aiplatform_inference(model, version, serialized_examples):
"""Performs inference on the model:version in AI Platform."""
working_dir = tempfile.mkdtemp()
instances_file = os.path.join(working_dir, 'test.json')
json_examples = []
for serialized_example in serialized_examples:
# The encoding follows the example in:
# https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
json_examples.append('{ "inputs": { "b64": "%s" } }' %
base64.b64encode(serialized_example).decode('utf-8'))
file_io.write_string_to_file(instances_file, '\n'.join(json_examples))
gcloud_command = [
'gcloud', 'ai-platform', 'predict', '--model', model, '--version',
version, '--json-instances', instances_file
]
print(subprocess.check_output(gcloud_command))
The provided code snippet includes necessary dependencies for implementing the `_do_inference` function. Write a Python function `def _do_inference(model_handle, examples_file, num_examples, schema)` to solve the following problem:
Sends requests to the model and prints the results. Args: model_handle: handle to the model. This can be either "aiplatform:model:version" or "host:port" examples_file: path to csv file containing examples, with the first line assumed to have the column headers num_examples: number of requests to send to the server schema: a Schema describing the input data Returns: Response from model server
Here is the function:
def _do_inference(model_handle, examples_file, num_examples, schema):
"""Sends requests to the model and prints the results.
Args:
model_handle: handle to the model. This can be either
"aiplatform:model:version" or "host:port"
examples_file: path to csv file containing examples, with the first line
assumed to have the column headers
num_examples: number of requests to send to the server
schema: a Schema describing the input data
Returns:
Response from model server
"""
filtered_features = [
feature for feature in schema.feature if feature.name != _LABEL_KEY
]
del schema.feature[:]
schema.feature.extend(filtered_features)
proto_coder = _make_proto_coder(schema)
csv_reader = csv.DictReader(open(examples_file, 'r'))
dataset_stats = tfdv.generate_statistics_from_csv(examples_file)
serialized_examples = []
for _ in range(num_examples):
one_line = next(csv_reader)
if not one_line:
print('End of example file reached')
break
one_example = {}
for feature in schema.feature:
name = feature.name
feature_stats = tfdv.get_feature_stats(dataset_stats.datasets[0],
tfdv.FeaturePath([name]))
if one_line[name]:
if feature.type == schema_pb2.FLOAT:
one_example[name] = [float(one_line[name])]
elif feature.type == schema_pb2.INT:
one_example[name] = [int(one_line[name])]
elif feature.type == schema_pb2.BYTES:
one_example[name] = [one_line[name].encode('utf8')]
else:
# TF serve does not like missing features, so we'll populate
# the missing features with their mean/mode instead
if feature.type == schema_pb2.FLOAT:
one_example[name] = [feature_stats.num_stats.mean]
elif feature.type == schema_pb2.INT:
one_example[name] = [int(feature_stats.num_stats.mean)]
elif feature.type == schema_pb2.BYTES:
top_values = list(feature_stats.string_stats.top_values)
if top_values:
one_example[name] = [top_values[0].value.encode('utf8')]
else:
one_example[name] = [''.encode('utf8')]
serialized_example = proto_coder.encode(one_example)
serialized_examples.append(serialized_example)
parsed_model_handle = model_handle.split(':')
if parsed_model_handle[0] == 'aiplatform':
_do_aiplatform_inference(
model=parsed_model_handle[1],
version=parsed_model_handle[2],
serialized_examples=serialized_examples)
else:
_do_local_inference(
host=parsed_model_handle[0],
port=parsed_model_handle[1],
serialized_examples=serialized_examples) | Sends requests to the model and prints the results. Args: model_handle: handle to the model. This can be either "aiplatform:model:version" or "host:port" examples_file: path to csv file containing examples, with the first line assumed to have the column headers num_examples: number of requests to send to the server schema: a Schema describing the input data Returns: Response from model server |
166,095 | import argparse
import base64
import csv
import json
import os
import subprocess
import tempfile
from typing import List
from absl import app
from absl.flags import argparse_flags
import requests
import tensorflow_data_validation as tfdv
from tensorflow_transform import coders as tft_coders
from tensorflow_transform.tf_metadata import schema_utils
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow_metadata.proto.v0 import schema_pb2
The provided code snippet includes necessary dependencies for implementing the `_parse_flags` function. Write a Python function `def _parse_flags(argv: List[str]) -> argparse.Namespace` to solve the following problem:
Command lines flag parsing.
Here is the function:
def _parse_flags(argv: List[str]) -> argparse.Namespace:
"""Command lines flag parsing."""
parser = argparse_flags.ArgumentParser()
parser.add_argument(
'--num_examples',
help=('Number of examples to send to the server.'),
default=1,
type=int)
parser.add_argument(
'--server',
help=('Prediction service host:port or aiplatform:model:version'),
required=True)
parser.add_argument(
'--examples_file',
help=('Path to csv file containing examples.'),
required=True)
parser.add_argument(
'--schema_file', help='File holding the schema for the input data')
return parser.parse_args(argv[1:]) | Command lines flag parsing. |
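A sketch of plugging the parser above into absl's entry point; main() is a placeholder that just echoes the parsed flags:
def main(args):
    # args is the argparse.Namespace returned by _parse_flags
    print(args.num_examples, args.server, args.examples_file, args.schema_file)

if __name__ == '__main__':
    app.run(main, flags_parser=_parse_flags)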
166,096 | from typing import List
from absl import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx_bsl.tfxio import dataset_options
_CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
_VOCAB_SIZE = 1000
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _fill_in_missing(x):
"""Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in.
"""
if not isinstance(x, tf.sparse.SparseTensor):
return x
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in _DENSE_FLOAT_FEATURE_KEYS:
# If sparse make it dense, setting nan's to 0 or '', and apply zscore.
outputs[_transformed_name(key)] = tft.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in _VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=_VOCAB_SIZE,
num_oov_buckets=_OOV_SIZE)
for key in _BUCKET_FEATURE_KEYS:
outputs[_transformed_name(key)] = tft.bucketize(
_fill_in_missing(inputs[key]),
_FEATURE_BUCKET_COUNT)
for key in _CATEGORICAL_FEATURE_KEYS:
outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
tips = _fill_in_missing(inputs[_LABEL_KEY])
outputs[_transformed_name(_LABEL_KEY)] = tf.where(
tf.math.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
return outputs | tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
166,097 | from typing import List
from absl import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx_bsl.tfxio import dataset_options
def _get_tf_examples_serving_signature(model, tf_transform_output):
"""Returns a serving signature that accepts `tensorflow.Example`."""
# We need to track the layers in the model in order to save it.
# TODO(b/162357359): Revise once the bug is resolved.
model.tft_layer_inference = tf_transform_output.transform_features_layer()
  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
])
def serve_tf_examples_fn(serialized_tf_example):
"""Returns the output to be used in the serving signature."""
raw_feature_spec = tf_transform_output.raw_feature_spec()
# Remove label feature since these will not be present at serving time.
raw_feature_spec.pop(_LABEL_KEY)
raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
transformed_features = model.tft_layer_inference(raw_features)
logging.info('serve_transformed_features = %s', transformed_features)
outputs = model(transformed_features)
# TODO(b/154085620): Convert the predicted labels from the model using a
# reverse-lookup (opposite of transform.py).
return {'outputs': outputs}
return serve_tf_examples_fn
def _get_transform_features_signature(model, tf_transform_output):
"""Returns a serving signature that applies tf.Transform to features."""
# We need to track the layers in the model in order to save it.
# TODO(b/162357359): Revise once the bug is resolved.
model.tft_layer_eval = tf_transform_output.transform_features_layer()
  @tf.function(input_signature=[
      tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
])
def transform_features_fn(serialized_tf_example):
"""Returns the transformed_features to be fed as input to evaluator."""
raw_feature_spec = tf_transform_output.raw_feature_spec()
raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
transformed_features = model.tft_layer_eval(raw_features)
logging.info('eval_transformed_features = %s', transformed_features)
return transformed_features
return transform_features_fn
def _input_fn(file_pattern: List[str],
data_accessor: DataAccessor,
tf_transform_output: tft.TFTransformOutput,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
dataset_options.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
tf_transform_output.transformed_metadata.schema).repeat()
def _build_keras_model(hidden_units: List[int] = None) -> tf.keras.Model:
"""Creates a DNN Keras model for classifying taxi data.
Args:
hidden_units: [int], the layer sizes of the DNN (input layer first).
Returns:
A keras Model.
"""
real_valued_columns = [
tf.feature_column.numeric_column(key, shape=())
for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
]
categorical_columns = [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
for key in _transformed_names(_VOCAB_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
for key in _transformed_names(_BUCKET_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension
key,
num_buckets=num_buckets,
default_value=0) for key, num_buckets in zip(
_transformed_names(_CATEGORICAL_FEATURE_KEYS),
_MAX_CATEGORICAL_FEATURE_VALUES)
]
indicator_column = [
tf.feature_column.indicator_column(categorical_column)
for categorical_column in categorical_columns
]
model = _wide_and_deep_classifier(
# TODO(b/139668410) replace with premade wide_and_deep keras model
wide_columns=indicator_column,
deep_columns=real_valued_columns,
dnn_hidden_units=hidden_units or [100, 70, 50, 25])
return model
class FnArgs:
"""Args to pass to user defined training/tuning function(s).
Attributes:
working_dir: Working dir.
train_files: A list of patterns for train files.
eval_files: A list of patterns for eval files.
train_steps: Number of train steps.
eval_steps: Number of eval steps.
schema_path: A single uri for schema file. Will be None if not specified.
schema_file: Deprecated, use `schema_path` instead.
transform_graph_path: An optional single uri for transform graph produced by
TFT. Will be None if not specified.
transform_output: Deprecated, use `transform_graph_path` instead.
data_accessor: Contains factories that can create tf.data.Datasets or other
means to access the train/eval data. They provide a uniform way of
accessing data, regardless of how the data is stored on disk.
serving_model_dir: A single uri for the output directory of the serving
model.
eval_model_dir: A single uri for the output directory of the eval model.
Note that this is estimator only, Keras doesn't require it for TFMA.
model_run_dir: A single uri for the output directory of model training
related files.
base_model: An optional base model path that will be used for this training.
hyperparameters: An optional keras_tuner.HyperParameters config.
custom_config: An optional dictionary passed to the component.
"""
working_dir = attr.ib(type=str, default=None)
train_files = attr.ib(type=List[str], default=None)
eval_files = attr.ib(type=List[str], default=None)
train_steps = attr.ib(type=int, default=None)
eval_steps = attr.ib(type=int, default=None)
schema_path = attr.ib(type=str, default=None)
schema_file = attr.ib(type=str, default=None)
transform_graph_path = attr.ib(type=str, default=None)
transform_output = attr.ib(type=str, default=None)
data_accessor = attr.ib(type=DataAccessor, default=None)
serving_model_dir = attr.ib(type=str, default=None)
eval_model_dir = attr.ib(type=str, default=None)
model_run_dir = attr.ib(type=str, default=None)
base_model = attr.ib(type=str, default=None)
hyperparameters = attr.ib(type=Dict[str, Any], default=None)
custom_config = attr.ib(type=Dict[str, Any], default=None)
The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: FnArgs)` to solve the following problem:
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
Here is the function:
def run_fn(fn_args: FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
# Number of nodes in the first layer of the DNN
first_dnn_layer_size = 100
num_dnn_layers = 4
dnn_decay_factor = 0.7
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor,
tf_transform_output, 40)
eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor,
tf_transform_output, 40)
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = _build_keras_model(
        # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
for i in range(num_dnn_layers)
])
# Write logs to path
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='epoch')
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback])
signatures = {
'serving_default':
_get_tf_examples_serving_signature(model, tf_transform_output),
'transform_features':
_get_transform_features_signature(model, tf_transform_output),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures) | Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. |
166,098 | from typing import List
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
_CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
_DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
_FEATURE_BUCKET_COUNT = 10
_BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
_VOCAB_SIZE = 1000
_OOV_SIZE = 10
_VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
_LABEL_KEY = 'tips'
_FARE_KEY = 'fare'
def _transformed_name(key):
return key + '_xf'
def _fill_in_missing(x):
"""Replace missing values in a SparseTensor.
Fills in missing values of `x` with '' or 0, and converts to a dense tensor.
Args:
x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
in the second dimension.
Returns:
A rank 1 tensor where missing values of `x` have been filled in.
"""
if not isinstance(x, tf.sparse.SparseTensor):
return x
default_value = '' if x.dtype == tf.string else 0
return tf.squeeze(
tf.sparse.to_dense(
tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
default_value),
axis=1)
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in _DENSE_FLOAT_FEATURE_KEYS:
# If sparse make it dense, setting nan's to 0 or '', and apply zscore.
outputs[_transformed_name(key)] = tft.scale_to_z_score(
_fill_in_missing(inputs[key]))
for key in _VOCAB_FEATURE_KEYS:
# Build a vocabulary for this feature.
outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(
_fill_in_missing(inputs[key]),
top_k=_VOCAB_SIZE,
num_oov_buckets=_OOV_SIZE)
for key in _BUCKET_FEATURE_KEYS:
outputs[_transformed_name(key)] = tft.bucketize(
_fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT)
for key in _CATEGORICAL_FEATURE_KEYS:
outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])
# Was this passenger a big tipper?
taxi_fare = _fill_in_missing(inputs[_FARE_KEY])
tips = _fill_in_missing(inputs[_LABEL_KEY])
outputs[_transformed_name(_LABEL_KEY)] = tf.compat.v1.where(
tf.math.is_nan(taxi_fare),
tf.cast(tf.zeros_like(taxi_fare), tf.int64),
# Test if the tip was > 20% of the fare.
tf.cast(
tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
return outputs | tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
166,099 | from typing import List
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx_bsl.tfxio import dataset_options
def _build_estimator(config, hidden_units=None, warm_start_from=None):
"""Build an estimator for predicting the tipping behavior of taxi riders.
Args:
config: tf.estimator.RunConfig defining the runtime environment for the
estimator (including model_dir).
hidden_units: [int], the layer sizes of the DNN (input layer first)
warm_start_from: Optional directory to warm start from.
Returns:
A dict of the following:
- estimator: The estimator that will be used for training and eval.
- train_spec: Spec for training.
- eval_spec: Spec for eval.
- eval_input_receiver_fn: Input function for eval.
"""
real_valued_columns = [
tf.feature_column.numeric_column(key, shape=())
for key in _transformed_names(_DENSE_FLOAT_FEATURE_KEYS)
]
categorical_columns = [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=_VOCAB_SIZE + _OOV_SIZE, default_value=0)
for key in _transformed_names(_VOCAB_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=_FEATURE_BUCKET_COUNT, default_value=0)
for key in _transformed_names(_BUCKET_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity( # pylint: disable=g-complex-comprehension
key,
num_buckets=num_buckets,
default_value=0) for key, num_buckets in zip(
_transformed_names(_CATEGORICAL_FEATURE_KEYS),
_MAX_CATEGORICAL_FEATURE_VALUES)
]
return tf_estimator.DNNLinearCombinedClassifier(
config=config,
linear_feature_columns=categorical_columns,
dnn_feature_columns=real_valued_columns,
dnn_hidden_units=hidden_units or [100, 70, 50, 25],
warm_start_from=warm_start_from)
def _example_serving_receiver_fn(tf_transform_output, schema):
"""Build the serving in inputs.
Args:
tf_transform_output: A TFTransformOutput.
schema: the schema of the input data.
Returns:
Tensorflow graph which parses examples, applying tf-transform to them.
"""
raw_feature_spec = _get_raw_feature_spec(schema)
raw_feature_spec.pop(_LABEL_KEY)
raw_input_fn = tf_estimator.export.build_parsing_serving_input_receiver_fn(
raw_feature_spec, default_batch_size=None)
serving_input_receiver = raw_input_fn()
transformed_features = tf_transform_output.transform_raw_features(
serving_input_receiver.features)
return tf_estimator.export.ServingInputReceiver(
transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output, schema):
"""Build everything needed for the tf-model-analysis to run the model.
Args:
tf_transform_output: A TFTransformOutput.
schema: the schema of the input data.
Returns:
EvalInputReceiver function, which contains:
- Tensorflow graph which parses raw untransformed features, applies the
tf-transform preprocessing operators.
- Set of raw, untransformed features.
- Label against which predictions will be compared.
"""
# Notice that the inputs are raw features, not transformed features here.
raw_feature_spec = _get_raw_feature_spec(schema)
serialized_tf_example = tf.compat.v1.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
# Add a parse_example operator to the tensorflow graph, which will parse
# raw, untransformed, tf examples.
features = tf.io.parse_example(
serialized=serialized_tf_example, features=raw_feature_spec)
# Now that we have our raw examples, process them through the tf-transform
# function computed during the preprocessing step.
transformed_features = tf_transform_output.transform_raw_features(
features)
# The key name MUST be 'examples'.
receiver_tensors = {'examples': serialized_tf_example}
# NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
features.update(transformed_features)
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=transformed_features[_transformed_name(_LABEL_KEY)])
def _input_fn(file_pattern: List[str],
data_accessor: DataAccessor,
tf_transform_output: tft.TFTransformOutput,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
dataset_options.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
tf_transform_output.transformed_metadata.schema)
The provided code snippet includes necessary dependencies for implementing the `trainer_fn` function. Write a Python function `def trainer_fn(trainer_fn_args, schema)` to solve the following problem:
Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval.
Here is the function:
def trainer_fn(trainer_fn_args, schema):
"""Build the estimator using the high level API.
Args:
trainer_fn_args: Holds args used to train the model as name/value pairs.
schema: Holds the schema of the training examples.
Returns:
A dict of the following:
- estimator: The estimator that will be used for training and eval.
- train_spec: Spec for training.
- eval_spec: Spec for eval.
- eval_input_receiver_fn: Input function for eval.
"""
# Number of nodes in the first layer of the DNN
first_dnn_layer_size = 100
num_dnn_layers = 4
dnn_decay_factor = 0.7
train_batch_size = 40
eval_batch_size = 40
tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)
train_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda
trainer_fn_args.train_files,
trainer_fn_args.data_accessor,
tf_transform_output,
batch_size=train_batch_size)
eval_input_fn = lambda: _input_fn( # pylint: disable=g-long-lambda
trainer_fn_args.eval_files,
trainer_fn_args.data_accessor,
tf_transform_output,
batch_size=eval_batch_size)
train_spec = tf_estimator.TrainSpec( # pylint: disable=g-long-lambda
train_input_fn,
max_steps=trainer_fn_args.train_steps)
serving_receiver_fn = lambda: _example_serving_receiver_fn( # pylint: disable=g-long-lambda
tf_transform_output, schema)
exporter = tf_estimator.FinalExporter('chicago-taxi', serving_receiver_fn)
eval_spec = tf_estimator.EvalSpec(
eval_input_fn,
steps=trainer_fn_args.eval_steps,
exporters=[exporter],
name='chicago-taxi-eval')
# Keep multiple checkpoint files for distributed training, note that
  # keep_checkpoint_max should be greater than or equal to the number of replicas to
# avoid race condition.
run_config = tf_estimator.RunConfig(
save_checkpoints_steps=999, keep_checkpoint_max=5)
run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
warm_start_from = trainer_fn_args.base_model
estimator = _build_estimator(
      # Construct layer sizes with exponential decay
hidden_units=[
max(2, int(first_dnn_layer_size * dnn_decay_factor**i))
for i in range(num_dnn_layers)
],
config=run_config,
warm_start_from=warm_start_from)
# Create an input receiver for TFMA processing
receiver_fn = lambda: _eval_input_receiver_fn( # pylint: disable=g-long-lambda
tf_transform_output, schema)
return {
'estimator': estimator,
'train_spec': train_spec,
'eval_spec': eval_spec,
'eval_input_receiver_fn': receiver_fn
} | Build the estimator using the high level API. Args: trainer_fn_args: Holds args used to train the model as name/value pairs. schema: Holds the schema of the training examples. Returns: A dict of the following: - estimator: The estimator that will be used for training and eval. - train_spec: Spec for training. - eval_spec: Spec for eval. - eval_input_receiver_fn: Input function for eval. |
166,100 | import os
from typing import List
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
class Model(_TfxArtifact):
"""Artifact that contains the actual persisted model.
Training components stores the trained model like a saved model in this
artifact. A `Model` artifact contains serialization of the trained model in
one or more formats, each suitable for different usage (e.g. serving,
evaluation), and serving environments.
* File structure:
- `{uri}/`
- `Format-Serving/`: Model exported for serving.
- `saved_model.pb`
- Other actual model files.
- `Format-TFMA/`: Model exported for evaluation.
- `saved_model.pb`
- Other actual model files.
* Commonly used custom properties of the Model artifact:
"""
TYPE_NAME = 'Model'
TYPE_ANNOTATION = SystemModel
class ModelBlessing(_TfxArtifact):
"""Artifact that contains the evaluation of a trained model.
This artifact is usually used with
Conditional when determining
whether to push this model on service or not.
```python
# Run pusher if evaluator has blessed the model.
with tfx.dsl.Cond(evaluator.outputs['blessing'].future()
[0].custom_property('blessed') == 1):
pusher = Pusher(...)
```
* File structure:
- `{uri}/`
- `BLESSED`: if the evaluator has blessed the model.
- `NOT_BLESSED`: if the evaluator has not blessed the model.
- See tfx/components/evaluator/executor.py for how to write
ModelBlessing.
* Commonly used custom properties of the ModelBlessing artifact:
- `blessed`: int value that represents whether the evaluator has blessed its
model or not.
"""
TYPE_NAME = 'ModelBlessing'
The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem:
Implements the chicago taxi pipeline with TFX.
Here is the function:
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
module_file: str, serving_model_dir: str,
metadata_path: str,
beam_pipeline_args: List[str]) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=data_root)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=True)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses user-provided Python function that implements a model.
trainer = Trainer(
module_file=module_file,
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=trainer_pb2.TrainArgs(num_steps=1000),
eval_args=trainer_pb2.EvalArgs(num_steps=150))
# Get the latest blessed model for model validation.
model_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(
type=ModelBlessing)).with_id('latest_blessed_model_resolver')
  # Uses TFMA to compute evaluation statistics over the features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(
signature_name='serving_default', label_key='tips_xf',
preprocessing_function_names=['transform_features'])
],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
class_name='BinaryAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10})))
])
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
beam_pipeline_args=beam_pipeline_args) | Implements the chicago taxi pipeline with TFX. |
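A hedged sketch of running the pipeline above with the Beam runner imported in this snippet; all paths and Beam options are placeholders:
if __name__ == '__main__':
    BeamDagRunner().run(
        _create_pipeline(
            pipeline_name='chicago_taxi_beam',
            pipeline_root='pipelines/chicago_taxi_beam',
            data_root='data/simple',
            module_file='taxi_utils_native_keras.py',
            serving_model_dir='serving_model/chicago_taxi_beam',
            metadata_path='metadata/chicago_taxi_beam/metadata.db',
            beam_pipeline_args=['--direct_running_mode=multi_processing', '--direct_num_workers=0']))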
166,101 | import os
from typing import List
import absl
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import Executor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.components.common import resolver
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local.local_dag_runner import LocalDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
class Executor(GenericExecutor):
"""Local estimator based trainer executor used by the TFX Trainer component.
How to create a trainer callback function to be used by this Trainer executor:
An estimator can be executed by TFX by first creating a trainer_fn callback
method that returns an estimator and some additional parameters, similar to
https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_utils.py#L285.
This becomes the basis of the new Executor for Trainer. This Executor will
then train and evaluate this estimator using the
tf.estimator.train_and_evaluate API to train locally.
"""
def Do(self, input_dict: Dict[str, List[types.Artifact]],
output_dict: Dict[str, List[types.Artifact]],
exec_properties: Dict[str, Any]) -> None:
"""Uses a user-supplied tf.estimator to train a TensorFlow model locally.
The Trainer Executor invokes a training_fn callback function provided by
the user via the module_file parameter. With the tf.estimator returned by
this function, the Trainer Executor then trains and evaluates the model using
the tf.estimator.train_and_evaluate API.
Args:
input_dict: Input dict from input key to a list of ML-Metadata Artifacts.
- examples: Examples used for training; must include 'train' and 'eval'
if custom splits are not specified in train_args and eval_args.
- transform_graph: Optional input transform graph.
- schema: Schema of the data.
output_dict: Output dict from output key to a list of Artifacts.
- model: Exported model.
- model_run: Model training related outputs (e.g., Tensorboard logs)
exec_properties: A dict of execution properties.
- train_args: JSON string of trainer_pb2.TrainArgs instance, providing
args for training.
- eval_args: JSON string of trainer_pb2.EvalArgs instance, providing
args for eval.
- module_file: Python module file containing UDF model definition.
Exactly one of `module_file`, `module_path` and `trainer_fn` should
be passed.
- module_path: Python module path containing UDF model definition.
Exactly one of `module_file`, `module_path` and `trainer_fn` should
be passed.
- trainer_fn: Python module path to the trainer function.
Exactly one of `module_file`, `module_path` and `trainer_fn` should
be passed.
- warm_starting: Whether or not we need to do warm starting.
- warm_start_from: Optional. If warm_starting is True, this is the
directory to find previous model to warm start on.
- custom_config: Optional. JSON-serialized dict of additional parameters
to pass to trainer function.
Returns:
None
Raises:
ValueError: When not exactly one of `module_file`, `module_path` and
`trainer_fn` are present in `exec_properties`.
"""
self._log_startup(input_dict, output_dict, exec_properties)
fn_args = self._GetFnArgs(input_dict, output_dict, exec_properties)
trainer_fn = udf_utils.get_fn(exec_properties, 'trainer_fn')
schema = io_utils.parse_pbtxt_file(fn_args.schema_file, schema_pb2.Schema())
# TODO(b/160795287): Deprecate estimator based executor.
# Provide user with a modified fn_args, with model_run given as
# the working directory. Executor will then copy user models to
# model artifact directory.
serving_dest = fn_args.serving_model_dir
eval_dest = fn_args.eval_model_dir
working_dir = fn_args.model_run_dir
fn_args.serving_model_dir = path_utils.serving_model_dir(working_dir)
fn_args.eval_model_dir = path_utils.eval_model_dir(working_dir)
training_spec = trainer_fn(fn_args, schema)
# Train the model
absl.logging.info('Training model.')
tf_estimator.train_and_evaluate(training_spec['estimator'],
training_spec['train_spec'],
training_spec['eval_spec'])
absl.logging.info(
'Training complete. Model written to %s. ModelRun written to %s',
fn_args.serving_model_dir, fn_args.model_run_dir)
# Export an eval savedmodel for TFMA. If distributed training, it must only
# be written by the chief worker, as would be done for serving savedmodel.
if _is_chief():
absl.logging.info('Exporting eval_savedmodel for TFMA.')
tfma.export.export_eval_savedmodel(
estimator=training_spec['estimator'],
export_dir_base=fn_args.eval_model_dir,
eval_input_receiver_fn=training_spec['eval_input_receiver_fn'])
absl.logging.info('Exported eval_savedmodel to %s.',
fn_args.eval_model_dir)
# TODO(b/160795287): Deprecate estimator based executor.
# Copy serving and eval model from model_run to model artifact directory.
serving_source = path_utils.serving_model_path(fn_args.model_run_dir)
io_utils.copy_dir(serving_source, serving_dest)
absl.logging.info('Serving model copied to: %s.', serving_dest)
eval_source = path_utils.eval_model_path(fn_args.model_run_dir)
io_utils.copy_dir(eval_source, eval_dest)
absl.logging.info('Eval model copied to: %s.', eval_dest)
else:
absl.logging.info(
'Model export is skipped because this is not the chief worker.')
class Model(_TfxArtifact):
"""Artifact that contains the actual persisted model.
Training components store the trained model (e.g. a SavedModel) in this
artifact. A `Model` artifact contains serializations of the trained model in
one or more formats, each suitable for a different usage (e.g. serving or
evaluation) and serving environment.
* File structure:
- `{uri}/`
- `Format-Serving/`: Model exported for serving.
- `saved_model.pb`
- Other actual model files.
- `Format-TFMA/`: Model exported for evaluation.
- `saved_model.pb`
- Other actual model files.
* Commonly used custom properties of the Model artifact:
"""
TYPE_NAME = 'Model'
TYPE_ANNOTATION = SystemModel
class ModelBlessing(_TfxArtifact):
"""Artifact that contains the evaluation of a trained model.
This artifact is usually used with Conditional when determining
whether to push this model to serving or not.
```python
# Run pusher if evaluator has blessed the model.
with tfx.dsl.Cond(evaluator.outputs['blessing'].future()
[0].custom_property('blessed') == 1):
pusher = Pusher(...)
```
* File structure:
- `{uri}/`
- `BLESSED`: if the evaluator has blessed the model.
- `NOT_BLESSED`: if the evaluator has not blessed the model.
- See tfx/components/evaluator/executor.py for how to write
ModelBlessing.
* Commonly used custom properties of the ModelBlessing artifact:
- `blessed`: int value that represents whether the evaluator has blessed its
model or not.
"""
TYPE_NAME = 'ModelBlessing'
The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> pipeline.Pipeline` to solve the following problem:
Implements the chicago taxi pipeline with TFX.
Here is the function:
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
module_file: str, serving_model_dir: str,
metadata_path: str,
beam_pipeline_args: List[str]) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=data_root)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Get the latest model so that we can warm start from the model.
latest_model_resolver = resolver.Resolver(
strategy_class=latest_artifacts_resolver.LatestArtifactsResolver,
latest_model=Channel(type=Model)).with_id('latest_model_resolver')
# Uses user-provided Python function that implements a model.
trainer = Trainer(
module_file=module_file,
custom_executor_spec=executor_spec.ExecutorClassSpec(Executor),
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
base_model=latest_model_resolver.outputs['latest_model'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Get the latest blessed model for model validation.
model_resolver = resolver.Resolver(
strategy_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(
type=ModelBlessing)).with_id('latest_blessed_model_resolver')
# Uses TFMA to compute evaluation statistics over features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'accuracy':
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
# Change threshold will be ignored if there is no baseline (first run).
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
latest_model_resolver,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
beam_pipeline_args=beam_pipeline_args) | Implements the chicago taxi pipeline with TFX. |
166,102 | import os
from typing import List
import absl
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tfx.components.trainer.rewriting import converters
from tfx.components.trainer.rewriting import rewriter
from tfx.components.trainer.rewriting import rewriter_factory
from tfx.dsl.io import fileio
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
_CUR_PAGE_FEATURE_KEY = 'cur_page'
_LABEL_KEY = 'label'
_VOCAB_FILENAME = 'vocab'
_TOP_K = 100
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
Callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
"""Callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = inputs.copy()
# Compute a vocabulary based on the TOP-K current pages and labels seen in
# the dataset.
vocab = tft.vocabulary(
tf.concat([inputs[_CUR_PAGE_FEATURE_KEY], inputs[_LABEL_KEY]], axis=0),
top_k=_TOP_K,
vocab_filename=_VOCAB_FILENAME)
# Apply the vocabulary to both the current page feature and the label,
# converting the strings into integers.
for k in [_CUR_PAGE_FEATURE_KEY, _LABEL_KEY]:
# Out-of-vocab strings will be assigned the _TOP_K value.
outputs[k] = tft.apply_vocabulary(inputs[k], vocab, default_value=_TOP_K)
return outputs | Callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
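The out-of-vocabulary behavior of `tft.apply_vocabulary(..., default_value=_TOP_K)` can be mimicked with a plain TensorFlow lookup table. A hedged sketch follows; the tiny vocabulary is made up, whereas the real one is computed by `tft.vocabulary` over the whole dataset:

```python
import tensorflow as tf

# Illustrative only: in-vocabulary strings map to their index, anything else
# maps to the default value (here 3, i.e. the vocabulary size).
vocab = tf.constant(['/home', '/products', '/checkout'])
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=vocab,
        values=tf.range(tf.size(vocab, out_type=tf.int64), dtype=tf.int64)),
    default_value=3)
print(table.lookup(tf.constant(['/home', '/unknown'])).numpy())  # [0 3]
```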
166,103 | import os
from typing import List
import absl
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tfx.components.trainer.rewriting import converters
from tfx.components.trainer.rewriting import rewriter
from tfx.components.trainer.rewriting import rewriter_factory
from tfx.dsl.io import fileio
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
_CUR_PAGE_FEATURE_KEY = 'cur_page'
_SESSION_INDEX_FEATURE_KEY = 'session_index'
_VOCAB_FILENAME = 'vocab'
_TRAIN_BATCH_SIZE = 32
_EVAL_BATCH_SIZE = 16
def _input_fn(file_pattern: List[str],
data_accessor: tfx.components.DataAccessor,
tf_transform_output: tft.TFTransformOutput,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch.
Returns:
A dataset that contains a (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
dataset = data_accessor.tf_dataset_factory(
file_pattern,
tfxio.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_LABEL_KEY),
tf_transform_output.transformed_metadata.schema)
return dataset.repeat()
def _build_keras_model() -> keras.Model:
"""Creates a Keras model for predicting the next page.
Returns:
A Keras Model.
"""
# This model has two inputs: (i) current page and (ii) session index.
cur_page_input = keras.Input(shape=(), name=_CUR_PAGE_FEATURE_KEY)
session_index_input = keras.Input(shape=(1,), name=_SESSION_INDEX_FEATURE_KEY)
inputs = [cur_page_input, session_index_input]
# Create an embedding for the current page.
cur_page_emb = keras.layers.Embedding(
_TOP_K + 1, _EMBEDDING_DIM, input_length=1)(
cur_page_input)
x = keras.layers.Concatenate()([cur_page_emb, session_index_input])
x = keras.layers.Dense(_UNITS, activation='relu')(x)
outputs = keras.layers.Dense(_TOP_K + 1)(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(0.0001),
metrics=[
'sparse_categorical_accuracy', 'sparse_top_k_categorical_accuracy'
])
model.summary(print_fn=absl.logging.info)
return model
def _get_inference_fn(model, tf_transform_output):
"""Defines the function used for inference."""
model.tft_layer = tf_transform_output.transform_features_layer()
def inference_fn(cur_page, session_index):
"""Returns the output to be used in the serving signature."""
return model({
_CUR_PAGE_FEATURE_KEY: cur_page,
_SESSION_INDEX_FEATURE_KEY: session_index
})
return inference_fn
The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem:
Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs.
Here is the function:
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_EVAL_BATCH_SIZE)
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = _build_keras_model()
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
verbose=2)
signatures = {
'serving_default':
_get_inference_fn(model, tf_transform_output).get_concrete_function(
tf.TensorSpec(
shape=[None], dtype=tf.int64, name=_CUR_PAGE_FEATURE_KEY),
tf.TensorSpec(
shape=[None], dtype=tf.int64,
name=_SESSION_INDEX_FEATURE_KEY)),
}
# Create the saved_model in a temporary directory.
temp_saving_model_dir = os.path.join(fn_args.serving_model_dir, 'temp')
model.save(temp_saving_model_dir, save_format='tf', signatures=signatures)
# Convert the saved_model to a tfjs model and store it in the final directory.
tfrw = rewriter_factory.create_rewriter(
rewriter_factory.TFJS_REWRITER, name='tfjs_rewriter')
converters.rewrite_saved_model(temp_saving_model_dir,
fn_args.serving_model_dir, tfrw,
rewriter.ModelType.TFJS_MODEL)
# Copy the vocabulary computed by transform to the final directory.
# The vocabulary is not included in the original savedmodel because vocab
# lookups are currently not supported in TFJS and are expected to be done
# independently by client code.
fileio.copy(
tf_transform_output.vocabulary_file_by_name(_VOCAB_FILENAME),
os.path.join(fn_args.serving_model_dir, _VOCAB_FILENAME))
fileio.rmtree(temp_saving_model_dir) | Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. |
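Since vocabulary lookups are left to the client (see the comment above), the copied vocab file is simply one token per line, as written by `tft.vocabulary`. A hypothetical client-side mapping, with the file path and the out-of-vocab id (100, the `_TOP_K` used at training time) stated as assumptions:

```python
# Illustrative only: client-side lookup mirroring tft.apply_vocabulary.
with open('vocab') as f:  # the file copied next to the TFJS model above
    page_to_id = {page: i for i, page in enumerate(f.read().splitlines())}

cur_page_id = page_to_id.get('/products', 100)  # out-of-vocab pages map to _TOP_K
```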
166,104 | import os
from typing import List
import absl
import tensorflow_model_analysis as tfma
from tfx.v1 import dsl
from tfx.v1 import orchestration
from tfx.v1 import proto
from tfx.v1 import types
from tfx.v1.components import Evaluator
from tfx.v1.components import ExampleValidator
from tfx.v1.components import ImportExampleGen
from tfx.v1.components import Pusher
from tfx.v1.components import SchemaGen
from tfx.v1.components import StatisticsGen
from tfx.v1.components import Trainer
from tfx.v1.components import Transform
The provided code snippet includes necessary dependencies for implementing the `_create_pipeline` function. Write a Python function `def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str, module_file: str, serving_model_dir: str, metadata_path: str, beam_pipeline_args: List[str]) -> dsl.Pipeline` to solve the following problem:
Implements the page prediction pipeline with TFX.
Here is the function:
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
module_file: str, serving_model_dir: str,
metadata_path: str,
beam_pipeline_args: List[str]) -> dsl.Pipeline:
"""Implements the page prediction pipline with TFX."""
input_config = proto.Input(
splits=[proto.Input.Split(name='input', pattern='*.tfrecord.gz')])
output_config = proto.Output(
split_config=proto.SplitConfig(splits=[
proto.SplitConfig.Split(name='train', hash_buckets=9),
proto.SplitConfig.Split(name='eval', hash_buckets=1)
]))
# Brings data into the pipeline.
example_gen = ImportExampleGen(
input_base=data_root,
input_config=input_config,
output_config=output_config)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses user-provided Python function that trains a model.
trainer = Trainer(
module_file=module_file,
examples=transform.outputs['transformed_examples'],
transform_graph=transform.outputs['transform_graph'],
schema=schema_gen.outputs['schema'],
train_args=proto.TrainArgs(num_steps=100000),
eval_args=proto.EvalArgs(num_steps=200))
# Get the latest blessed model for model validation.
model_resolver = dsl.Resolver(
strategy_class=dsl.experimental.LatestBlessedModelStrategy,
model=dsl.Channel(type=types.standard_artifacts.Model),
model_blessing=dsl.Channel(
type=types.standard_artifacts.ModelBlessing)).with_id(
'latest_blessed_model_resolver')
# Uses TFMA to compute evaluation statistics over features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
# Directly evaluates the tfjs model.
model_specs=[tfma.ModelSpec(label_key='label', model_type='tf_js')],
slicing_specs=[tfma.SlicingSpec()],
metrics_specs=[
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(
class_name='SparseCategoricalAccuracy',
threshold=tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
# Increase this threshold when training on complete
# dataset.
lower_bound={'value': 0.01}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-2})))
])
])
evaluator = Evaluator(
examples=transform.outputs['transformed_examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=proto.PushDestination(
filesystem=proto.PushDestination.Filesystem(
base_directory=serving_model_dir)))
components = [
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
trainer,
model_resolver,
evaluator,
pusher,
]
return dsl.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=components,
metadata_connection_config=orchestration.metadata
.sqlite_metadata_connection_config(metadata_path),
enable_cache=True,
beam_pipeline_args=beam_pipeline_args) | Implements the page prediction pipeline with TFX. |
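The `hash_buckets` in the output_config above split examples roughly 9:1 into train and eval by hashing each record. A rough sketch of that behavior (illustrative only, not ExampleGen internals):

```python
import hashlib

# Illustrative only: hash_buckets=9 vs hash_buckets=1 partitions records ~90/10.
def split_for(record_bytes: bytes) -> str:
    bucket = int(hashlib.sha256(record_bytes).hexdigest(), 16) % 10
    return 'train' if bucket < 9 else 'eval'

print(split_for(b'some-serialized-example'))  # 'train' about 90% of the time
```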
166,105 | from typing import Any, Dict, List, Union
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
import tensorflow as tf
def _sanitize_page_path(page_path: str):
"""Remove everything after the query."""
return page_path.split('?')[0]
def create_tensorflow_example(
features: Dict[str, List[Union[int, float, str]]]):
"""Populate a Tensorflow Example with the given features."""
result = tf.train.Example()
for name, value in features.items():
if not value:
raise ValueError('each feature must have a populated value list.')
if isinstance(value[0], int):
result.features.feature[name].int64_list.value.extend(value)
elif isinstance(value[0], float):
result.features.feature[name].float_list.value.extend(value)
else:
result.features.feature[name].bytes_list.value.extend(
[bytes(v, 'utf-8') for v in value])
return result
The provided code snippet includes necessary dependencies for implementing the `ga_session_to_tensorflow_examples` function. Write a Python function `def ga_session_to_tensorflow_examples(session: List[Any])` to solve the following problem:
Converts a Google Analytics Session to Tensorflow Examples.
Here is the function:
def ga_session_to_tensorflow_examples(session: List[Any]):
"""Converts a Google Analytics Session to Tensorflow Examples."""
examples = []
for i in range(len(session) - 1):
features = {
# Add any additional desired training features here.
'cur_page': [_sanitize_page_path(session[i]['page']['pagePath'])],
'label': [_sanitize_page_path(session[i + 1]['page']['pagePath'])],
'session_index': [i],
}
examples.append(create_tensorflow_example(features))
return examples | Converts a Google Analytics Session to Tensorflow Examples. |
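A hypothetical usage sketch of the two helpers above; the session dicts are simplified stand-ins for real Google Analytics hit records:

```python
# Each consecutive pair of hits yields one example: current page -> next page.
session = [
    {'page': {'pagePath': '/home?ref=ad'}},  # query string gets stripped
    {'page': {'pagePath': '/products'}},
    {'page': {'pagePath': '/checkout'}},
]
examples = ga_session_to_tensorflow_examples(session)
print(len(examples))                                               # 2
print(examples[0].features.feature['cur_page'].bytes_list.value)   # [b'/home']
print(examples[0].features.feature['label'].bytes_list.value)      # [b'/products']
```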
166,106 | from typing import Any, Dict, List, Union
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
import tensorflow as tf
def is_duplicate_event(first_event: Dict[str, Any],
second_event: Dict[str, Any]):
return (first_event['time'] == second_event['time'] or
first_event['page']['pagePath'] == second_event['page']['pagePath']) | null |
166,107 | from typing import Any, Dict, List, Union
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery
import tensorflow as tf
class ExampleGeneratingDoFn(beam.DoFn):
"""Creates Tensorflow Examples from the provided Google Analytics session."""
def process(self, entry: Dict[str, Any]):
session = entry['hits']
session.sort(key=lambda s: s['hitNumber'])
filtered_session = []
for s in session:
if filtered_session and is_duplicate_event(filtered_session[-1], s):
continue
filtered_session.append(s)
return ga_session_to_tensorflow_examples(filtered_session)
The provided code snippet includes necessary dependencies for implementing the `run_beam_pipeline` function. Write a Python function `def run_beam_pipeline()` to solve the following problem:
Run the apache beam pipeline with the specified flags.
Here is the function:
def run_beam_pipeline():
"""Run the apache beam pipeline with the specified flags."""
# Params used for running the Beam pipeline. Update these based on your
# requirements.
params = {}
# Specify the projectid for BigQuery
params['projectId'] = 'my_project_id'
# Specify the datasetid for BigQuery
params['datasetId'] = 'my_dataset_id'
# Specify the table for BigQuery
params['tableId'] = 'my_table_id'
# Specify the list of flags for the Beam pipeline
params['flags'] = ['--temp_location=my_temp_location']
# Specify the destination for the generated examples.
params['destination'] = 'my_destination'
table_spec = bigquery.TableReference(
projectId=params['projectId'],
datasetId=params['datasetId'],
tableId=params['tableId'])
with beam.Pipeline(
options=beam.options.pipeline_options.PipelineOptions(
flags=params['flags'])) as p:
_ = (
p
| 'ReadTable' >> beam.io.ReadFromBigQuery(table=table_spec)
| 'ConvertToTensorFlowExamples' >> beam.ParDo(ExampleGeneratingDoFn())
| 'Write' >> beam.io.tfrecordio.WriteToTFRecord(
params['destination'],  # e.g. 'gs://tfxdata/data/output'
coder=beam.coders.ProtoCoder(tf.train.Example),
file_name_suffix='.tfrecord.gz')) | Run the apache beam pipeline with the specified flags. |
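Each written record is a serialized `tf.train.Example`. A hedged sketch of reading one back for inspection; the shard name follows Beam's `<prefix>-SSSSS-of-NNNNN<suffix>` pattern and assumes the destination prefix was `gs://tfxdata/data/output`:

```python
import tensorflow as tf

# Hypothetical: inspect the first record written by the pipeline above.
path = 'gs://tfxdata/data/output-00000-of-00001.tfrecord.gz'  # assumed shard name
ds = tf.data.TFRecordDataset([path], compression_type='GZIP')
for raw in ds.take(1):
    print(tf.train.Example.FromString(raw.numpy()))
```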
166,108 | import datetime
import os
from typing import List
from absl import logging
import keras_tuner
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
import tfx.v1 as tfx
from tfx_bsl.public import tfxio
from tensorflow_cloud.core import machine_config
from tensorflow_cloud.tuner import tuner as cloud_tuner
from tensorflow_metadata.proto.v0 import schema_pb2
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
def _transformed_name(key):
return key + '_xf'
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in _FEATURE_KEYS:
# Nothing to transform for the penguin dataset. This code is just to
# show how the preprocessing function for Transform should be defined.
# We just assign original values to the transformed feature.
outputs[_transformed_name(key)] = inputs[key]
# TODO(b/157064428): Support label transformation for Keras.
# Do not apply label transformation as it will result in wrong evaluation.
outputs[_transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY]
return outputs | tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
166,109 | import datetime
import os
from typing import List
from absl import logging
import keras_tuner
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
import tfx.v1 as tfx
from tfx_bsl.public import tfxio
from tensorflow_cloud.core import machine_config
from tensorflow_cloud.tuner import tuner as cloud_tuner
from tensorflow_metadata.proto.v0 import schema_pb2
_LABEL_KEY = 'species'
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
_CLOUD_FIT_IMAGE = 'gcr.io/my-project-id/cloud_fit'
def _get_hyperparameters() -> keras_tuner.HyperParameters:
"""Returns hyperparameters for building Keras model."""
hp = keras_tuner.HyperParameters()
# Defines search space.
hp.Choice('learning_rate', [1e-5, 1e-4, 1e-3, 1e-2], default=1e-2)
hp.Int('num_layers', 1, 4, default=2)
return hp
def _build_keras_model(hparams: keras_tuner.HyperParameters) -> tf.keras.Model:
"""Creates a DNN Keras model for classifying penguin data.
Args:
hparams: Holds HyperParameters for tuning.
Returns:
A Keras Model.
"""
# The model below is built with Functional API, please refer to
# https://www.tensorflow.org/guide/keras/overview for all API options.
inputs = [
keras.layers.Input(shape=(1,), name=_transformed_name(f))
for f in _FEATURE_KEYS
]
d = keras.layers.concatenate(inputs)
for _ in range(int(hparams.get('num_layers'))):
d = keras.layers.Dense(8, activation='relu')(d)
outputs = keras.layers.Dense(3)(d)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(hparams.get('learning_rate')),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.summary(print_fn=logging.info)
return model
The provided code snippet includes necessary dependencies for implementing the `tuner_fn` function. Write a Python function `def tuner_fn(fn_args: tfx.components.FnArgs) -> tfx.components.TunerFnResult` to solve the following problem:
Build the tuner using the CloudTuner API. Args: fn_args: Holds args as name/value pairs. See https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs. - transform_graph_path: optional transform graph produced by TFT. - custom_config: An optional dictionary passed to the component. In this example, it contains the dict ai_platform_tuning_args. - working_dir: working dir for tuning. - train_files: List of file paths containing training tf.Example data. - eval_files: List of file paths containing eval tf.Example data. - train_steps: number of train steps. - eval_steps: number of eval steps. Returns: A namedtuple contains the following: - tuner: A BaseTuner that will be used for tuning. - fit_kwargs: Args to pass to tuner's run_trial function for fitting the model , e.g., the training and validation dataset. Required args depend on the above tuner's implementation. For DistributingCloudTuner, we generate datasets at the remote jobs rather than serialize and then deserialize them.
Here is the function:
def tuner_fn(fn_args: tfx.components.FnArgs) -> tfx.components.TunerFnResult:
"""Build the tuner using the CloudTuner API.
Args:
fn_args: Holds args as name/value pairs. See
https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs.
- transform_graph_path: optional transform graph produced by TFT.
- custom_config: An optional dictionary passed to the component. In this
example, it contains the dict ai_platform_tuning_args.
- working_dir: working dir for tuning.
- train_files: List of file paths containing training tf.Example data.
- eval_files: List of file paths containing eval tf.Example data.
- train_steps: number of train steps.
- eval_steps: number of eval steps.
Returns:
A namedtuple contains the following:
- tuner: A BaseTuner that will be used for tuning.
- fit_kwargs: Args to pass to tuner's run_trial function for fitting the
model , e.g., the training and validation dataset. Required
args depend on the above tuner's implementation. For
DistributingCloudTuner, we generate datasets at the remote
jobs rather than serialize and then deserialize them.
"""
# study_id should be the same across multiple tuner workers, which start
# approximately at the same time.
study_id = 'DistributingCloudTuner_study_{}'.format(
datetime.datetime.now().strftime('%Y%m%d%H'))
if _CLOUD_FIT_IMAGE == 'gcr.io/my-project-id/cloud_fit':
raise ValueError('Build your own cloud_fit image, ' +
'default dummy one is used!')
tuner = cloud_tuner.DistributingCloudTuner(
_build_keras_model,
# The project/region configurations for Cloud Vizier service and its trial
# executions. Note: this example uses the same configuration as the
# CAIP Training service for distributed tuning flock management to view
# all of the pipeline's jobs and resources in the same project. It can
# also be configured separately.
project_id=fn_args.custom_config['ai_platform_tuning_args']['project'],
region=fn_args.custom_config['ai_platform_tuning_args']['region'],
objective=keras_tuner.Objective('val_sparse_categorical_accuracy', 'max'),
hyperparameters=_get_hyperparameters(),
max_trials=5, # Optional.
directory=os.path.join(fn_args.custom_config['remote_trials_working_dir'],
study_id),
study_id=study_id,
container_uri=_CLOUD_FIT_IMAGE,
# Optional `MachineConfig` that represents the configuration for the
# general workers in a distribution cluster. More options see:
# https://github.com/tensorflow/cloud/blob/master/src/python/tensorflow_cloud/core/machine_config.py
replica_config=machine_config.COMMON_MACHINE_CONFIGS['K80_1X'],
# Optional total number of workers in a distribution cluster including a
# chief worker.
replica_count=2)
return tfx.components.TunerFnResult(
tuner=tuner,
fit_kwargs={
'steps_per_epoch': fn_args.train_steps,
'validation_steps': fn_args.eval_steps,
'train_files': fn_args.train_files,
'eval_files': fn_args.eval_files,
'transform_graph_path': fn_args.transform_graph_path,
'label_key': _LABEL_KEY,
'train_batch_size': _TRAIN_BATCH_SIZE,
'eval_batch_size': _EVAL_BATCH_SIZE,
}) | Build the tuner using the CloudTuner API. Args: fn_args: Holds args as name/value pairs. See https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs. - transform_graph_path: optional transform graph produced by TFT. - custom_config: An optional dictionary passed to the component. In this example, it contains the dict ai_platform_tuning_args. - working_dir: working dir for tuning. - train_files: List of file paths containing training tf.Example data. - eval_files: List of file paths containing eval tf.Example data. - train_steps: number of train steps. - eval_steps: number of eval steps. Returns: A namedtuple contains the following: - tuner: A BaseTuner that will be used for tuning. - fit_kwargs: Args to pass to tuner's run_trial function for fitting the model , e.g., the training and validation dataset. Required args depend on the above tuner's implementation. For DistributingCloudTuner, we generate datasets at the remote jobs rather than serialize and then deserialize them. |
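Downstream, the Tuner component consumes the returned `TunerFnResult` roughly as sketched below (a simplification of the executor, not its exact code):

```python
# Sketch only: the Tuner executor unpacks the namedtuple and runs the search.
result = tuner_fn(fn_args)  # fn_args is supplied by the Tuner component
result.tuner.search(**result.fit_kwargs)
best_hparams = result.tuner.get_best_hyperparameters()[0]
```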
166,110 | import datetime
import os
from typing import List
from absl import logging
import keras_tuner
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
import tfx.v1 as tfx
from tfx_bsl.public import tfxio
from tensorflow_cloud.core import machine_config
from tensorflow_cloud.tuner import tuner as cloud_tuner
from tensorflow_metadata.proto.v0 import schema_pb2
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
def _get_tf_examples_serving_signature(model, tf_transform_output):
"""Returns a serving signature that accepts `tensorflow.Example`."""
# We need to track the layers in the model in order to save it.
# TODO(b/162357359): Revise once the bug is resolved.
model.tft_layer_inference = tf_transform_output.transform_features_layer()
@tf.function(input_signature=[
    tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
])
def serve_tf_examples_fn(serialized_tf_example):
"""Returns the output to be used in the serving signature."""
raw_feature_spec = tf_transform_output.raw_feature_spec()
# Remove label feature since these will not be present at serving time.
raw_feature_spec.pop(_LABEL_KEY)
raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
transformed_features = model.tft_layer_inference(raw_features)
logging.info('serve_transformed_features = %s', transformed_features)
outputs = model(transformed_features)
# TODO(b/154085620): Convert the predicted labels from the model using a
# reverse-lookup (opposite of transform.py).
return {'outputs': outputs}
return serve_tf_examples_fn
def _get_transform_features_signature(model, tf_transform_output):
"""Returns a serving signature that applies tf.Transform to features."""
# We need to track the layers in the model in order to save it.
# TODO(b/162357359): Revise once the bug is resolved.
model.tft_layer_eval = tf_transform_output.transform_features_layer()
@tf.function(input_signature=[
    tf.TensorSpec(shape=[None], dtype=tf.string, name='examples')
])
def transform_features_fn(serialized_tf_example):
"""Returns the transformed_features to be fed as input to evaluator."""
raw_feature_spec = tf_transform_output.raw_feature_spec()
raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec)
transformed_features = model.tft_layer_eval(raw_features)
logging.info('eval_transformed_features = %s', transformed_features)
return transformed_features
return transform_features_fn
def _input_fn(file_pattern: List[str],
data_accessor: tfx.components.DataAccessor,
schema: schema_pb2.Schema,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
schema: Schema from TFTransform component.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains a (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
tfxio.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
schema).repeat()
def _get_hyperparameters() -> keras_tuner.HyperParameters:
"""Returns hyperparameters for building Keras model."""
hp = keras_tuner.HyperParameters()
# Defines search space.
hp.Choice('learning_rate', [1e-5, 1e-4, 1e-3, 1e-2], default=1e-2)
hp.Int('num_layers', 1, 4, default=2)
return hp
def _build_keras_model(hparams: keras_tuner.HyperParameters) -> tf.keras.Model:
"""Creates a DNN Keras model for classifying penguin data.
Args:
hparams: Holds HyperParameters for tuning.
Returns:
A Keras Model.
"""
# The model below is built with Functional API, please refer to
# https://www.tensorflow.org/guide/keras/overview for all API options.
inputs = [
keras.layers.Input(shape=(1,), name=_transformed_name(f))
for f in _FEATURE_KEYS
]
d = keras.layers.concatenate(inputs)
for _ in range(int(hparams.get('num_layers'))):
d = keras.layers.Dense(8, activation='relu')(d)
outputs = keras.layers.Dense(3)(d)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(hparams.get('learning_rate')),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.summary(print_fn=logging.info)
return model
The provided code snippet includes necessary dependencies for implementing the `run_fn` function. Write a Python function `def run_fn(fn_args: tfx.components.FnArgs)` to solve the following problem:
Train the model based on given args. Args: fn_args: Holds args as name/value pairs. See https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs. - train_files: List of file paths containing training tf.Example data. - eval_files: List of file paths containing eval tf.Example data. - data_accessor: Contains factories that can create tf.data.Datasets or other means to access the train/eval data. They provide a uniform way of accessing data, regardless of how the data is stored on disk. - train_steps: number of train steps. - eval_steps: number of eval steps. - transform_output: A uri to a path containing statistics and metadata from TFTransform component. produced by TFT. Will be None if not specified. - model_run_dir: A single uri for the output directory of model training related files. - hyperparameters: An optional keras_tuner.HyperParameters config.
Here is the function:
def run_fn(fn_args: tfx.components.FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args as name/value pairs. See
https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs.
- train_files: List of file paths containing training tf.Example data.
- eval_files: List of file paths containing eval tf.Example data.
- data_accessor: Contains factories that can create tf.data.Datasets or
other means to access the train/eval data. They provide a uniform way of
accessing data, regardless of how the data is stored on disk.
- train_steps: number of train steps.
- eval_steps: number of eval steps.
- transform_output: A uri to a path containing statistics and metadata
from TFTransform component. produced by TFT. Will be None if not
specified.
- model_run_dir: A single uri for the output directory of model training
related files.
- hyperparameters: An optional keras_tuner.HyperParameters config.
"""
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
schema = tf_transform_output.transformed_metadata.schema
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
schema,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
schema,
batch_size=_EVAL_BATCH_SIZE)
if fn_args.hyperparameters:
hparams = keras_tuner.HyperParameters.from_config(fn_args.hyperparameters)
else:
# This covers the case where the hyperparameters are already decided and the
# Tuner is removed from the pipeline. Users can also inline the hyperparameters
# directly in _build_keras_model.
hparams = _get_hyperparameters()
logging.info('HyperParameters for training: %s', hparams.get_config())
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = _build_keras_model(hparams)
# Write logs to path
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='epoch')
model.fit(
train_dataset,
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback])
signatures = {
'serving_default':
_get_tf_examples_serving_signature(model, tf_transform_output),
'transform_features':
_get_transform_features_signature(model, tf_transform_output),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures) | Train the model based on given args. Args: fn_args: Holds args as name/value pairs. See https://www.tensorflow.org/tfx/api_docs/python/tfx/components/trainer/fn_args_utils/FnArgs. - train_files: List of file paths containing training tf.Example data. - eval_files: List of file paths containing eval tf.Example data. - data_accessor: Contains factories that can create tf.data.Datasets or other means to access the train/eval data. They provide a uniform way of accessing data, regardless of how the data is stored on disk. - train_steps: number of train steps. - eval_steps: number of eval steps. - transform_output: A uri to a path containing statistics and metadata from TFTransform component. produced by TFT. Will be None if not specified. - model_run_dir: A single uri for the output directory of model training related files. - hyperparameters: An optional keras_tuner.HyperParameters config. |
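The exported `serving_default` signature accepts serialized `tf.Example` protos and applies the Transform graph before running the model. A hedged sketch of invoking it; the model path and feature values are illustrative:

```python
import tensorflow as tf

# Hypothetical: build one serialized tf.Example with the four raw penguin features.
example = tf.train.Example()
for name, value in [('culmen_length_mm', 39.1), ('culmen_depth_mm', 18.7),
                    ('flipper_length_mm', 181.0), ('body_mass_g', 3750.0)]:
    example.features.feature[name].float_list.value.append(value)

loaded = tf.saved_model.load('/tmp/serving_model')  # stand-in for fn_args.serving_model_dir
infer = loaded.signatures['serving_default']
print(infer(examples=tf.constant([example.SerializeToString()]))['outputs'])
```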
166,111 | from typing import List
from absl import logging
import tensorflow as tf
import tensorflow_transform as tft
from tfx import v1 as tfx
from tfx_bsl.public import tfxio
FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
def transformed_name(key):
return key + '_xf'
The provided code snippet includes necessary dependencies for implementing the `preprocessing_fn` function. Write a Python function `def preprocessing_fn(inputs)` to solve the following problem:
tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations.
Here is the function:
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in FEATURE_KEYS:
# tft.scale_to_z_score computes the mean and variance of the given feature
# and scales the output based on the result.
outputs[transformed_name(key)] = tft.scale_to_z_score(inputs[key])
# TODO(b/157064428): Support label transformation for Keras.
# Do not apply label transformation as it will result in wrong evaluation.
outputs[transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY]
return outputs | tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. |
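For reference, `tft.scale_to_z_score` standardizes each feature to zero mean and unit variance using statistics computed over the whole dataset during the Transform analysis pass. A rough NumPy equivalent with made-up values:

```python
import numpy as np

# Illustrative only: what scale_to_z_score does to one feature column.
flipper_length_mm = np.array([181.0, 186.0, 195.0, 193.0])
z = (flipper_length_mm - flipper_length_mm.mean()) / flipper_length_mm.std()
print(z.round(3))  # mean ~0, std ~1 over the column
```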