code stringlengths 17 6.64M |
|---|
class ROIActionHead(torch.nn.Module):
    """
    Generic Action Head: pools ROI features, predicts per-box action logits
    and evaluates loss/accuracy.

    ``part_forward`` selects the operating mode:
      * -1: normal full forward (training or plain inference).
      *  0: stop after feature extraction and return the pooled features.
      *  1: resume prediction from precomputed features carried in ``extras``.
    """

    def __init__(self, cfg, dim_in):
        super(ROIActionHead, self).__init__()
        self.feature_extractor = make_roi_action_feature_extractor(cfg, dim_in)
        self.predictor = make_roi_action_predictor(cfg, self.feature_extractor.dim_out)
        self.post_processor = make_roi_action_post_processor(cfg)
        self.loss_evaluator = make_roi_action_loss_evaluator(cfg)
        self.accuracy_evaluator = make_roi_action_accuracy_evaluator(cfg)
        # Scale by which test-time proposal boxes are spatially extended.
        self.test_ext = cfg.TEST.EXTEND_SCALE

    def forward(self, slow_features, fast_features, boxes, objects=None, keypoints=None, extras=None, part_forward=-1):
        # Split (two-stage) inference is incompatible with training.
        assert not (self.training and part_forward >= 0)
        # BUGFIX: the default for ``extras`` was a shared mutable dict ({});
        # use None and create a fresh dict per call instead.
        if extras is None:
            extras = {}
        if part_forward == 1:
            # Stage 2: person/object/hand/pose features were precomputed in stage 1.
            boxes = extras['current_feat_p']
            objects = extras['current_feat_o']
            keypoints = [extras['current_feat_h'], extras['current_feat_pose']]
        if self.training:
            proposals = self.loss_evaluator.sample_box(boxes)
        else:
            proposals = [box.extend(self.test_ext) for box in boxes]
        x, x_pooled, x_objects, x_keypoints, x_pose = self.feature_extractor(
            slow_features, fast_features, proposals, objects, keypoints, extras, part_forward)
        if part_forward == 0:
            # Stage 1: wrap the pooled features for caching and return early.
            pooled_feature = prepare_pooled_feature(x_pooled, boxes)
            if x_objects is None:
                object_pooled_feature = None
            else:
                object_pooled_feature = prepare_pooled_feature(x_objects, objects)
            if x_keypoints is None:
                keypoints_pooled_feature = None
            else:
                keypoints_pooled_feature = prepare_pooled_feature(x_keypoints, keypoints)
            if x_pose is None:
                pose_pooled_feature = None
            else:
                pose_pooled_feature = prepare_pooled_feature(x_pose, keypoints)
            return ([pooled_feature, object_pooled_feature, keypoints_pooled_feature, pose_pooled_feature], {}, {}, {})
        action_logits = self.predictor(x)
        if not self.training:
            result = self.post_processor((action_logits,), boxes)
            return (result, {}, {}, {})
        # Average the per-GPU box count across ranks so the loss normalization
        # is consistent in distributed training.
        box_num = action_logits.size(0)
        box_num = torch.as_tensor([box_num], dtype=torch.float32, device=action_logits.device)
        all_reduce(box_num, average=True)
        loss_dict, loss_weight = self.loss_evaluator([action_logits], box_num.item())
        metric_dict = self.accuracy_evaluator([action_logits], proposals, box_num.item())
        pooled_feature = prepare_pooled_feature(x_pooled, proposals)
        if x_objects is None:
            object_pooled_feature = []
        else:
            object_pooled_feature = prepare_pooled_feature(x_objects, objects)
        if x_keypoints is None:
            keypoints_pooled_feature = []
        else:
            keypoints_pooled_feature = prepare_pooled_feature(x_keypoints, keypoints)
        if self.training:
            pose_pooled_feature = prepare_pooled_feature(x_pose, keypoints)
        if part_forward == 1:
            # NOTE(review): unreachable in practice -- reaching this point means
            # self.training is True, and the assert above rules out training
            # with part_forward >= 0. Kept for fidelity with the original.
            pose_pooled_feature = prepare_pooled_feature(x_pose, keypoints[1])
        return ([pooled_feature, object_pooled_feature, keypoints_pooled_feature, pose_pooled_feature], loss_dict, loss_weight, metric_dict)

    def c2_weight_mapping(self):
        """Aggregate child modules' Caffe2 weight mappings, prefixed with the child name."""
        weight_map = {}
        for name, m_child in self.named_children():
            if m_child.state_dict() and hasattr(m_child, 'c2_weight_mapping'):
                for key, val in m_child.c2_weight_mapping().items():
                    weight_map[name + '.' + key] = val
        return weight_map
|
def build_roi_action_head(cfg, dim_in):
    """Factory: construct the ROI action head for the given config and input dim."""
    return ROIActionHead(cfg, dim_in)
|
class Mlp(nn.Module):
    """Two-layer feed-forward block: linear -> activation -> dropout -> linear -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Falsy (None/0) hidden/out sizes fall back to the input width.
        hidden = hidden_features or in_features
        out = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
|
class Attention(nn.Module):
    """Standard multi-head self-attention over a (B, N, C) token sequence."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scale is 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        batch, tokens, channels = x.shape
        qkv = (
            self.qkv(x)
            .reshape(batch, tokens, 3, self.num_heads, channels // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        queries, keys, values = qkv.unbind(0)
        scores = (queries @ keys.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ values).transpose(1, 2).reshape(batch, tokens, channels)
        return self.proj_drop(self.proj(out))
|
class Block(nn.Module):
    """Pre-norm transformer block: self-attention and MLP, each with a residual.

    ``drop_path`` is accepted for interface compatibility, but stochastic depth
    is disabled here: the drop path is always an identity.
    """

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias,
                              qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
|
class PoseTransformer(nn.Module):
    """Spatial-then-temporal transformer encoder over 2D pose keypoints.

    Per frame, a spatial transformer attends across joints (each joint embedded
    to ``embed_dim_ratio`` channels); frames are then flattened to
    ``embed_dim_ratio * num_joints`` features, mixed by temporal blocks,
    reduced over frames by a learned weighted mean, and projected to a
    1024-d vector reshaped to (B, 1024, 1, 1, 1).
    """
    def __init__(self, num_frame=1, num_joints=17, in_chans=2, embed_dim_ratio=32, depth=4, num_heads=8, mlp_ratio=2.0, qkv_bias=True, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.2, norm_layer=None):
        """
        Args:
            num_frame (int, tuple): input frame number
            num_joints (int, tuple): joints number
            in_chans (int): number of input channels, 2D joints have 2 channels: (x,y)
            embed_dim_ratio (int): embedding dimension ratio
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            norm_layer: (nn.Module): normalization layer
        """
        super().__init__()
        norm_layer = (norm_layer or partial(nn.LayerNorm, eps=1e-06))
        # Temporal token width: one embed_dim_ratio slot per joint.
        embed_dim = (embed_dim_ratio * num_joints)
        out_dim = 1024
        self.Spatial_patch_to_embedding = nn.Linear(in_chans, embed_dim_ratio)
        # Learned positional embeddings for joints (spatial) and frames (temporal).
        self.Spatial_pos_embed = nn.Parameter(torch.zeros(1, num_joints, embed_dim_ratio))
        self.Temporal_pos_embed = nn.Parameter(torch.zeros(1, num_frame, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Per-block stochastic-depth rates. NOTE(review): Block ignores drop_path
        # (always Identity), so these values currently have no effect.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.Spatial_blocks = nn.ModuleList([Block(dim=embed_dim_ratio, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) for i in range(depth)])
        self.blocks = nn.ModuleList([Block(dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) for i in range(depth)])
        self.Spatial_norm = norm_layer(embed_dim_ratio)
        self.Temporal_norm = norm_layer(embed_dim)
        # Learned weighted mean over the frame axis (Conv1d with kernel 1).
        self.weighted_mean = torch.nn.Conv1d(in_channels=num_frame, out_channels=1, kernel_size=1)
        self.head = nn.Sequential(nn.LayerNorm(embed_dim), nn.Linear(embed_dim, out_dim))
    def Spatial_forward_features(self, x):
        """Attend across joints within each frame.

        x: (b, c, f, p) -> (b, f, p * embed_dim_ratio).
        """
        (b, _, f, p) = x.shape
        # Fold frames into the batch so spatial attention sees one frame at a time.
        x = rearrange(x, 'b c f p -> (b f) p c')
        x = x.float()
        x = self.Spatial_patch_to_embedding(x)
        x += self.Spatial_pos_embed
        x = self.pos_drop(x)
        for blk in self.Spatial_blocks:
            x = blk(x)
        x = self.Spatial_norm(x)
        # Restore the frame axis and concatenate joint embeddings per frame.
        x = rearrange(x, '(b f) w c -> b f (w c)', f=f)
        return x
    def forward_features(self, x):
        """Attend across frames, then reduce to a single token via the weighted mean."""
        b = x.shape[0]
        x += self.Temporal_pos_embed
        x = self.pos_drop(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.Temporal_norm(x)
        x = self.weighted_mean(x)
        x = x.view(b, 1, (- 1))
        return x
    def forward(self, x):
        # Input appears to be (b, frames, joints, coords); permuted to
        # (b, coords, frames, joints) -- TODO confirm against the caller.
        x = x.permute(0, 3, 1, 2)
        (b, _, _, p) = x.shape
        x = self.Spatial_forward_features(x)
        x = self.forward_features(x)
        x = torch.flatten(x, start_dim=1)
        x = self.head(x)
        # Expose as a 5D (B, C, 1, 1, 1) tensor -- presumably to align with 3D
        # feature maps downstream; verify against the consumer.
        x = x.view(x.shape[0], x.shape[1], 1, 1, 1)
        return x
|
@registry.ROI_ACTION_PREDICTORS.register('FCPredictor')
class FCPredictor(nn.Module):
    """Single fully-connected classification head with optional dropout."""

    def __init__(self, config, dim_in):
        super(FCPredictor, self).__init__()
        head_cfg = config.MODEL.ROI_ACTION_HEAD
        if head_cfg.DROPOUT_RATE > 0:
            # Only created when enabled; forward() checks for it via hasattr.
            self.dropout = nn.Dropout(p=head_cfg.DROPOUT_RATE, inplace=True)
        self.cls_score = nn.Linear(dim_in, head_cfg.NUM_CLASSES)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        if hasattr(self, 'dropout'):
            flat = self.dropout(flat)
        return self.cls_score(flat)

    def c2_weight_mapping(self):
        """Map parameter names to their Caffe2 blob names."""
        return {'cls_score.weight': 'pred_w', 'cls_score.bias': 'pred_b'}
|
def make_roi_action_predictor(cfg, dim_in):
    """Look up the configured predictor class in the registry and instantiate it."""
    predictor_cls = registry.ROI_ACTION_PREDICTORS[cfg.MODEL.ROI_ACTION_HEAD.PREDICTOR]
    return predictor_cls(cfg, dim_in)
|
class Combined3dROIHeads(torch.nn.ModuleDict):
    """Container bundling the 3D ROI heads (currently only the 'action' head)."""

    def __init__(self, cfg, heads):
        super(Combined3dROIHeads, self).__init__(heads)
        self.cfg = cfg.clone()

    def forward(self, slow_features, fast_features, boxes, objects=None, keypoints=None, extras=None, part_forward=-1):
        """Delegate to the action head.

        Returns:
            (result, loss_action, loss_weight, accuracy_action) as produced
            by the action head.
        """
        # BUGFIX: the default for ``extras`` was a shared mutable dict ({});
        # use None and build a fresh dict per call instead.
        if extras is None:
            extras = {}
        return self.action(slow_features, fast_features, boxes, objects,
                           keypoints, extras, part_forward)

    def c2_weight_mapping(self):
        """Aggregate the C2 weight mappings of all child heads, prefixed by child name."""
        weight_map = {}
        for name, child in self.named_children():
            if child.state_dict() and hasattr(child, 'c2_weight_mapping'):
                for key, val in child.c2_weight_mapping().items():
                    weight_map[name + '.' + key] = val
        return weight_map
|
def build_3d_roi_heads(cfg, dim_in):
    """Build the combined 3D ROI heads (only the action head is registered)."""
    heads = [('action', build_roi_action_head(cfg, dim_in))]
    return Combined3dROIHeads(cfg, heads)
|
def make_optimizer(cfg, model):
    """Build an SGD optimizer with per-parameter learning-rate / weight-decay rules.

    Rules applied per parameter:
      * BatchNorm weights/biases use ``cfg.SOLVER.WEIGHT_DECAY_BN``.
      * Other biases get ``BASE_LR * BIAS_LR_FACTOR`` and ``WEIGHT_DECAY_BIAS``.
      * Parameters under a ``HITStructure`` module additionally scale their lr
        by ``cfg.SOLVER.IA_LR_FACTOR``.
    """
    params = []
    bn_param_set = set()
    transformer_param_set = set()
    # First pass over modules: record the full names of BatchNorm parameters
    # and of parameters that belong to the HIT transformer structure.
    for (name, module) in model.named_modules():
        if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
            bn_param_set.add((name + '.weight'))
            bn_param_set.add((name + '.bias'))
        elif isinstance(module, HITStructure):
            # Passing ``name`` as the prefix yields fully-qualified parameter names.
            for (param_name, _) in module.named_parameters(name):
                transformer_param_set.add(param_name)
    # Second pass over parameters: build one param group per tensor.
    # NOTE: group order determines optimizer state ordering, which matters
    # when optimizer state is saved/restored from checkpoints.
    for (key, value) in model.named_parameters():
        if (not value.requires_grad):
            continue
        lr = cfg.SOLVER.BASE_LR
        weight_decay = cfg.SOLVER.WEIGHT_DECAY
        if (key in bn_param_set):
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BN
        elif ('bias' in key):
            lr = (cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR)
            weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
        if (key in transformer_param_set):
            lr = (lr * cfg.SOLVER.IA_LR_FACTOR)
        params += [{'params': [value], 'lr': lr, 'weight_decay': weight_decay}]
    optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM)
    return optimizer
|
def make_lr_scheduler(cfg, optimizer):
    """Create the LR scheduler selected by ``cfg.SOLVER.SCHEDULER``."""
    name = cfg.SOLVER.SCHEDULER
    warmup_iters = cfg.SOLVER.WARMUP_ITERS if cfg.SOLVER.WARMUP_ON else 0
    if name == 'warmup_multi_step':
        return WarmupMultiStepLR(
            optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
            warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
            warmup_iters=warmup_iters,
            warmup_method=cfg.SOLVER.WARMUP_METHOD)
    if name == 'half_period_cosine':
        return HalfPeriodCosStepLR(
            optimizer,
            warmup_factor=cfg.SOLVER.WARMUP_FACTOR,
            warmup_iters=warmup_iters,
            max_iters=cfg.SOLVER.MAX_ITER,
            warmup_method=cfg.SOLVER.WARMUP_METHOD)
    raise ValueError('Scheduler not available')
|
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay with an initial warmup phase.

    While ``last_epoch < warmup_iters`` the base lr is scaled by a factor that
    is either constant or ramped linearly from ``warmup_factor`` to 1.
    Afterwards the lr is multiplied by ``gamma`` once per passed milestone.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3,
                 warmup_iters=500, warmup_method='linear', last_epoch=-1):
        if list(milestones) != sorted(milestones):
            # BUGFIX: the message was previously passed as a second ValueError
            # argument ("...Got {}", milestones) and never formatted.
            raise ValueError(
                'Milestones should be a list of increasing integers. Got {}'.format(milestones))
        if warmup_method not in ('constant', 'linear'):
            # BUGFIX: message previously read "acceptedgot" (missing separator).
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, got {}".format(warmup_method))
        self.milestones = milestones
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the lr for each param group at the current iteration."""
        warmup_factor = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == 'constant':
                warmup_factor = self.warmup_factor
            elif self.warmup_method == 'linear':
                alpha = float(self.last_epoch) / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # bisect_right counts how many milestones are <= last_epoch.
        return [base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs]
|
class HalfPeriodCosStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Half-period cosine LR schedule with an optional warmup phase."""

    def __init__(self, optimizer, warmup_factor=(1.0 / 3), warmup_iters=8000, max_iters=60000, warmup_method='linear', last_epoch=(- 1)):
        if warmup_method not in ('constant', 'linear'):
            raise ValueError("Only 'constant' or 'linear' warmup_method acceptedgot {}".format(warmup_method))
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.max_iters = max_iters
        self.warmup_method = warmup_method
        super(HalfPeriodCosStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        """Return the lr for each param group at the current iteration."""
        step = self.last_epoch
        if step < self.warmup_iters:
            if self.warmup_method == 'constant':
                scale = self.warmup_factor
            elif self.warmup_method == 'linear':
                progress = float(step) / self.warmup_iters
                scale = self.warmup_factor * (1 - progress) + progress
            else:
                scale = 1
        else:
            # Cosine from 1 down to 0 over max_iters (half a period).
            scale = 0.5 * (math.cos(step / self.max_iters * math.pi) + 1)
        return [base_lr * scale for base_lr in self.base_lrs]
|
class MemoryPool(object):
    """Two-level cache keyed by movie id, then by an inner key.

    Items may be addressed either by movie id alone (returning the inner
    dict) or by a ``(movie_id, inner_key)`` tuple.
    """

    def __init__(self):
        self.cache = defaultdict(dict)

    @staticmethod
    def _is_pair(key):
        # A (movie_id, inner_key) address is any 2-tuple.
        return isinstance(key, tuple) and len(key) == 2

    def update(self, update_info):
        """Merge per-movie dicts from ``update_info`` into the pool."""
        for movie_id, per_movie in update_info.items():
            self.cache[movie_id].update(per_movie)

    def update_list(self, update_info_list):
        """Apply ``update`` for each element of the list."""
        for info in update_info_list:
            self.update(info)

    def __getitem__(self, item):
        if self._is_pair(item):
            movie_id, inner = item
            return self.cache[movie_id][inner]
        return self.cache[item]

    def __setitem__(self, key, value):
        if self._is_pair(key):
            movie_id, inner = key
            self.cache[movie_id][inner] = value
        else:
            self.cache[key] = value

    def __delitem__(self, key):
        if self._is_pair(key):
            movie_id, inner = key
            del self.cache[movie_id][inner]
        else:
            del self.cache[key]

    def __contains__(self, item):
        if self._is_pair(item):
            movie_id, inner = item
            return movie_id in self.cache and inner in self.cache[movie_id]
        return item in self.cache

    def items(self):
        return self.cache.items()
|
def _block_set(ia_blocks):
if ((len(ia_blocks) > 0) and isinstance(ia_blocks[0], list)):
ia_blocks = list(itertools.chain.from_iterable(ia_blocks))
return ia_blocks
|
def has_person(ia_config):
    """True when the interaction config enables person ('P') blocks with capacity."""
    blocks = _block_set(ia_config.I_BLOCK_LIST)
    return ia_config.ACTIVE and ('P' in blocks) and (ia_config.MAX_PERSON > 0)
|
def has_object(ia_config):
    """True when the interaction config enables object ('O') blocks with capacity."""
    blocks = _block_set(ia_config.I_BLOCK_LIST)
    return ia_config.ACTIVE and ('O' in blocks) and (ia_config.MAX_OBJECT > 0)
|
def has_memory(ia_config):
    """True when the interaction config enables memory ('M') blocks with capacity."""
    blocks = _block_set(ia_config.I_BLOCK_LIST)
    return ia_config.ACTIVE and ('M' in blocks) and (ia_config.MAX_PER_SEC > 0)
|
def has_hand(ia_config):
    """True when the interaction config enables hand ('H') blocks with capacity."""
    blocks = _block_set(ia_config.I_BLOCK_LIST)
    return ia_config.ACTIVE and ('H' in blocks) and (ia_config.MAX_HAND > 0)
|
def _rename_weights(weights, weight_map):
logger = logging.getLogger(__name__)
logger.info('Remapping C2 weights')
max_c2_key_size = max([len(k) for k in weight_map.values()])
new_weights = OrderedDict()
for k in weight_map:
c2_name = weight_map[k]
logger.info('C2 name: {: <{}} mapped name: {}'.format(c2_name, max_c2_key_size, k))
if (c2_name not in weights):
logger.info('{} not found in C2 weights file, skipped.'.format(c2_name))
continue
v = weights[c2_name]
w = torch.from_numpy(v)
new_weights[k] = w
return new_weights
|
def _load_c2_pickled_weights(file_path):
with open(file_path, 'rb') as f:
if torch._six.PY3:
data = pickle.load(f, encoding='latin1')
else:
data = pickle.load(f)
if ('blobs' in data):
weights = data['blobs']
else:
weights = data
return weights
|
def load_c2_format(f, weight_map):
    """Load Caffe2 weights from ``f`` and return them as ``{'model': state_dict}``."""
    raw = _load_c2_pickled_weights(f)
    return dict(model=_rename_weights(raw, weight_map))
|
class Checkpointer(object):
    """Saves and restores model/optimizer/scheduler state under ``save_dir``.

    A small text file named ``last_checkpoint`` records the path of the most
    recently saved checkpoint so training can resume automatically.
    """
    def __init__(self, model, optimizer=None, scheduler=None, save_dir='', save_to_disk=None, logger=None):
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.save_dir = save_dir
        self.save_to_disk = save_to_disk
        if (logger is None):
            logger = logging.getLogger(__name__)
        self.logger = logger
    def save(self, name, **kwargs):
        """Serialize model/optimizer/scheduler plus ``kwargs`` to ``<save_dir>/<name>.pth``.

        No-op when ``save_dir`` is empty or ``save_to_disk`` is falsy
        (e.g. on non-master ranks).
        """
        if (not self.save_dir):
            return
        if (not self.save_to_disk):
            return
        data = {}
        data['model'] = self.model.state_dict()
        if (self.optimizer is not None):
            data['optimizer'] = self.optimizer.state_dict()
        if (self.scheduler is not None):
            data['scheduler'] = self.scheduler.state_dict()
        # Extra entries (e.g. iteration counters) are stored alongside the states.
        data.update(kwargs)
        save_file = os.path.join(self.save_dir, '{}.pth'.format(name))
        self.logger.info('Saving checkpoint to {}'.format(save_file))
        torch.save(data, save_file)
        # Record this file as the resume point for the next run.
        self.tag_last_checkpoint(save_file)
    def load(self, f=None, model_weight_only=False, adjust_scheduler=False, no_head=False):
        """Load a checkpoint and return the remaining (non-state) entries.

        Args:
            f: checkpoint path; ignored when a tagged last checkpoint exists.
            model_weight_only: load only model weights; optimizer/scheduler
                entries are dropped and iteration is reset to 0.
            adjust_scheduler: instead of restoring scheduler state, step the
                current scheduler to the checkpoint's last_epoch.
            no_head: skip loading ROI-head weights (see load_state_dict).

        Returns:
            dict with the checkpoint's leftover entries ({} when nothing to load).
        """
        if self.has_checkpoint():
            # A previous run exists in save_dir: resume from it, not from ``f``.
            f = self.get_checkpoint_file()
        if (not f):
            self.logger.info('No checkpoint found. Initializing model from scratch')
            return {}
        self.logger.info('Loading checkpoint from {}'.format(f))
        checkpoint = self._load_file(f)
        self._load_model(checkpoint, no_head)
        if (('optimizer' in checkpoint) and self.optimizer):
            if model_weight_only:
                del checkpoint['optimizer']
            else:
                self.logger.info('Loading optimizer from {}'.format(f))
                self.optimizer.load_state_dict(checkpoint.pop('optimizer'))
        if (('scheduler' in checkpoint) and self.scheduler):
            if model_weight_only:
                del checkpoint['scheduler']
            elif adjust_scheduler:
                last_epoch = checkpoint.pop('scheduler')['last_epoch']
                self.logger.info('Adjust scheduler at iteration {}'.format(last_epoch))
                self.scheduler.step(last_epoch)
            else:
                self.logger.info('Loading scheduler from {}'.format(f))
                self.scheduler.load_state_dict(checkpoint.pop('scheduler'))
        if model_weight_only:
            # Fine-tuning from weights only: restart iteration count and memory pool.
            checkpoint['iteration'] = 0
            checkpoint['person_pool'] = MemoryPool()
        return checkpoint
    def has_checkpoint(self):
        """True when a ``last_checkpoint`` tag file exists in save_dir."""
        save_file = os.path.join(self.save_dir, 'last_checkpoint')
        return os.path.exists(save_file)
    def get_checkpoint_file(self):
        """Return the tagged last-checkpoint path, or '' when unreadable/missing."""
        save_file = os.path.join(self.save_dir, 'last_checkpoint')
        try:
            with open(save_file, 'r') as f:
                last_saved = f.read()
                last_saved = last_saved.strip()
        except IOError:
            # Deleted or unreadable tag file: treat as "no checkpoint".
            last_saved = ''
        return last_saved
    def tag_last_checkpoint(self, last_filename):
        """Write ``last_filename`` into the ``last_checkpoint`` tag file."""
        save_file = os.path.join(self.save_dir, 'last_checkpoint')
        with open(save_file, 'w') as f:
            f.write(last_filename)
    def _load_file(self, f):
        # Always map to CPU so checkpoints load regardless of saving device.
        return torch.load(f, map_location=torch.device('cpu'))
    def _load_model(self, checkpoint, no_head):
        load_state_dict(self.model, checkpoint.pop('model'), no_head)
|
class ActionCheckpointer(Checkpointer):
    """Checkpointer that can additionally load Caffe2 ``.pkl`` weight files."""

    def __init__(self, cfg, model, optimizer=None, scheduler=None, save_dir='', save_to_disk=None, logger=None):
        super(ActionCheckpointer, self).__init__(model, optimizer, scheduler, save_dir, save_to_disk, logger)
        self.cfg = cfg.clone()

    def _load_file(self, f):
        # Caffe2 pickles need the C2 -> torch parameter-name remapping.
        if f.endswith('.pkl'):
            return load_c2_format(f, self._get_c2_weight_map())
        loaded = super(ActionCheckpointer, self)._load_file(f)
        return loaded if 'model' in loaded else dict(model=loaded)

    def _get_c2_weight_map(self):
        model = self.model
        if hasattr(model, 'c2_weight_mapping'):
            return model.c2_weight_mapping()
        # DataParallel/DistributedDataParallel wraps the real model in .module.
        if hasattr(model, 'module') and hasattr(model.module, 'c2_weight_mapping'):
            return model.module.c2_weight_mapping()
        raise RuntimeError('Cannot get C2 weight mapping from current model definition.')
|
def get_world_size():
    """Number of distributed processes; 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank():
    """Rank of this process; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
|
def is_main_process():
    """True on rank 0 (the process responsible for logging/saving)."""
    return get_rank() == 0
|
def synchronize(group=None):
    """
    Barrier across all processes in ``group``; no-op when not running
    distributed or when the group has a single rank.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return
    dist.barrier(group=group)
|
@functools.lru_cache()
def _get_global_gloo_group():
    """
    Return a process group over all ranks that supports CPU (gloo) collectives.
    When the default backend is nccl a dedicated gloo group is created;
    otherwise the world group is reused. The result is cached.
    """
    return dist.new_group(backend='gloo') if dist.get_backend() == 'nccl' else dist.group.WORLD
|
def _serialize_to_tensor(data, group):
    """Pickle ``data`` into a uint8 tensor on the device matching the group's backend."""
    backend = dist.get_backend(group)
    assert backend in ['gloo', 'nccl']
    # gloo collectives run on CPU tensors; nccl requires CUDA tensors.
    device = torch.device('cpu') if backend == 'gloo' else torch.device('cuda')
    payload = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(payload)
    return torch.ByteTensor(storage).to(device=device)
|
def _pad_to_largest_tensor(tensor, group):
    """
    Exchange per-rank tensor sizes and right-pad ``tensor`` with zeros up to
    the maximum size across the group.

    Returns:
        list[int]: size of the tensor on each rank
        Tensor: padded uint8 tensor of the max size
    """
    world_size = dist.get_world_size(group=group)
    assert world_size >= 1, 'comm.all_gather must be called from ranks within the given group!'
    local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
    gathered_sizes = [torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)]
    dist.all_gather(gathered_sizes, local_size, group=group)
    sizes = [int(s.item()) for s in gathered_sizes]
    max_size = max(sizes)
    if local_size != max_size:
        pad = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
        tensor = torch.cat((tensor, pad), dim=0)
    return sizes, tensor
|
def all_gather(data, group=None):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        group: a torch process group; defaults to a group over all ranks on
            the gloo backend.

    Returns:
        list[data]: one entry per rank; ``[data]`` when not distributed.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group) == 1:
        return [data]
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    max_size = max(size_list)
    gathered = [torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list]
    dist.all_gather(gathered, tensor, group=group)
    # Strip each rank's padding before unpickling.
    return [pickle.loads(buf.cpu().numpy().tobytes()[:size])
            for size, buf in zip(size_list, gathered)]
|
def gather(data, dst=0, group=None):
    """
    Run gather on arbitrary picklable data (not necessarily tensors).

    Args:
        data: any picklable object
        dst (int): destination rank
        group: a torch process group; defaults to a group over all ranks on
            the gloo backend.

    Returns:
        list[data]: on ``dst``, one entry per rank; an empty list elsewhere.
        ``[data]`` when not distributed.
    """
    if get_world_size() == 1:
        return [data]
    if group is None:
        group = _get_global_gloo_group()
    if dist.get_world_size(group=group) == 1:
        return [data]
    rank = dist.get_rank(group=group)
    tensor = _serialize_to_tensor(data, group)
    size_list, tensor = _pad_to_largest_tensor(tensor, group)
    if rank != dst:
        # Non-destination ranks only send.
        dist.gather(tensor, [], dst=dst, group=group)
        return []
    max_size = max(size_list)
    tensor_list = [torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list]
    dist.gather(tensor, tensor_list, dst=dst, group=group)
    return [pickle.loads(buf.cpu().numpy().tobytes()[:size])
            for size, buf in zip(size_list, tensor_list)]
|
def reduce_dict(input_dict, average=True):
    """
    Reduce the values of ``input_dict`` across processes so that rank 0 holds
    the summed (or averaged) results.

    Args:
        input_dict (dict): all values will be reduced
        average (bool): average instead of sum

    Returns:
        dict with the same keys; ``input_dict`` unchanged when not distributed.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in names], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0 and average:
            # Only rank 0 holds the correctly reduced values.
            stacked /= world_size
        reduced = dict(zip(names, stacked))
    return reduced
|
def all_reduce(tensor, average=False):
    """In-place sum (optionally average) of ``tensor`` across all processes; no-op when not distributed."""
    world_size = get_world_size()
    if world_size < 2:
        return
    dist.all_reduce(tensor)
    if average:
        tensor /= world_size
|
def setup_logger(name, save_dir, distributed_rank, filename=None):
    """Create a DEBUG-level logger; only rank 0 gets stream/file handlers."""
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False
    if distributed_rank > 0:
        # Non-master ranks stay silent (no handlers attached).
        return logger
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(logging.DEBUG)
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)
    if save_dir:
        if filename is None:
            # Default log file name: timestamp of logger creation.
            filename = time.strftime('%Y-%m-%d_%H.%M.%S', time.localtime()) + '.log'
        file_handler = logging.FileHandler(os.path.join(save_dir, filename))
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
|
def setup_tblogger(save_dir, distributed_rank):
    """Create a TensorBoard writer under ``<save_dir>/tb`` on rank 0; None on other ranks."""
    if distributed_rank > 0:
        return None
    from tensorboardX import SummaryWriter
    tb_dir = os.path.join(save_dir, 'tb')
    os.makedirs(tb_dir, exist_ok=True)
    return SummaryWriter(tb_dir)
|
class SmoothedValue(object):
    """Track a series of values, exposing window-smoothed stats and the global average."""

    def __init__(self, window_size=20):
        self.deque = deque(maxlen=window_size)
        self.series = []
        self.total = 0.0
        self.count = 0

    def update(self, value):
        """Record one new value."""
        self.deque.append(value)
        self.series.append(value)
        self.count += 1
        self.total += value

    @property
    def median(self):
        """Median over the smoothing window."""
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        """Mean over the smoothing window."""
        return torch.tensor(list(self.deque)).mean().item()

    @property
    def global_avg(self):
        """Mean over every value ever recorded."""
        return self.total / self.count
|
class MetricLogger(object):
    """Collect named SmoothedValue meters and render them on a single line."""

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar per keyword; tensors are converted via .item()."""
        for name, value in kwargs.items():
            if isinstance(value, torch.Tensor):
                value = value.item()
            assert isinstance(value, (float, int))
            self.meters[name].update(value)

    def __getattr__(self, attr):
        # Invoked only when normal lookup fails: expose meters as attributes.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        parts = ['{}: {:.4f} ({:.4f})'.format(name, meter.median, meter.global_avg)
                 for name, meter in self.meters.items()]
        return self.delimiter.join(parts)
|
def align_and_update_state_dicts(model_state_dict, loaded_state_dict, no_head):
    """Copy loaded weights into ``model_state_dict`` (in place) by suffix matching.

    Model keys usually carry extra prefixes relative to pre-trained
    checkpoints (e.g. 'backbone.body.res2.conv1.weight' vs 'res2.conv1.weight').
    For each model key, the loaded key that is its longest suffix wins; keys
    with no match are left untouched. When ``no_head`` is set, loaded keys
    starting with 'roi_heads.' are skipped.
    """
    current_keys = sorted(model_state_dict.keys())
    loaded_keys = sorted(loaded_state_dict.keys())
    # match_matrix[i, j] = len(loaded_keys[j]) when it suffixes current_keys[i], else 0.
    match_matrix = torch.as_tensor(
        [len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys]
    ).view(len(current_keys), len(loaded_keys))
    max_match_size, idxs = match_matrix.max(1)
    # Mark keys with no matching loaded key.
    idxs[max_match_size == 0] = -1
    max_size = max((len(key) for key in current_keys), default=1)
    max_size_loaded = max((len(key) for key in loaded_keys), default=1)
    template = '{: <{}} loaded from {: <{}} of shape {}'
    logger = logging.getLogger(__name__)
    for idx_new, idx_old in enumerate(idxs.tolist()):
        if idx_old == -1:
            continue
        key = current_keys[idx_new]
        key_old = loaded_keys[idx_old]
        if no_head and key_old.startswith('roi_heads.'):
            logger.info('{} will not be loaded.'.format(key))
            continue
        model_state_dict[key] = loaded_state_dict[key_old]
        logger.info(template.format(key, max_size, key_old, max_size_loaded,
                                    tuple(loaded_state_dict[key_old].shape)))
|
def strip_prefix_if_present(state_dict, prefix):
    """Return a copy of ``state_dict`` with ``prefix`` removed from every key.

    If any key does not start with ``prefix``, the original dict is returned
    unchanged.
    """
    keys = sorted(state_dict.keys())
    if not all(key.startswith(prefix) for key in keys):
        return state_dict
    stripped_state_dict = OrderedDict()
    for key, value in state_dict.items():
        # BUGFIX: str.replace removed *every* occurrence of the prefix string
        # (e.g. 'module.module.x' -> 'x'); slice off only the leading prefix.
        stripped_state_dict[key[len(prefix):]] = value
    return stripped_state_dict
|
def load_state_dict(model, loaded_state_dict, no_head):
    """Load weights into ``model`` after stripping DataParallel prefixes and suffix-matching keys."""
    model_state_dict = model.state_dict()
    # 'module.' prefixes appear when the checkpoint was saved from (D)DP wrappers.
    cleaned = strip_prefix_if_present(loaded_state_dict, prefix='module.')
    align_and_update_state_dicts(model_state_dict, cleaned, no_head)
    model.load_state_dict(model_state_dict)
|
def set_seed(seed, rank, world_size):
    """Seed python/torch/numpy RNGs with a distinct per-rank seed derived from ``seed``."""
    rng = random.Random(seed)
    # Draw one seed per rank so every process gets a different stream.
    per_rank_seeds = [rng.randint(0, 2 ** 32 - 1) for _ in range(world_size)]
    cur_seed = per_rank_seeds[rank]
    random.seed(cur_seed)
    torch.manual_seed(cur_seed)
    torch.cuda.manual_seed(cur_seed)
    np.random.seed(cur_seed)
|
def _register_generic(module_dict, module_name, module):
assert (module_name not in module_dict)
module_dict[module_name] = module
|
class Registry(dict):
    """A dict-based registry of named modules.

    Register either directly::

        some_registry.register('foo_module', foo)

    or as a decorator::

        @some_registry.register('foo_module')
        def foo():
            ...

    Lookup is plain dict access: ``some_registry['foo_module']``.
    """

    def __init__(self, *args, **kwargs):
        super(Registry, self).__init__(*args, **kwargs)

    def register(self, module_name, module=None):
        """Register ``module`` under ``module_name``; decorator form when ``module`` is None."""
        if module is not None:
            # Direct registration; duplicate names are rejected.
            assert module_name not in self
            self[module_name] = module
            return

        def register_fn(fn):
            assert module_name not in self
            self[module_name] = fn
            return fn

        return register_fn
|
def av_decode_video(video_path):
    """Decode a video with PyAV, returning a list of RGB ndarrays (one per frame)."""
    with av.open(video_path) as container:
        return [frame.to_rgb().to_ndarray() for frame in container.decode(video=0)]
|
def cv2_decode_video(video_path):
    # BUG(review): ``container`` is never defined in this function -- the body
    # appears copied from av_decode_video without opening ``video_path``
    # (e.g. via cv2.VideoCapture). As written, every call raises NameError.
    # ``video_path`` is also unused. Needs a real cv2-based implementation.
    frames = []
    for frame in container.decode(video=0):
        frames.append(frame.to_rgb().to_ndarray())
    return frames
|
def image_decode(video_path):
    """Read a single image file and return it as a one-element list of RGB arrays.

    Raises:
        RuntimeError: wrapping any error raised while opening/decoding the file.
    """
    try:
        with Image.open(video_path) as img:
            return [np.array(img.convert('RGB'))]
    except Exception as e:
        # BUGFIX: was ``except BaseException``, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only genuine errors.
        raise RuntimeError('Caught "{}" when loading {}'.format(str(e), video_path))
|
def csv2COCOJson(csv_path, movie_list, img_root, json_path, min_json_path):
    """Convert an AVA-style csv annotation file into COCO-format json files.

    Args:
        csv_path: csv with rows (movie_name, timestamp, x1, y1, x2, y2,
            action_id, person_id); box coordinates are normalized to [0, 1].
        movie_list: text file of movie file names, one per line; the line
            index becomes the movie id.
        img_root: root directory holding '<movie>/<timestamp>.jpg' key
            frames (read only to obtain each movie's frame size).
        json_path: output path for the indented json.
        min_json_path: output path for the minified (no-indent) json.
    """
    ann_df = pd.read_csv(csv_path, header=None)
    # Map movie name (file name without extension) -> integer movie id.
    movie_ids = {}
    with open(movie_list) as movief:
        for (idx, line) in enumerate(movief):
            name = line[:line.find('.')]
            movie_ids[name] = idx
    movie_infos = {}
    iter_num = len(ann_df)
    for rows in tqdm(ann_df.itertuples(), total=iter_num, desc='Calculating info'):
        (_, movie_name, timestamp, x1, y1, x2, y2, action_id, person_id) = rows
        if (movie_name not in movie_infos):
            # First row for this movie: read one key frame to learn the
            # frame size, then pre-create an image entry for every AVA
            # timestamp (902..1798 inclusive).
            movie_infos[movie_name] = {}
            movie_infos[movie_name]['img_infos'] = {}
            img_path = os.path.join(movie_name, '{}.jpg'.format(timestamp))
            movie_infos[movie_name]['size'] = Image.open(os.path.join(img_root, img_path)).size
            movie_info = movie_infos[movie_name]
            img_infos = movie_info['img_infos']
            (width, height) = movie_info['size']
            # Image ids are unique: movie_id * 10000 + timestamp.
            movie_id = (movie_ids[movie_name] * 10000)
            for tid in range(902, 1799):
                img_id = (movie_id + tid)
                img_path = os.path.join(movie_name, '{}.jpg'.format(tid))
                video_path = os.path.join(movie_name, '{}.mp4'.format(tid))
                img_infos[tid] = {'id': img_id, 'img_path': img_path, 'video_path': video_path, 'height': height, 'width': width, 'movie': movie_name, 'timestamp': tid, 'annotations': {}}
        img_info = movie_infos[movie_name]['img_infos'][timestamp]
        if (person_id not in img_info['annotations']):
            # Annotation (box) ids are unique: image_id * 1000 + person_id.
            box_id = ((img_info['id'] * 1000) + person_id)
            (box_w, box_h) = ((x2 - x1), (y2 - y1))
            width = img_info['width']
            height = img_info['height']
            # De-normalize box coordinates to pixels (COCO xywh format).
            (real_x1, real_y1) = ((x1 * width), (y1 * height))
            (real_box_w, real_box_h) = ((box_w * width), (box_h * height))
            area = (real_box_w * real_box_h)
            img_info['annotations'][person_id] = {'id': box_id, 'image_id': img_info['id'], 'category_id': 1, 'action_ids': [], 'person_id': person_id, 'bbox': list(map((lambda x: round(x, 2)), [real_x1, real_y1, real_box_w, real_box_h])), 'area': round(area, 5), 'keypoints': [], 'iscrowd': 0}
        box_info = img_info['annotations'][person_id]
        # A person can perform several actions in one frame; collect all ids.
        box_info['action_ids'].append(action_id)
    tic = time.time()
    print('Writing into json file...')
    jsondata = {}
    jsondata['categories'] = [{'supercategory': 'person', 'id': 1, 'name': 'person'}]
    # Flatten the nested movie -> image -> annotation maps into COCO lists.
    anns = [img_info.pop('annotations').values() for movie_info in movie_infos.values() for img_info in movie_info['img_infos'].values()]
    anns = list(itertools.chain.from_iterable(anns))
    jsondata['annotations'] = anns
    imgs = [movie_info['img_infos'].values() for movie_info in movie_infos.values()]
    imgs = list(itertools.chain.from_iterable(imgs))
    jsondata['images'] = imgs
    with open(json_path, 'w') as jsonf:
        json.dump(jsondata, jsonf, indent=4)
    print('Write json dataset into json file {} successfully.'.format(json_path))
    with open(min_json_path, 'w') as jsonminf:
        json.dump(jsondata, jsonminf)
    print('Write json dataset with no indent into json file {} successfully.'.format(min_json_path))
    print('Done (t={:0.2f}s)'.format((time.time() - tic)))
|
def genCOCOJson(movie_list, img_root, json_path, min_json_path):
    """Generate an annotation-free COCO-format json for a list of movies.

    Like csv2COCOJson but without a csv: every AVA timestamp (902..1798)
    of every movie becomes an image entry with an empty annotation set —
    the layout expected for test-time inference.
    """
    movie_ids = {}
    with open(movie_list) as movief:
        for (idx, line) in enumerate(movief):
            name = line[:line.find('.')]
            movie_ids[name] = idx
    movie_infos = {}
    for movie_name in tqdm(movie_ids):
        movie_infos[movie_name] = {}
        movie_infos[movie_name]['img_infos'] = {}
        # Read the first key frame only to learn the movie's frame size.
        img_path = os.path.join(movie_name, '902.jpg')
        movie_infos[movie_name]['size'] = Image.open(os.path.join(img_root, img_path)).size
        movie_info = movie_infos[movie_name]
        img_infos = movie_info['img_infos']
        (width, height) = movie_info['size']
        # Image ids are unique: movie_id * 10000 + timestamp.
        movie_id = (movie_ids[movie_name] * 10000)
        for tid in range(902, 1799):
            img_id = (movie_id + tid)
            img_path = os.path.join(movie_name, '{}.jpg'.format(tid))
            video_path = os.path.join(movie_name, '{}.mp4'.format(tid))
            img_infos[tid] = {'id': img_id, 'img_path': img_path, 'video_path': video_path, 'height': height, 'width': width, 'movie': movie_name, 'timestamp': tid, 'annotations': {}}
    tic = time.time()
    print('Writing into json file...')
    jsondata = {}
    jsondata['categories'] = [{'supercategory': 'person', 'id': 1, 'name': 'person'}]
    # Annotation maps are all empty here; this still yields an empty list.
    anns = [img_info.pop('annotations').values() for movie_info in movie_infos.values() for img_info in movie_info['img_infos'].values()]
    anns = list(itertools.chain.from_iterable(anns))
    jsondata['annotations'] = anns
    imgs = [movie_info['img_infos'].values() for movie_info in movie_infos.values()]
    imgs = list(itertools.chain.from_iterable(imgs))
    jsondata['images'] = imgs
    with open(json_path, 'w') as jsonf:
        json.dump(jsondata, jsonf, indent=4)
    print('Write json dataset into json file {} successfully.'.format(json_path))
    with open(min_json_path, 'w') as jsonminf:
        json.dump(jsondata, jsonminf)
    print('Write json dataset with no indent into json file {} successfully.'.format(min_json_path))
    print('Done (t={:0.2f}s)'.format((time.time() - tic)))
|
def main():
    """CLI entry point: build COCO-format json annotations for AVA.

    With --csv_path, converts the csv via csv2COCOJson; without it,
    generates an annotation-free json via genCOCOJson. Output paths
    default to the csv name with a '.json' / '_min.json' suffix.
    """
    parser = argparse.ArgumentParser(description='Generate coco format json for AVA.')
    parser.add_argument('--csv_path', default='', help='path to csv file', type=str)
    parser.add_argument('--movie_list', required=True, help='path to movie list', type=str)
    parser.add_argument('--img_root', required=True, help='root directory of extracted key frames', type=str)
    parser.add_argument('--json_path', default='', help='path of output json', type=str)
    parser.add_argument('--min_json_path', default='', help='path of output minimized json', type=str)
    args = parser.parse_args()
    if (args.json_path == ''):
        if (args.csv_path == ''):
            json_path = 'test.json'
        else:
            # Derive the output name from the csv name. os.path.splitext is
            # used instead of str.rfind('.') so a dot inside a directory
            # name (e.g. 'data.v2/anns') cannot truncate the path.
            json_path = (os.path.splitext(args.csv_path)[0] + '.json')
    else:
        json_path = args.json_path
    if (args.min_json_path == ''):
        min_json_path = (os.path.splitext(json_path)[0] + '_min.json')
    else:
        min_json_path = args.min_json_path
    if (args.csv_path == ''):
        genCOCOJson(args.movie_list, args.img_root, json_path, min_json_path)
    else:
        csv2COCOJson(args.csv_path, args.movie_list, args.img_root, json_path, min_json_path)
|
def slice_movie_yuv(movie_path, clip_root, midframe_root='', start_sec=895, end_sec=1805, targ_fps=30, targ_size=360):
    """Slice one movie into 1-second mp4 clips (and optional key frames).

    The movie is decoded once by ffmpeg into a raw yuv420p stream at
    ``targ_fps``, rescaled so its short side is at most ``targ_size``, then
    re-encoded second by second into ``clip_root/<movie_id>/<sec>.mp4``.
    When ``midframe_root`` is non-empty, the first frame of each second is
    also saved as ``midframe_root/<movie_id>/<sec>.jpg``.

    Returns:
        A (possibly empty) human-readable warning/error message string.
    """
    # Probe the container to learn the source resolution.
    probe_args = ['ffprobe', '-show_format', '-show_streams', '-of', 'json', movie_path]
    p = subprocess.Popen(probe_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (out, err) = p.communicate()
    if (p.returncode != 0):
        return 'Message from {}:\nffprobe error!'.format(movie_path)
    # NOTE(review): assumes streams[0] is the video stream — confirm for
    # inputs whose first stream is audio.
    video_stream = json.loads(out.decode('utf8'))['streams'][0]
    width = int(video_stream['width'])
    height = int(video_stream['height'])
    # Downscale so the short side becomes targ_size, keeping the aspect
    # ratio and rounding the other side to an even number (yuv420p needs
    # even dimensions).
    if (min(width, height) <= targ_size):
        (new_width, new_height) = (width, height)
    elif (height > width):
        new_width = targ_size
        new_height = int((round((((new_width * height) / width) / 2)) * 2))
    else:
        new_height = targ_size
        new_width = int((round((((new_height * width) / height) / 2)) * 2))
    vid_name = os.path.basename(movie_path)
    vid_id = vid_name[:vid_name.find('.')]
    targ_dir = os.path.join(clip_root, vid_id)
    os.makedirs(targ_dir, exist_ok=True)
    if (midframe_root != ''):
        frame_targ_dir = os.path.join(midframe_root, vid_id)
        os.makedirs(frame_targ_dir, exist_ok=True)
    # Decoder command: seek to start_sec and emit raw yuv420p frames to
    # stdout at the target fps (and scale, if needed).
    args1_input = ['ffmpeg', '-ss', str(start_sec), '-t', str(((end_sec - start_sec) + 1)), '-i', movie_path]
    filter_args = 'fps=fps={}'.format(targ_fps)
    if (min(width, height) > targ_size):
        filter_args = ('scale={}:{}, '.format(new_width, new_height) + filter_args)
    args1_output = ['-f', 'rawvideo', '-pix_fmt', 'yuv420p', '-filter:v', filter_args, 'pipe:']
    args1 = (args1_input + args1_output)
    stdout_stream = subprocess.PIPE
    total_err_file = os.path.join(targ_dir, 'decode.err')
    total_err_f_obj = open(total_err_file, 'wb')
    process1 = subprocess.Popen(args1, stdout=stdout_stream, stderr=total_err_f_obj)
    (width, height) = (new_width, new_height)
    # yuv420p stores 1.5 bytes per pixel.
    frame_size = int(((width * height) * 1.5))
    clip_size = (targ_fps * frame_size)
    err_msg_list = []
    enc_err_sec = []
    frame_err_sec = []
    for cur_sec in range(start_sec, (end_sec + 1)):
        # Pull exactly one second's worth of raw frames from the decoder.
        in_bytes = process1.stdout.read(clip_size)
        if (not in_bytes):
            err_msg_list.append('Warning: No more data after timestamp {}.'.format(cur_sec))
            break
        actual_frame_num = int((len(in_bytes) / frame_size))
        if (actual_frame_num < targ_fps):
            err_msg_list.append('Warning: Timestamp {} has only {} frames.'.format(cur_sec, actual_frame_num))
        # Encoder command: read the raw second from stdin, write one mp4.
        out_filename = os.path.join(targ_dir, '{}.mp4'.format(cur_sec))
        args = ['ffmpeg', '-f', 'rawvideo', '-pix_fmt', 'yuv420p', '-r', str(targ_fps), '-s', '{}x{}'.format(width, height), '-i', 'pipe:', '-pix_fmt', 'yuv420p', out_filename, '-y']
        process2 = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=None, stderr=subprocess.PIPE)
        (out, err) = process2.communicate(input=in_bytes)
        err_str = err.decode('utf8')
        if ('error' in err_str.lower()):
            enc_err_sec.append(cur_sec)
        if (midframe_root != ''):
            # Save the first frame of this second as the key-frame jpg.
            midframe_filename = os.path.join(frame_targ_dir, '{}.jpg'.format(cur_sec))
            args = ['ffmpeg', '-f', 'rawvideo', '-pix_fmt', 'yuv420p', '-s', '{}x{}'.format(width, height), '-i', 'pipe:', '-pix_fmt', 'yuv420p', midframe_filename, '-y']
            process2 = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=None, stderr=subprocess.PIPE)
            (out, err) = process2.communicate(input=in_bytes[:frame_size])
            err_str = err.decode('utf8')
            if ('error' in err_str.lower()):
                frame_err_sec.append(cur_sec)
    # Drain whatever the decoder produced beyond end_sec.
    in_bytes = process1.stdout.read()
    more_frame = int((len(in_bytes) / frame_size))
    if (more_frame > 0):
        err_msg_list.append('Warning: {} frames has been dropped.'.format(more_frame))
    process1.wait()
    # Inspect the decoder's stderr log; keep the file only if it contains
    # errors, otherwise delete it.
    with open(total_err_file, 'r') as total_err_f_obj:
        err_str = total_err_f_obj.read()
    if ('error' in err_str.lower()):
        err_msg_list.append('Error happens when decoding the raw video.')
    else:
        os.remove(total_err_file)
    if (len(enc_err_sec) > 0):
        err_msg_list.append('Error in encoding short clips. Some of the error timestamps are {}.'.format(enc_err_sec[:5]))
    if (len(frame_err_sec) > 0):
        err_msg_list.append('Error in encoding key frames. Some of the error timestamps are {}.'.format(frame_err_sec[:5]))
    if (len(err_msg_list) > 0):
        err_msg = '\n'.join(err_msg_list)
        err_msg = ('Message from {}:\n'.format(movie_path) + err_msg)
    else:
        err_msg = ''
    return err_msg
|
def multiprocess_wrapper(args):
    """Adapter for Pool.imap_unordered: unpack an (args, kwargs) pair and
    forward it to slice_movie_yuv, which takes them separately."""
    (positional, keyword) = args
    return slice_movie_yuv(*positional, **keyword)
|
def main():
    """CLI entry point: slice every movie under --movie_root in parallel."""
    parser = argparse.ArgumentParser(description='Script for processing AVA videos.')
    parser.add_argument('--movie_root', required=True, help='root directory of downloaded movies', type=str)
    parser.add_argument('--clip_root', required=True, help='root directory to store segmented video clips', type=str)
    parser.add_argument('--kframe_root', default='', help='root directory to store extracted key frames', type=str)
    parser.add_argument('--process_num', default=4, help='the number of processes', type=int)
    args = parser.parse_args()
    # Build one (positional, keyword) argument pair per movie for the pool.
    movie_path_list = []
    clip_root_list = []
    kwargs_list = []
    movie_names = os.listdir(args.movie_root)
    for movie_name in movie_names:
        movie_path = os.path.join(args.movie_root, movie_name)
        movie_path_list.append(movie_path)
        clip_root_list.append(args.clip_root)
        kwargs_list.append(dict(midframe_root=args.kframe_root))
    pool = Pool(args.process_num)
    # imap_unordered yields results as workers finish; surface warnings.
    for ret_msg in tqdm.tqdm(pool.imap_unordered(multiprocess_wrapper, zip(zip(movie_path_list, clip_root_list), kwargs_list)), total=len(movie_path_list)):
        if (ret_msg != ''):
            tqdm.tqdm.write(ret_msg)
|
def make_cython_ext(name, module, sources):
    """Build a cythonized C++ Extension named '<module>.<name>'.

    Args:
        name: extension (leaf) module name.
        module: dotted package path; also determines the source directory.
        sources: source file paths relative to the package directory.
    """
    extra_compile_args = None
    if (platform.system() != 'Windows'):
        # NOTE(review): the {'cxx': [...]} dict is the torch-extension
        # format; plain setuptools Extension expects a flat list — confirm
        # the build backend used here understands the dict form.
        extra_compile_args = {'cxx': ['-Wno-unused-function', '-Wno-write-strings']}
    extension = Extension('{}.{}'.format(module, name), [os.path.join(*module.split('.'), p) for p in sources], include_dirs=[np.get_include()], language='c++', extra_compile_args=extra_compile_args)
    # cythonize returns a one-element list; unpack it.
    (extension,) = cythonize(extension)
    return extension
|
def make_cuda_ext(name, module, sources):
    """Build a CUDAExtension named '<module>.<name>' from package-relative sources.

    The -D__CUDA_NO_HALF* flags disable half-precision operator overloads,
    a common workaround for nvcc/pytorch half-type compilation conflicts.
    """
    return CUDAExtension(name='{}.{}'.format(module, name), sources=[os.path.join(*module.split('.'), p) for p in sources], extra_compile_args={'cxx': [], 'nvcc': ['-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']})
|
def get_extensions():
    """Collect the C++/CUDA extension modules for setup(ext_modules=...).

    Builds 'hit._custom_cuda_ext' from hit/csrc (CUDA sources are added
    only when a CUDA toolchain is available or FORCE_CUDA=1), plus the
    detector NMS extensions.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, 'hit/csrc')
    main_file = glob.glob(os.path.join(extensions_dir, '*.cpp'))
    source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp'))
    source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu'))
    sources = (main_file + source_cpu)
    extension = CppExtension
    extra_compile_args = {'cxx': []}
    define_macros = []
    # Switch to a CUDA build when a GPU toolchain is present or forced.
    if ((torch.cuda.is_available() and (CUDA_HOME is not None)) or (os.getenv('FORCE_CUDA', '0') == '1')):
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [('WITH_CUDA', None)]
        extra_compile_args['nvcc'] = ['-O3', '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
    # glob already returned absolute paths, so os.path.join with an
    # absolute second argument leaves them unchanged.
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [extension('hit._custom_cuda_ext', sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args), make_cython_ext(name='soft_nms_cpu', module='detector.nms', sources=['src/soft_nms_cpu.pyx']), make_cuda_ext(name='nms_cpu', module='detector.nms', sources=['src/nms_cpu.cpp']), make_cuda_ext(name='nms_cuda', module='detector.nms', sources=['src/nms_cuda.cpp', 'src/nms_kernel.cu'])]
    return ext_modules
|
def main():
    """CLI entry point: run inference with a trained model on cfg.DATASETS.TEST."""
    parser = argparse.ArgumentParser(description='PyTorch Object Detection Inference')
    parser.add_argument('--config-file', default='config_files/hitnet.yaml', metavar='FILE', help='path to config file')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # torch.distributed.launch sets WORLD_SIZE; fall back to a single GPU.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    distributed = (num_gpus > 1)
    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    save_dir = ''
    logger = setup_logger('hit', save_dir, get_rank())
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(cfg)
    logger.info('Collecting env info (might take some time)')
    logger.info(('\n' + get_pretty_env_info()))
    model = build_detection_model(cfg)
    model.to('cuda')
    output_dir = cfg.OUTPUT_DIR
    checkpointer = ActionCheckpointer(cfg, model, save_dir=output_dir)
    checkpointer.load(cfg.MODEL.WEIGHT)
    # One inference output folder per test dataset.
    output_folders = ([None] * len(cfg.DATASETS.TEST))
    dataset_names = cfg.DATASETS.TEST
    mem_active = has_memory(cfg.MODEL.HIT_STRUCTURE)
    if cfg.OUTPUT_DIR:
        for (idx, dataset_name) in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
            os.makedirs(output_folder, exist_ok=True)
            output_folders[idx] = output_folder
    data_loaders_test = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for (output_folder, dataset_name, data_loader_test) in zip(output_folders, dataset_names, data_loaders_test):
        inference(model, data_loader_test, dataset_name, mem_active=mem_active, output_folder=output_folder)
        synchronize()
|
def train(cfg, local_rank, distributed, tblogger=None, transfer_weight=False, adjust_lr=False, skip_val=False, no_head=False):
    """Build, (optionally) restore, and train the detection model.

    Args:
        cfg: frozen experiment config node.
        local_rank: GPU index for this process (DDP).
        distributed: whether to wrap the model in DistributedDataParallel.
        tblogger: optional tensorboard logger passed through to do_train.
        transfer_weight: load model weights only (fresh optimizer/scheduler).
        adjust_lr: adjust the restored scheduler state on load.
        skip_val: skip periodic validation during training.
        no_head: do not restore head-layer parameters from the weight file.

    Returns:
        The trained model (possibly DDP-wrapped).
    """
    model = build_detection_model(cfg)
    device = torch.device('cuda')
    model.to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False, find_unused_parameters=True)
    # Training state that survives checkpointing: iteration + person memory.
    arguments = {}
    arguments['iteration'] = 0
    arguments['person_pool'] = MemoryPool()
    output_dir = cfg.OUTPUT_DIR
    # Only rank 0 writes checkpoints to disk.
    save_to_disk = (get_rank() == 0)
    checkpointer = ActionCheckpointer(cfg, model, optimizer, scheduler, output_dir, save_to_disk)
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, model_weight_only=transfer_weight, adjust_scheduler=adjust_lr, no_head=no_head)
    arguments.update(extra_checkpoint_data)
    data_loader = make_data_loader(cfg, is_train=True, is_distributed=distributed, start_iter=arguments['iteration'])
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    val_period = cfg.SOLVER.EVAL_PERIOD
    mem_active = has_memory(cfg.MODEL.HIT_STRUCTURE)
    if (not skip_val):
        dataset_names_val = cfg.DATASETS.TEST
        data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    else:
        dataset_names_val = []
        data_loaders_val = []
    do_train(model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments, tblogger, val_period, dataset_names_val, data_loaders_val, distributed, mem_active)
    return model
|
def run_test(cfg, model, distributed):
    """Evaluate the (possibly DDP-wrapped) model on every cfg.DATASETS.TEST."""
    if distributed:
        # Unwrap DistributedDataParallel so inference sees the bare model.
        model = model.module
    torch.cuda.empty_cache()
    output_folders = ([None] * len(cfg.DATASETS.TEST))
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for (idx, dataset_name) in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference', dataset_name)
            os.makedirs(output_folder, exist_ok=True)
            output_folders[idx] = output_folder
    data_loaders_test = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    for (output_folder, dataset_name, data_loader_test) in zip(output_folders, dataset_names, data_loaders_test):
        inference(model, data_loader_test, dataset_name, mem_active=has_memory(cfg.MODEL.HIT_STRUCTURE), output_folder=output_folder)
        synchronize()
|
def main():
    """CLI entry point: configure the environment and launch training."""
    parser = argparse.ArgumentParser(description='PyTorch Action Detection Training')
    parser.add_argument('--config-file', default='config_files/hitnet.yaml', metavar='FILE', help='path to config file', type=str)
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--skip-final-test', dest='skip_test', help='Do not test the final model', action='store_true')
    # NOTE(review): several store_true flags below default to True, so they
    # are effectively always on and cannot be disabled from the command
    # line — confirm this is intentional.
    parser.add_argument('--skip-val-in-train', dest='skip_val', help='Do not validate during training', action='store_true', default=True)
    parser.add_argument('--transfer', dest='transfer_weight', help='Transfer weight from a pretrained model', action='store_true', default=True)
    parser.add_argument('--adjust-lr', dest='adjust_lr', help='Adjust learning rate scheduler from old checkpoint', action='store_true')
    parser.add_argument('--no-head', dest='no_head', help='Not load the head layer parameters from weight file', action='store_true', default=True)
    parser.add_argument('--use-tfboard', action='store_true', dest='tfboard', help='Use tensorboard to log stats', default=True)
    parser.add_argument('--seed', type=int, default=2, help='Manual seed at the begining.')
    parser.add_argument('opts', help='Modify config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # torch.distributed.launch sets WORLD_SIZE; fall back to a single GPU.
    num_gpus = (int(os.environ['WORLD_SIZE']) if ('WORLD_SIZE' in os.environ) else 1)
    args.distributed = (num_gpus > 1)
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        # Deterministic cudnn for reproducibility (slower but repeatable).
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    global_rank = get_rank()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    logger = setup_logger('hit', output_dir, global_rank)
    logger.info('Using {} GPUs'.format(num_gpus))
    logger.info(args)
    logger.info('Collecting env info (might take some time)')
    logger.info(('\n' + get_pretty_env_info()))
    logger.info('Loaded configuration file {}'.format(args.config_file))
    with open(args.config_file, 'r') as cf:
        config_str = ('\n' + cf.read())
        logger.info(config_str)
    logger.info('Running with config:\n{}'.format(cfg))
    tblogger = None
    if args.tfboard:
        tblogger = setup_tblogger(output_dir, global_rank)
    set_seed(args.seed, global_rank, num_gpus)
    model = train(cfg, args.local_rank, args.distributed, tblogger, args.transfer_weight, args.adjust_lr, args.skip_val, args.no_head)
    if (tblogger is not None):
        tblogger.close()
    if (not args.skip_test):
        run_test(cfg, model, args.distributed)
|
def read_acclog(log_dir, log_name):
    """Parse an accuracy log file.

    Each line is expected to look like ``<epoch> ... (<acc>, ...)``: the
    epoch is the first whitespace-separated token and the accuracy is the
    value between the first '(' and the following ','.

    Returns:
        (acc_list, epo_list): float accuracies and matching int epochs.
    """
    acc_list = []
    epo_list = []
    with open(os.path.join(log_dir, log_name), 'r') as f:
        for raw_line in f:
            entry = raw_line.strip()
            epo_list.append(int(entry.split(' ')[0]))
            inner = entry.split('(')[1]
            acc_list.append(float(inner.split(',')[0]))
    return (acc_list, epo_list)
|
def main():
    """Summarize the best top-1/top-5 accuracies from the validation logs.

    Reads val_acc1.txt / val_acc5.txt under <work_dir>/<log_dir>/, writes
    the best values and their epochs to best.txt, and prints a summary.

    NOTE(review): relies on a module-level argparse ``parser`` defined
    elsewhere in this file — confirm it declares --work_dir and --log_dir.
    """
    global args
    args = parser.parse_args()
    log_dir = ('%s/%s/' % (args.work_dir, args.log_dir))
    (acc1_list, epo1_list) = read_acclog(log_dir, log_name='val_acc1.txt')
    best_acc1 = np.max(acc1_list)
    best_idx1 = acc1_list.index(np.max(acc1_list))
    best_epo1 = epo1_list[best_idx1]
    (acc5_list, epo5_list) = read_acclog(log_dir, log_name='val_acc5.txt')
    best_acc5 = np.max(acc5_list)
    best_idx5 = acc5_list.index(np.max(acc5_list))
    best_epo5 = epo5_list[best_idx5]
    with open(os.path.join(log_dir, 'best.txt'), 'w') as f:
        f.write((((('Acc@1:' + str(best_acc1)) + ' epoch:') + str(best_epo1)) + '\n'))
        f.write((((('Acc@5:' + str(best_acc5)) + ' epoch:') + str(best_epo5)) + '\n'))
        f.write((('Err@1:' + str((100 - best_acc1))) + '\n'))
        f.write(('Err@5:' + str((100 - best_acc5))))
    print(('-' * 80))
    print('* best Acc@1: {:.3f} at epoch {}, Err@1: {:.3f}'.format(best_acc1, best_epo1, (100 - best_acc1)))
    print(('-' * 80))
    print('* best Acc@5: {:.3f} at epoch {}, Err@5: {:.3f}'.format(best_acc5, best_epo5, (100 - best_acc5)))
    print(('-' * 80))
|
def read_acclog(log_dir, log_name):
    """Parse an accuracy log: lines of ``<epoch> ... (<acc>, ...)``.

    Returns (acc_list, epo_list) with float accuracies and int epochs.
    """
    with open(os.path.join(log_dir, log_name), 'r') as f:
        stripped = [line.strip() for line in f.readlines()]
    epo_list = [int(line.split(' ')[0]) for line in stripped]
    acc_list = [float(line.split('(')[1].split(',')[0]) for line in stripped]
    return (acc_list, epo_list)
|
def read_losslog(log_dir, log_name):
    """Parse a loss log: the epoch is the first token of each line and
    the loss is the last.

    Returns:
        (loss_list, epo_list): losses rounded to 3 decimals, int epochs.
    """
    with open(os.path.join(log_dir, log_name), 'r') as f:
        records = [line.strip() for line in f]
    epo_list = [int(rec.split(' ')[0]) for rec in records]
    loss_list = [round(float(rec.split(' ')[-1]), 3) for rec in records]
    return (loss_list, epo_list)
|
def plot_loss(loss, epochs, save_path, plot_name):
    """Plot training loss vs. epoch and save it as <save_path><plot_name>.png.

    Args:
        loss: per-epoch loss values.
        epochs: matching epoch numbers (x axis).
        save_path: directory/prefix the png file name is appended to.
        plot_name: file name stem for the saved figure.
    """
    plt.figure()
    plt.plot(epochs, loss, label='training')
    plt.title('Training loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.xlim(min(epochs), max(epochs))
    # NOTE(review): int() truncation of the float losses means the top of
    # the y range (and the last tick) can be cut off — confirm intended.
    plt.ylim(int(min(loss)), int(max(loss)))
    plt.yticks(range(int(min(loss)), int(max(loss)), 1))
    plt.legend(loc='upper right')
    plt.savefig((save_path + '{}.png'.format(plot_name)))
    plt.show()
|
def plot_acc(acc, val_acc, epochs, val_epochs, save_path, plot_name):
    """Plot training vs. validation accuracy and save the figure.

    Args:
        acc / val_acc: per-epoch training / validation accuracies (0-100).
        epochs / val_epochs: matching epoch numbers for each curve.
        save_path: directory/prefix the png file name is appended to.
        plot_name: file name stem for the saved figure.
    """
    plt.figure()
    plt.plot(epochs, acc, label='training')
    plt.plot(val_epochs, val_acc, label='validation')
    plt.title('Training and validation acc')
    plt.ylabel('acc')
    plt.xlabel('epoch')
    plt.xlim(min(epochs), max(epochs))
    plt.ylim(0, 100)
    plt.legend(loc='upper right')
    plt.savefig((save_path + '{}.png'.format(plot_name)))
    plt.show()
|
def main():
    """Plot accuracy and loss curves from the training logs.

    Reads train/val accuracy logs and the loss log under
    <work_dir>/<log_dir>/ and saves acc1/acc5/loss plots next to them.

    NOTE(review): relies on a module-level argparse ``parser`` defined
    elsewhere in this file — confirm it declares --work_dir and --log_dir.
    """
    global args
    args = parser.parse_args()
    log_dir = ('%s/%s/' % (args.work_dir, args.log_dir))
    save_path = log_dir
    (acc1_list, epo1_list) = read_acclog(log_dir, log_name='train_acc1.txt')
    (val_acc1_list, val_epo1_list) = read_acclog(log_dir, log_name='val_acc1.txt')
    plot_acc(acc1_list, val_acc1_list, epo1_list, val_epo1_list, save_path, plot_name='acc1_plot')
    (acc5_list, epo5_list) = read_acclog(log_dir, log_name='train_acc5.txt')
    (val_acc5_list, val_epo5_list) = read_acclog(log_dir, log_name='val_acc5.txt')
    plot_acc(acc5_list, val_acc5_list, epo5_list, val_epo5_list, save_path, plot_name='acc5_plot')
    (loss_list, epo_list) = read_losslog(log_dir, log_name='loss_plot.txt')
    plot_loss(loss_list, epo_list, save_path, plot_name='loss_plot')
|
class SELayer(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Globally average-pools the input to one value per channel, passes it
    through a two-layer bottleneck MLP with a sigmoid gate, and rescales
    each input channel by the resulting weight.
    """

    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # Squeeze: (b, c, h, w) -> (b, c), then excite through the MLP.
        weights = self.avg_pool(x).view(batch, channels)
        weights = self.fc(weights).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
|
class eca_layer(nn.Module):
    """Efficient Channel Attention (ECA) module.

    Args:
        channel: number of channels of the input feature map.
        k_size: kernel size of the 1-D conv over the channel dimension
            (adaptively selected in the paper).
    source: https://github.com/BangguWu/ECANet
    """

    def __init__(self, channel, k_size=3):
        super(eca_layer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Global average pool: (b, c, h, w) -> (b, c, 1, 1).
        squeezed = self.avg_pool(x)
        # Treat channels as a 1-D sequence: (b, c, 1, 1) -> (b, 1, c).
        attn = squeezed.squeeze(-1).transpose(-1, -2)
        # Local cross-channel interaction, then restore (b, c, 1, 1).
        attn = self.conv(attn).transpose(-1, -2).unsqueeze(-1)
        gate = self.sigmoid(attn)
        return x * gate.expand_as(x)
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution without bias; padding equals dilation so the
    spatial size is preserved at stride 1."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
|
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 (pointwise) convolution without bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
|
class RLA_Bottleneck(nn.Module):
    """ResNet bottleneck block with a Recurrent Layer Aggregation (RLA) input.

    The recurrent state ``h`` is concatenated to the block input
    channel-wise before conv1, so conv1 consumes ``inplanes + rla_channel``
    channels. Optional SE / ECA channel attention is applied after bn3.

    forward(x, h) returns (out, y, h):
        out: standard residual output (identity + branch, relu'd).
        y:   the branch output before the residual add — the caller folds
             it back into the recurrent state.
        h:   the recurrent state, average-pooled 2x when this block strides.
    """
    # Output channels are planes * expansion, as in torchvision Bottleneck.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32, SE=False, ECA_size=None, groups=1, base_width=64, dilation=1, norm_layer=None, reduction=16):
        super(RLA_Bottleneck, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        width = (int((planes * (base_width / 64.0))) * groups)
        # conv1 also ingests the rla_channel recurrent-state channels.
        self.conv1 = conv1x1((inplanes + rla_channel), width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, (planes * self.expansion))
        self.bn3 = norm_layer((planes * self.expansion))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # When the block downsamples spatially, h must shrink accordingly.
        self.averagePooling = None
        if ((downsample is not None) and (stride != 1)):
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        self.se = None
        if SE:
            self.se = SELayer((planes * self.expansion), reduction)
        self.eca = None
        if (ECA_size != None):
            self.eca = eca_layer((planes * self.expansion), int(ECA_size))
    def forward(self, x, h):
        identity = x
        # Fuse the recurrent state into the block input channel-wise.
        x = torch.cat((x, h), dim=1)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.se != None):
            out = self.se(out)
        if (self.eca != None):
            out = self.eca(out)
        # y is the pre-residual branch output, used by the caller to update h.
        y = out
        if (self.downsample is not None):
            identity = self.downsample(identity)
        if (self.averagePooling is not None):
            h = self.averagePooling(h)
        out += identity
        out = self.relu(out)
        return (out, y, h)
|
@BACKBONES.register_module()
class RLA_ResNet(nn.Module):
'\n rla_channel: the number of filters of the shared(recurrent) conv in RLA\n SE: whether use SE or not \n ECA: None: not use ECA, or specify a list of kernel sizes\n \n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -1 means not freezing any parameters.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only.\n zero_init_last_bn (bool): Whether to use zero init for last norm layer\n in resblocks to let them behave as identity.\n '
def __init__(self, block=RLA_Bottleneck, layers=[3, 4, 6, 3], num_classes=1000, rla_channel=32, SE=False, ECA=None, frozen_stages=(- 1), norm_eval=True, style='pytorch', zero_init_last_bn=True, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
super(RLA_ResNet, self).__init__()
if (norm_layer is None):
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if (replace_stride_with_dilation is None):
replace_stride_with_dilation = [False, False, False]
if (len(replace_stride_with_dilation) != 3):
raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
if (ECA is None):
ECA = ([None] * 4)
elif (len(ECA) != 4):
raise ValueError('argument ECA should be a 4-element tuple, got {}'.format(ECA))
self.rla_channel = rla_channel
self.zero_init_last_bn = zero_init_last_bn
self.flops = False
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
conv_outs = ([None] * 4)
recurrent_convs = ([None] * 4)
stages = ([None] * 4)
stage_bns = ([None] * 4)
(stages[0], stage_bns[0], conv_outs[0], recurrent_convs[0]) = self._make_layer(block, 64, layers[0], rla_channel=rla_channel, SE=SE, ECA_size=ECA[0])
(stages[1], stage_bns[1], conv_outs[1], recurrent_convs[1]) = self._make_layer(block, 128, layers[1], rla_channel=rla_channel, SE=SE, ECA_size=ECA[1], stride=2, dilate=replace_stride_with_dilation[0])
(stages[2], stage_bns[2], conv_outs[2], recurrent_convs[2]) = self._make_layer(block, 256, layers[2], rla_channel=rla_channel, SE=SE, ECA_size=ECA[2], stride=2, dilate=replace_stride_with_dilation[1])
(stages[3], stage_bns[3], conv_outs[3], recurrent_convs[3]) = self._make_layer(block, 512, layers[3], rla_channel=rla_channel, SE=SE, ECA_size=ECA[3], stride=2, dilate=replace_stride_with_dilation[2])
self.conv_outs = nn.ModuleList(conv_outs)
self.recurrent_convs = nn.ModuleList(recurrent_convs)
self.stages = nn.ModuleList(stages)
self.stage_bns = nn.ModuleList(stage_bns)
self.tanh = nn.Tanh()
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_last_bn:
for m in self.modules():
if isinstance(m, RLA_Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
def _make_layer(self, block, planes, blocks, rla_channel, SE, ECA_size, stride=1, dilate=False):
conv_out = conv1x1((planes * block.expansion), rla_channel)
recurrent_conv = conv3x3(rla_channel, rla_channel)
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=previous_dilation, norm_layer=norm_layer))
self.inplanes = (planes * block.expansion)
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, rla_channel=rla_channel, SE=SE, ECA_size=ECA_size, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
bns = [norm_layer(rla_channel) for _ in range(blocks)]
return (nn.ModuleList(layers), nn.ModuleList(bns), conv_out, recurrent_conv)
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
(batch, _, height, width) = x.size()
if self.flops:
h = torch.zeros(batch, self.rla_channel, height, width)
else:
h = torch.zeros(batch, self.rla_channel, height, width, device='cuda')
outs = []
for (layers, bns, conv_out, recurrent_conv) in zip(self.stages, self.stage_bns, self.conv_outs, self.recurrent_convs):
for (layer, bn) in zip(layers, bns):
(x, y, h) = layer(x, h)
y_out = conv_out(y)
h = (h + y_out)
h = bn(h)
h = self.tanh(h)
h = recurrent_conv(h)
outs.append(x)
return tuple(outs)
def forward(self, x):
return self._forward_impl(x)
def _freeze_stages(self):
    """Put frozen submodules in eval mode and disable their gradients.

    Freezes the stem when ``frozen_stages >= 0`` and then the first
    ``frozen_stages`` RLA stages (blocks, BNs, conv_out, recurrent conv).
    """
    if self.frozen_stages >= 0:
        self.bn1.eval()
        for mod in (self.conv1, self.bn1):
            for param in mod.parameters():
                param.requires_grad = False
    # range() over a negative frozen_stages is simply empty
    for i in range(self.frozen_stages):
        frozen = (self.stages[i], self.stage_bns[i], self.conv_outs[i], self.recurrent_convs[i])
        for mod in frozen:
            mod.eval()
            for param in mod.parameters():
                param.requires_grad = False
def init_weights(self, pretrained=None):
    """Initialize the weights in backbone.

    Args:
        pretrained (str, optional): Path to pre-trained weights.
            Defaults to None.
    """
    if isinstance(pretrained, str):
        load_checkpoint(self, pretrained, strict=False, logger=get_root_logger())
    elif pretrained is None:
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                kaiming_init(mod)
            elif isinstance(mod, (_BatchNorm, nn.GroupNorm)):
                constant_init(mod, 1)
        if self.zero_init_last_bn:
            # zero the last BN of each residual block so it starts as identity
            for mod in self.modules():
                if isinstance(mod, Bottleneck):
                    constant_init(mod.norm3, 0)
                elif isinstance(mod, BasicBlock):
                    constant_init(mod.norm2, 0)
    else:
        raise TypeError('pretrained must be a str or None')
def train(self, mode=True):
    """Convert the model into training mode while keeping normalization
    layers (and frozen stages) in eval mode."""
    super(RLA_ResNet, self).train(mode)
    self._freeze_stages()
    if mode and self.norm_eval:
        for mod in self.modules():
            if isinstance(mod, _BatchNorm):
                mod.eval()
|
class ConvGRUCell_layer(nn.Module):
    """Convolutional GRU cell with GroupNorm on the gate activations.

    Gates are computed from the concatenation of input and hidden state;
    the new state is the usual GRU blend ``u*h + (1-u)*tanh(candidate)``.
    """

    def __init__(self, input_channel, output_channel, kernel_size=3):
        super(ConvGRUCell_layer, self).__init__()
        self.output_channel = output_channel
        self.kernel_size = kernel_size
        self.padding = kernel_size // 2
        in_ch = input_channel + output_channel
        # one conv yields both reset and update gates (2 * output_channel maps)
        self.gate_conv = nn.Conv2d(in_ch, output_channel * 2, kernel_size=self.kernel_size, padding=self.padding)
        self.reset_gate_norm = nn.GroupNorm(1, output_channel, 1e-06, True)
        self.update_gate_norm = nn.GroupNorm(1, output_channel, 1e-06, True)
        self.output_conv = nn.Conv2d(in_ch, output_channel, kernel_size=self.kernel_size, padding=self.padding)
        self.output_norm = nn.GroupNorm(1, output_channel, 1e-06, True)
        self.activation = nn.Tanh()

    def gates(self, x, h):
        """Return (reset, update) gate tensors, each sigmoid-normalized."""
        stacked = torch.cat((x, h), dim=1)
        gate_feats = self.gate_conv(stacked)
        half = gate_feats.shape[1] // 2
        r, u = torch.split(gate_feats, half, 1)
        return torch.sigmoid(self.reset_gate_norm(r)), torch.sigmoid(self.update_gate_norm(u))

    def output(self, x, h, r, u):
        """Candidate state from input and reset-gated hidden state (u unused)."""
        candidate = torch.cat((x, r * h), dim=1)
        return self.output_norm(self.output_conv(candidate))

    def forward(self, x, h=None):
        N, _, H, W = x.shape
        if h is None:
            # lazily create a zero hidden state matching the input geometry
            h = torch.zeros((N, self.output_channel, H, W), dtype=torch.float, device=x.device)
        r, u = self.gates(x, h)
        y = self.activation(self.output(x, h, r, u))
        return u * h + (1 - u) * y
|
class ConvLSTMCell_layer(nn.Module):
    def __init__(self, input_dim, hidden_dim, kernel_size=(3, 3), bias=False):
        """Initialize ConvLSTM cell.

        Parameters
        ----------
        input_dim: int
            Number of channels of input tensor.
        hidden_dim: int
            Number of channels of hidden state.
        kernel_size: (int, int)
            Size of the convolutional kernel.
        bias: bool
            Whether or not to add the bias.
        """
        super(ConvLSTMCell_layer, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.padding = (kernel_size[0] // 2, kernel_size[1] // 2)
        self.bias = bias
        # single conv produces all four gates (i, f, o, g) at once
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)

    def forward(self, input_tensor, cur_state):
        """One LSTM step; returns (h_next, c_next)."""
        h_cur, c_cur = cur_state
        gates = self.conv(torch.cat([input_tensor, h_cur], dim=1))
        cc_i, cc_f, cc_o, cc_g = torch.split(gates, self.hidden_dim, dim=1)
        in_gate = torch.sigmoid(cc_i)
        forget_gate = torch.sigmoid(cc_f)
        out_gate = torch.sigmoid(cc_o)
        cell_input = torch.tanh(cc_g)
        c_next = forget_gate * c_cur + in_gate * cell_input
        h_next = out_gate * torch.tanh(c_next)
        return h_next, c_next
|
class eca_layer(nn.Module):
    """Efficient Channel Attention (ECA) module.

    Args:
        channel: Number of channels of the input feature map.
        k_size: Adaptive selection of kernel size.
    source: https://github.com/BangguWu/ECANet
    """

    def __init__(self, channel, k_size=3):
        super(eca_layer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # 1-D conv over the channel axis: local cross-channel interaction
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # squeeze: global average pool -> (b, c, 1, 1)
        pooled = self.avg_pool(x)
        # treat channels as a 1-D sequence for the conv, then restore shape
        attn = pooled.squeeze(-1).transpose(-1, -2)
        attn = self.conv(attn)
        attn = attn.transpose(-1, -2).unsqueeze(-1)
        attn = self.sigmoid(attn)
        # excite: per-channel rescaling of the input
        return x * attn.expand_as(x)
|
def _make_divisible(v: float, divisor: int, min_value: Optional[int]=None) -> int:
'\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n '
if (min_value is None):
min_value = divisor
new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor))
if (new_v < (0.9 * v)):
new_v += divisor
return new_v
|
class ConvBNReLU(nn.Sequential):
    """Conv2d (no bias) -> norm -> ReLU6, with 'same'-style padding."""

    def __init__(self, in_planes: int, out_planes: int, kernel_size: int=3, stride: int=1, groups: int=1, norm_layer: Optional[Callable[(..., nn.Module)]]=None) -> None:
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        pad = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad, groups=groups, bias=False),
            norm_layer(out_planes),
            nn.ReLU6(inplace=True),
        )
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block, optionally followed by ECA."""

    def __init__(self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[(..., nn.Module)]]=None, ECA_ksize=None) -> None:
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        # identity shortcut only when shape is preserved
        self.use_res_connect = stride == 1 and inp == oup
        modules: List[nn.Module] = []
        if expand_ratio != 1:
            # pointwise expansion
            modules.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        # depthwise conv followed by a linear pointwise projection
        modules.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer))
        modules.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        modules.append(norm_layer(oup))
        self.eca_ksize = ECA_ksize
        if ECA_ksize is not None:
            modules.append(eca_layer(oup, ECA_ksize))
        self.conv = nn.Sequential(*modules)

    def forward(self, x: Tensor) -> Tensor:
        out = self.conv(x)
        if self.use_res_connect:
            out = x + out
        return out
|
class MobileNetV2(nn.Module):
    # torchvision-style MobileNetV2 with an optional ECA (Efficient Channel
    # Attention) module appended to every inverted-residual block.
    def __init__(self, num_classes: int=1000, width_mult: float=1.0, inverted_residual_setting: Optional[List[List[int]]]=None, round_nearest: int=8, block: Optional[Callable[(..., nn.Module)]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None, ECA=False) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            ECA (bool): if True, append an eca_layer to each block (kernel size
                1 for setting rows with c < 96, else 3)
        """
        super(MobileNetV2, self).__init__()
        if (block is None):
            block = InvertedResidual
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if (inverted_residual_setting is None):
            # rows: [t (expand ratio), c (output channels), n (repeats), s (stride of first block)]
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if ((len(inverted_residual_setting) == 0) or (len(inverted_residual_setting[0]) != 4)):
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        # scale channel counts by width_mult, rounded to a hardware-friendly multiple
        input_channel = _make_divisible((input_channel * width_mult), round_nearest)
        self.last_channel = _make_divisible((last_channel * max(1.0, width_mult)), round_nearest)
        features: List[nn.Module] = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible((c * width_mult), round_nearest)
            for i in range(n):
                if ECA:
                    # rows with fewer channels get the smaller ECA kernel
                    if (c < 96):
                        ECA_ksize = 1
                    else:
                        ECA_ksize = 3
                else:
                    ECA_ksize = None
                # only the first block of each row downsamples
                stride = (s if (i == 0) else 1)
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer, ECA_ksize=ECA_ksize))
                input_channel = output_channel
        # final 1x1 conv up to last_channel before pooling/classification
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
        # standard MobileNetV2 weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if (m.bias is not None):
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        # global average pool -> flatten -> classifier
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1)).reshape(x.shape[0], (- 1))
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        # Thin public wrapper around _forward_impl.
        return self._forward_impl(x)
|
def mobilenet_v2(**kwargs: Any) -> MobileNetV2:
    """Construct a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks"
    <https://arxiv.org/abs/1801.04381>`_.
    """
    print('Constructing mobilenetv2......')
    return MobileNetV2(**kwargs)
|
def mobilenetv2_eca(eca=True):
    """Construct a MobileNetV2 with ECA attention enabled.

    default:
        ECA=False (in the underlying class)
    """
    print('Constructing mobilenetv2_eca......')
    return MobileNetV2(ECA=eca)
|
def conv_out(in_planes, out_planes):
    """1x1 convolution without bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False)
|
def recurrent_dsconv(in_planes, out_planes, groups):
    """3x3 grouped (depthwise-separable style) convolution, padding 1, no bias."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, groups=groups, bias=False)
|
def _make_divisible(v: float, divisor: int, min_value: Optional[int]=None) -> int:
'\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n '
if (min_value is None):
min_value = divisor
new_v = max(min_value, ((int((v + (divisor / 2))) // divisor) * divisor))
if (new_v < (0.9 * v)):
new_v += divisor
return new_v
|
class ConvBNReLU(nn.Sequential):
    """Sequential block: bias-free Conv2d, normalization layer, ReLU6."""

    def __init__(self, in_planes: int, out_planes: int, kernel_size: int=3, stride: int=1, groups: int=1, norm_layer: Optional[Callable[(..., nn.Module)]]=None) -> None:
        norm = nn.BatchNorm2d if norm_layer is None else norm_layer
        same_pad = (kernel_size - 1) // 2
        conv = nn.Conv2d(in_planes, out_planes, kernel_size, stride, same_pad, groups=groups, bias=False)
        super(ConvBNReLU, self).__init__(conv, norm(out_planes), nn.ReLU6(inplace=True))
|
class InvertedResidual(nn.Module):
    """Inverted residual block extended with a recurrent-layer-aggregation
    (RLA) channel.

    The depthwise conv consumes the block features concatenated with the
    hidden state ``h``; the (possibly downsampled) ``h`` is returned
    alongside the block output and its pre-residual features.
    """

    def __init__(self, inp: int, oup: int, stride: int, expand_ratio: int, rla_channel: int, norm_layer: Optional[Callable[(..., nn.Module)]]=None, ECA_ksize=None) -> None:
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        hidden_rla = hidden_dim + rla_channel
        self.use_res_connect = stride == 1 and inp == oup
        # optional pointwise expansion, kept outside self.conv so the hidden
        # state can be concatenated after it
        self.conv1x1 = None
        if expand_ratio != 1:
            self.conv1x1 = ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer)
        modules: List[nn.Module] = [
            ConvBNReLU(hidden_rla, hidden_rla, stride=stride, groups=hidden_rla, norm_layer=norm_layer),
            nn.Conv2d(hidden_rla, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        ]
        self.eca_ksize = ECA_ksize
        if ECA_ksize is not None:
            modules.append(eca_layer(oup, ECA_ksize))
        self.conv = nn.Sequential(*modules)
        # stride-2 blocks shrink the feature map, so shrink h to match
        self.averagePooling = None
        if self.stride != 1:
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))

    def forward(self, x: Tensor, h: Tensor) -> Tensor:
        identity = x
        if self.conv1x1 is not None:
            x = self.conv1x1(x)
        y = self.conv(torch.cat((x, h), dim=1))
        out = identity + y if self.use_res_connect else y
        if self.averagePooling is not None:
            h = self.averagePooling(h)
        return (out, y, h)
|
class dsRLA_MobileNetV2(nn.Module):
    def __init__(self, num_classes: int=1000, width_mult: float=1.0, rla_channel: int=32, inverted_residual_setting: Optional[List[List[int]]]=None, round_nearest: int=8, block: Optional[Callable[(..., nn.Module)]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None, ECA=False) -> None:
        """MobileNet V2 with recurrent layer aggregation (RLA).

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            rla_channel (int): Number of channels of the recurrent hidden state h
            inverted_residual_setting: Network structure; rows are [t, c, n, s]
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number.
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            ECA (bool): if True, append an eca_layer to each block (kernel 1 when c < 96, else 3)
        """
        super(dsRLA_MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            # rows: [t (expand ratio), c (output channels), n (repeats), s (stride of first block)]
            inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError('inverted_residual_setting should be non-empty or a 4-element list, got {}'.format(inverted_residual_setting))
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        self.conv1 = ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)
        # Stage indices at which a new recurrent conv begins: stage 0 plus
        # every stage whose first block has stride 2 (a resolution change).
        self.newcell = [0]
        for i in range(1, len(inverted_residual_setting)):
            if inverted_residual_setting[i][3] == 2:
                self.newcell.append(i)
        num_stages = len(inverted_residual_setting)
        stages = [None] * num_stages
        stage_bns = [None] * num_stages
        conv_outs = [None] * num_stages
        # NOTE(review): list multiplication repeats the *same* module object,
        # so every entry of recurrent_dsconvs shares one set of weights. This
        # may be intentional weight sharing across resolutions -- confirm.
        recurrent_dsconvs = [recurrent_dsconv(rla_channel, rla_channel, rla_channel)] * len(self.newcell)
        j = 0
        for (t, c, n, s) in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            stages[j] = []
            stage_bns[j] = nn.ModuleList([norm_layer(rla_channel) for _ in range(n)])
            conv_outs[j] = conv_out(output_channel, rla_channel)
            for i in range(n):
                if ECA:
                    # rows with fewer channels get the smaller ECA kernel
                    if c < 96:
                        ECA_ksize = 1
                    else:
                        ECA_ksize = 3
                else:
                    ECA_ksize = None
                # only the first block of each row downsamples
                stride = s if i == 0 else 1
                stages[j].append(block(input_channel, output_channel, stride, expand_ratio=t, rla_channel=rla_channel, norm_layer=norm_layer, ECA_ksize=ECA_ksize))
                input_channel = output_channel
            stages[j] = nn.ModuleList(stages[j])
            j += 1
        self.stages = nn.ModuleList(stages)
        self.conv_outs = nn.ModuleList(conv_outs)
        self.recurrent_dsconvs = nn.ModuleList(recurrent_dsconvs)
        self.stage_bns = nn.ModuleList(stage_bns)
        self.rla_channel = rla_channel
        # when True, h is created without an explicit device (FLOPs counting)
        self.flops = False
        self.conv2 = ConvBNReLU(input_channel + rla_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer)
        self.bn2 = norm_layer(rla_channel)
        self.relu = nn.ReLU6(inplace=True)
        self.tanh = nn.Tanh()
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, num_classes))
        # standard MobileNetV2 weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        """Stem, then all RLA stages; h is blended with the final features."""
        x = self.conv1(x)
        batch, _, height, width = x.size()
        # Fix: allocate the hidden state on the input's device instead of
        # hard-coding 'cuda', so CPU runs also work. The FLOPs-counting path
        # keeps its original (default-device) allocation.
        if self.flops:
            h = torch.zeros(batch, self.rla_channel, height, width)
        else:
            h = torch.zeros(batch, self.rla_channel, height, width, device=x.device)
        j = 0
        k = -1
        # renamed loop variable (was `conv_out`) to avoid shadowing the
        # module-level conv_out() helper
        for (stage, bns, conv_out_layer) in zip(self.stages, self.stage_bns, self.conv_outs):
            if j in self.newcell:
                # switch to the recurrent conv assigned to this resolution
                k += 1
                rec_conv = self.recurrent_dsconvs[k]
            for (layer, bn) in zip(stage, bns):
                x, y, h = layer(x, h)
                # fold this block's output into the recurrent state
                h = h + conv_out_layer(y)
                h = self.tanh(bn(h))
                h = rec_conv(h)
            j += 1
        h = self.relu(self.bn2(h))
        # concatenate final features with the aggregated state before the head
        x = torch.cat((x, h), dim=1)
        x = self.conv2(x)
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1)).reshape(x.shape[0], -1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
|
def dsrla_mobilenetv2(rla_channel=32):
    """Construct a dsRLA-MobileNetV2 (default: rla_channel=32, ECA=False)."""
    print('Constructing dsrla_mobilenetv2......')
    return dsRLA_MobileNetV2(rla_channel=rla_channel)
|
def dsrla_mobilenetv2_eca(rla_channel=32, eca=True):
    """Construct a dsRLA-MobileNetV2 with ECA attention (default rla_channel=32)."""
    print('Constructing dsrla_mobilenetv2_eca......')
    return dsRLA_MobileNetV2(rla_channel=rla_channel, ECA=eca)
|
def dsrla_mobilenetv2_k6():
    """dsRLA-MobileNetV2 with a 6-channel recurrent state, no ECA."""
    print('Constructing dsrla_mobilenetv2_k6......')
    return dsRLA_MobileNetV2(rla_channel=6)
|
def dsrla_mobilenetv2_k6_eca(eca=True):
    """dsRLA-MobileNetV2 with a 6-channel recurrent state and ECA attention."""
    print('Constructing dsrla_mobilenetv2_k6_eca......')
    return dsRLA_MobileNetV2(rla_channel=6, ECA=eca)
|
def dsrla_mobilenetv2_k12():
    """dsRLA-MobileNetV2 with a 12-channel recurrent state, no ECA."""
    print('Constructing dsrla_mobilenetv2_k12......')
    return dsRLA_MobileNetV2(rla_channel=12)
|
def dsrla_mobilenetv2_k12_eca(eca=True):
    """dsRLA-MobileNetV2 with a 12-channel recurrent state and ECA attention."""
    print('Constructing dsrla_mobilenetv2_k12_eca......')
    return dsRLA_MobileNetV2(rla_channel=12, ECA=eca)
|
def dsrla_mobilenetv2_k24():
    """dsRLA-MobileNetV2 with a 24-channel recurrent state, no ECA."""
    print('Constructing dsrla_mobilenetv2_k24......')
    return dsRLA_MobileNetV2(rla_channel=24)
|
def dsrla_mobilenetv2_k24_eca(eca=True):
    """dsRLA-MobileNetV2 with a 24-channel recurrent state and ECA attention."""
    print('Constructing dsrla_mobilenetv2_k24_eca......')
    return dsRLA_MobileNetV2(rla_channel=24, ECA=eca)
|
def dsrla_mobilenetv2_k32():
    """dsRLA-MobileNetV2 with a 32-channel recurrent state, no ECA."""
    print('Constructing dsrla_mobilenetv2_k32......')
    return dsRLA_MobileNetV2(rla_channel=32)
|
def dsrla_mobilenetv2_k32_eca(eca=True):
    """dsRLA-MobileNetV2 with a 32-channel recurrent state and ECA attention."""
    print('Constructing dsrla_mobilenetv2_k32_eca......')
    return dsRLA_MobileNetV2(rla_channel=32, ECA=eca)
|
# NOTE: the following trailing text is web-viewer scrape residue, not Python
# code; commented out so the module remains importable.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.