code stringlengths 17 6.64M |
|---|
def calc_num_params(model):
    """Count the trainable parameters of *model*.

    Args:
        model (nn.Module): Model whose parameters are counted.

    Returns:
        int: Total number of elements across all parameters with
        ``requires_grad=True``.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
|
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 GradCAM demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file/url')
parser.add_argument('video', help='video file/url or rawframes directory')
parser.add_a... |
def build_inputs(model, video_path, use_frames=False):
'build inputs for GradCAM.\n\n Note that, building inputs for GradCAM is exactly the same as building\n inputs for Recognizer test stage. Codes from `inference_recognizer`.\n\n Args:\n model (nn.Module): Recognizer model.\n video_path (... |
def _resize_frames(frame_list, scale, keep_ratio=True, interpolation='bilinear'):
'resize frames according to given scale.\n\n Codes are modified from `mmaction2/datasets/pipelines/augmentation.py`,\n `Resize` class.\n\n Args:\n frame_list (list[np.ndarray]): frames to be resized.\n scale (... |
def main():
args = parse_args()
device = torch.device(args.device)
cfg = Config.fromfile(args.config)
cfg.merge_from_dict(args.cfg_options)
model = init_recognizer(cfg, args.checkpoint, device=device, use_frames=args.use_frames)
inputs = build_inputs(model, args.video, use_frames=args.use_fram... |
def init_recognizer(config, checkpoint=None, device='cuda:0', use_frames=False):
"Initialize a recognizer from config file.\n\n Args:\n config (str | :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str | None, optional): Checkpoint path/url. If set to None,\... |
def inference_recognizer(model, video_path, label_path, use_frames=False, outputs=None, as_tensor=True):
'Inference a video with the detector.\n\n Args:\n model (nn.Module): The loaded recognizer.\n video_path (str): The video file path/url or the rawframes directory\n path. If ``use_f... |
def train_model(model, dataset, cfg, distributed=False, validate=False, test=dict(test_best=False, test_last=False), timestamp=None, meta=None):
'Train model entry function.\n\n Args:\n model (nn.Module): The model to be trained.\n dataset (:obj:`Dataset`): Train dataset.\n cfg (dict): The... |
def bbox_target(pos_bboxes_list, neg_bboxes_list, gt_labels, cfg):
'Generate classification targets for bboxes.\n\n Args:\n pos_bboxes_list (list[Tensor]): Positive bboxes list.\n neg_bboxes_list (list[Tensor]): Negative bboxes list.\n gt_labels (list[Tensor]): Groundtruth classification l... |
def compute_precision_recall(scores, labels, num_gt):
'Compute precision and recall.\n\n Args:\n scores: A float numpy array representing detection score\n labels: A boolean numpy array representing true/false positive labels\n num_gt: Number of ground truth instances\n\n Raises:\n ... |
def compute_average_precision(precision, recall):
'Compute Average Precision according to the definition in VOCdevkit.\n\n Precision is modified to ensure that it does not decrease as recall\n decrease.\n\n Args:\n precision: A float [N, 1] numpy array of precisions\n recall: A float [N, 1]... |
def compute_cor_loc(num_gt_imgs_per_class, num_images_correctly_detected_per_class):
'Compute CorLoc according to the definition in the following paper.\n\n https://www.robots.ox.ac.uk/~vgg/rg/papers/deselaers-eccv10.pdf\n\n Returns nans if there are no ground truth images for a class.\n\n Args:\n ... |
class DetectionEvaluator():
'Interface for object detection evalution classes.\n\n Example usage of the Evaluator:\n ------------------------------\n evaluator = DetectionEvaluator(categories)\n\n # Detections and groundtruth for image 1.\n evaluator.add_single_groundtruth_image_info(...)\n eval... |
class ObjectDetectionEvaluator(DetectionEvaluator):
'A class to evaluate detections.'
def __init__(self, categories, matching_iou_threshold=0.5, evaluate_corlocs=False, metric_prefix=None, use_weighted_mean_ap=False, evaluate_masks=False):
"Constructor.\n\n Args:\n categories: A lis... |
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
'A class to evaluate detections using PASCAL metrics.'
def __init__(self, categories, matching_iou_threshold=0.5):
super(PascalDetectionEvaluator, self).__init__(categories, matching_iou_threshold=matching_iou_threshold, evaluate_corlocs=False... |
class ObjectDetectionEvaluation():
'Internal implementation of Pascal object detection metrics.'
def __init__(self, num_groundtruth_classes, matching_iou_threshold=0.5, nms_iou_threshold=1.0, nms_max_output_boxes=10000, use_weighted_mean_ap=False, label_id_offset=0):
if (num_groundtruth_classes < 1):... |
class InputDataFields():
'Names for the input tensors.\n\n Holds the standard data field names to use for identifying input tensors.\n This should be used by the decoder to identify keys for the returned\n tensor_dict containing input tensors. And it should be used by the model to\n identify the tenso... |
class DetectionResultFields():
'Naming conventions for storing the output of the detector.\n\n Attributes:\n source_id: source of the original image.\n key: unique key corresponding to image.\n detection_boxes: coordinates of the detection boxes in the image.\n detection_scores: det... |
def det2csv(dataset, results, custom_classes):
csv_results = []
for idx in range(len(dataset)):
video_id = dataset.video_infos[idx]['video_id']
timestamp = dataset.video_infos[idx]['timestamp']
result = results[idx]
for (label, _) in enumerate(result):
for bbox in r... |
def results2csv(dataset, results, out_file, custom_classes=None):
if isinstance(results[0], list):
csv_results = det2csv(dataset, results, custom_classes)
def to_str(item):
if isinstance(item, float):
return f'{item:.3f}'
return str(item)
with open(out_file, 'w') as f:... |
def print_time(message, start):
    """Print the wall-clock time elapsed since *start*.

    Args:
        message (str): Description of the finished task; appended to the
            printed line.
        start (float): Start timestamp as returned by ``time.time()``.
    """
    # Hoist the subtraction and use an f-string (``:g`` matches the old
    # ``%g`` formatting exactly); flush so progress shows up immediately
    # even when stdout is piped/buffered.
    elapsed = time.time() - start
    print(f'==> {elapsed:g} seconds to {message}', flush=True)
|
def make_image_key(video_id, timestamp):
    """Build the unique image key for a video id and timestamp.

    Args:
        video_id (str): Identifier of the video.
        timestamp: Timestamp convertible to ``int``; zero-padded to
            four digits in the key.

    Returns:
        str: Key of the form ``"<video_id>,<timestamp:04d>"``.
    """
    return '{},{:04d}'.format(video_id, int(timestamp))
|
def read_csv(csv_file, class_whitelist=None):
'Loads boxes and class labels from a CSV file in the AVA format.\n\n CSV file format described at https://research.google.com/ava/download.html.\n\n Args:\n csv_file: A file object.\n class_whitelist: If provided, boxes corresponding to (integer) c... |
def read_exclusions(exclusions_file):
'Reads a CSV file of excluded timestamps.\n\n Args:\n exclusions_file: A file object containing a csv of video-id,timestamp.\n\n Returns:\n A set of strings containing excluded image keys, e.g.\n "aaaaaaaaaaa,0904",\n or an empty set if exclu... |
def read_labelmap(labelmap_file):
'Reads a labelmap without the dependency on protocol buffers.\n\n Args:\n labelmap_file: A file object containing a label map protocol buffer.\n\n Returns:\n labelmap: The label map in the form used by the\n object_detection_evaluation\n module -... |
def ava_eval(result_file, result_type, label_file, ann_file, exclude_file, verbose=True, custom_classes=None):
assert (result_type in ['mAP'])
start = time.time()
(categories, class_whitelist) = read_labelmap(open(label_file))
if (custom_classes is not None):
custom_classes = custom_classes[1:... |
class OutputHook():
'Output feature map of some layers.\n\n Args:\n module (nn.Module): The whole module to get layers.\n outputs (tuple[str] | list[str]): Layer name to output. Default: None.\n as_tensor (bool): Determine to return a tensor or a numpy array.\n Default: False.\n... |
def rgetattr(obj, attr, *args):
    """Recursively fetch a dotted attribute path.

    ``rgetattr(m, 'a.b.c')`` is equivalent to ``m.a.b.c``; an optional
    extra positional argument acts as the default at every step, like the
    default of built-in :func:`getattr`.

    Args:
        obj: Root object to start the lookup from.
        attr (str): Dot-separated attribute path.
        *args: Optional single default value.

    Returns:
        The resolved attribute (or the default, if provided and a step
        is missing).
    """
    current = obj
    for name in attr.split('.'):
        current = getattr(current, name, *args)
    return current
|
@OPTIMIZERS.register_module()
class CopyOfSGD(SGD):
'A clone of torch.optim.SGD.\n\n A customized optimizer could be defined like CopyOfSGD. You may derive from\n built-in optimizers in torch.optim, or directly implement a new optimizer.\n '
|
@OPTIMIZER_BUILDERS.register_module()
class TSMOptimizerConstructor(DefaultOptimizerConstructor):
'Optimizer constructor in TSM model.\n\n This constructor builds optimizer in different ways from the default one.\n\n 1. Parameters of the first conv layer have default lr and weight decay.\n 2. Parameters ... |
def cycle(iterable):
    """Endlessly iterate over *iterable*, re-iterating it on each pass.

    Unlike :func:`itertools.cycle`, no elements are cached: ``iter()`` is
    called on the source again after every exhaustion, so e.g. a shuffling
    sampler re-shuffles on every epoch.

    Args:
        iterable: A re-iterable object (lists, datasets, loaders, ...).

    Yields:
        Items of ``iterable``, repeated forever.

    Note:
        The original implementation busy-looped forever on an empty
        iterable (or a one-shot iterator after exhaustion); this version
        terminates instead.
    """
    iterator = iter(iterable)
    while True:
        try:
            yield next(iterator)
        except StopIteration:
            iterator = iter(iterable)
            try:
                yield next(iterator)
            except StopIteration:
                # Fresh iterator is immediately exhausted -> nothing to
                # cycle; stop instead of spinning in an infinite loop.
                return
|
class OmniSourceDistSamplerSeedHook(Hook):
def before_epoch(self, runner):
for data_loader in runner.data_loaders:
if hasattr(data_loader.sampler, 'set_epoch'):
data_loader.sampler.set_epoch(runner.epoch)
elif hasattr(data_loader.batch_sampler.sampler, 'set_epoch')... |
class OmniSourceRunner(EpochBasedRunner):
'OmniSource Epoch-based Runner.\n\n This runner train models epoch by epoch, the epoch length is defined by the\n dataloader[0], which is the main dataloader.\n '
def run_iter(self, data_batch, train_mode, source, **kwargs):
if (self.batch_processor ... |
@HOOKS.register_module()
class TINLrUpdaterHook(LrUpdaterHook):
def __init__(self, min_lr, **kwargs):
self.min_lr = min_lr
super().__init__(**kwargs)
def get_warmup_lr(self, cur_iters):
if (self.warmup == 'linear'):
k = (((cur_iters / self.warmup_iters) * (1 - self.warmup... |
@DATASETS.register_module()
class AudioDataset(BaseDataset):
"Audio dataset for video recognition. Extracts the audio feature on-the-\n fly. Annotation file can be that of the rawframe dataset, or:\n\n .. code-block:: txt\n\n some/directory-1.wav 163 1\n some/directory-2.wav 122 1\n som... |
@DATASETS.register_module()
class AudioFeatureDataset(BaseDataset):
"Audio feature dataset for video recognition. Reads the features\n extracted off-line. Annotation file can be that of the rawframe dataset,\n or:\n\n .. code-block:: txt\n\n some/directory-1.npy 163 1\n some/directory-2.npy... |
@DATASETS.register_module()
class AudioVisualDataset(RawframeDataset):
'Dataset that reads both audio and visual data, supporting both rawframes\n and videos. The annotation file is same as that of the rawframe dataset,\n such as:\n\n .. code-block:: txt\n\n some/directory-1 163 1\n some/di... |
class BaseDataset(Dataset, metaclass=ABCMeta):
"Base class for datasets.\n\n All datasets to process video should subclass it.\n All subclasses should overwrite:\n\n - Methods:`load_annotations`, supporting to load information from an\n annotation file.\n - Methods:`prepare_train_frames`, providing... |
def build_dataset(cfg, default_args=None):
'Build a dataset from config dict.\n\n Args:\n cfg (dict): Config dict. It should at least contain the key "type".\n default_args (dict | None, optional): Default initialization arguments.\n Default: None.\n\n Returns:\n Dataset: The... |
def build_dataloader(dataset, videos_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, drop_last=False, pin_memory=True, **kwargs):
'Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there is only one dataloader f... |
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed the random generators of a dataloader worker.

    Each (rank, worker) pair gets a distinct, deterministic seed so that
    augmentation randomness is reproducible yet uncorrelated across
    workers and processes.

    Args:
        worker_id (int): Index of the worker within its process.
        num_workers (int): Workers per process.
        rank (int): Rank of the current process.
        seed (int): Base random seed.
    """
    per_worker_seed = seed + num_workers * rank + worker_id
    np.random.seed(per_worker_seed)
    random.seed(per_worker_seed)
|
@DATASETS.register_module()
class RepeatDataset():
'A wrapper of repeated dataset.\n\n The length of repeated dataset will be ``times`` larger than the original\n dataset. This is useful when the data loading time is long but the dataset\n is small. Using RepeatDataset can reduce the data loading time be... |
@DATASETS.register_module()
class HVUDataset(BaseDataset):
"HVU dataset, which supports the recognition tags of multiple categories.\n Accept both video annotation files or rawframe annotation files.\n\n The dataset loads videos or raw frames and applies specified transforms to\n return a dict containing... |
@DATASETS.register_module()
class ImageDataset(VideoDataset):
'Image dataset for action recognition, used in the Project OmniSource.\n\n The dataset loads image list and apply specified transforms to return a\n dict containing the image tensors and other information. For the\n ImageDataset\n\n The ann... |
@PIPELINES.register_module()
class Compose():
'Compose a data pipeline with a sequence of transforms.\n\n Args:\n transforms (list[dict | callable]):\n Either config dicts of transforms or transform objects.\n '
def __init__(self, transforms):
assert isinstance(transforms, Seq... |
def to_tensor(data):
'Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n '
if isinstance(data, torch.Tensor):
return data
if isinstance(data, np.nda... |
@PIPELINES.register_module()
class ToTensor():
'Convert some values in results dict to `torch.Tensor` type in data\n loader pipeline.\n\n Args:\n keys (Sequence[str]): Required keys to be converted.\n '
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
... |
@PIPELINES.register_module()
class Rename():
'Rename the key in results.\n\n Args:\n mapping (dict): The keys in results that need to be renamed. The key of\n the dict is the original name, while the value is the new name. If\n the original name not found in results, do nothing.\n ... |
@PIPELINES.register_module()
class ToDataContainer():
"Convert the data to DataContainer.\n\n Args:\n fields (Sequence[dict]): Required fields to be converted\n with keys and attributes. E.g.\n fields=(dict(key='gt_bbox', stack=False),).\n Note that key can also be a lis... |
@PIPELINES.register_module()
class ImageToTensor():
'Convert image type to `torch.Tensor` type.\n\n Args:\n keys (Sequence[str]): Required keys to be converted.\n '
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
'Performs the ImageToTensor formating.... |
@PIPELINES.register_module()
class Transpose():
'Transpose image channels to a given order.\n\n Args:\n keys (Sequence[str]): Required keys to be converted.\n order (Sequence[int]): Image channel order.\n '
def __init__(self, keys, order):
self.keys = keys
self.order = ord... |
@PIPELINES.register_module()
class Collect():
'Collect data from the loader relevant to the specific task.\n\n This keeps the items in ``keys`` as it is, and collect items in\n ``meta_keys`` into a meta item called ``meta_name``.This is usually\n the last stage of the data loader pipeline.\n For examp... |
@PIPELINES.register_module()
class FormatShape():
'Format final imgs shape to the given input_format.\n\n Required keys are "imgs", "num_clips" and "clip_len", added or modified\n keys are "imgs" and "input_shape".\n\n Args:\n input_format (str): Define the final imgs format.\n collapse (bo... |
@PIPELINES.register_module()
class FormatAudioShape():
'Format final audio shape to the given input_format.\n\n Required keys are "imgs", "num_clips" and "clip_len", added or modified\n keys are "imgs" and "input_shape".\n\n Args:\n input_format (str): Define the final imgs format.\n '
def... |
@DATASETS.register_module()
class PoseDataset(BaseDataset):
"Pose dataset for action recognition.\n\n The dataset loads pose and apply specified transforms to return a\n dict containing pose information.\n\n The ann_file is a pickle file, the json file contains a list of\n annotations, the fields of a... |
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
"Rawframe dataset for action recognition.\n\n The dataset loads raw frames and apply specified transforms to return a\n dict containing the frame tensors and other information.\n\n The ann_file is a text file with multiple lines, and each li... |
@DATASETS.register_module()
class RawVideoDataset(BaseDataset):
"RawVideo dataset for action recognition, used in the Project OmniSource.\n\n The dataset loads clips of raw videos and apply specified transforms to\n return a dict containing the frame tensors and other information. Not that\n for this dat... |
class DistributedSampler(_DistributedSampler):
'DistributedSampler inheriting from\n ``torch.utils.data.DistributedSampler``.\n\n In pytorch of lower versions, there is no ``shuffle`` argument. This child\n class will port one to DistributedSampler.\n '
def __init__(self, dataset, num_replicas=No... |
class ClassSpecificDistributedSampler(_DistributedSampler):
'ClassSpecificDistributedSampler inheriting from\n ``torch.utils.data.DistributedSampler``.\n\n Samples are sampled with a class specific probability, which should be an\n attribute of the dataset (dataset.class_prob, which is a dictionary that\... |
@DATASETS.register_module()
class VideoDataset(BaseDataset):
'Video dataset for action recognition.\n\n The dataset loads raw videos and apply specified transforms to return a\n dict containing the frame tensors and other information.\n\n The ann_file is a text file with multiple lines, and each line ind... |
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
super().__init__()
out_features = (out_features or in_features)
hidden_features = (hidden_features or in_features)
self.fc1 = nn.Linear(in_features, hidden_... |
class LWAttention(nn.Module):
'\n LW-MSA: Local Window-based MSA\n '
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, local_size=(1, 1, 1)):
super(LWAttention, self).__init__()
self.dim = dim
self.num_heads = num_heads
ass... |
class GPAttention(nn.Module):
'\n GP-MSA: Global Pyramid-based MSA.\n '
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, fine_pysize=(8, 7, 7), coarse_pysize=(4, 4, 4), resolution=(16, 56, 56), stage=0):
super().__init__()
assert ((dim % ... |
class DualFormerBlock(nn.Module):
def __init__(self, dim, num_heads, resolution=(16, 7, 7), mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, fine_pysize=(8, 7, 7), coarse_pysize=(4, 4, 4), local_size=(1, 1, 1), stage=0):
sup... |
class PatchEmbed(nn.Module):
' Image to Patch Embedding\n '
def __init__(self, video_size=(32, 224, 224), patch_size=(2, 4, 4), in_chans=3, embed_dim=768):
super().__init__()
self.video_size = video_size
self.patch_size = patch_size
assert (((video_size[0] % patch_size[0]) ... |
class PosCNN(nn.Module):
def __init__(self, in_chans, embed_dim=768, s=1):
super(PosCNN, self).__init__()
self.proj = nn.Sequential(nn.Conv3d(in_chans, embed_dim, 3, s, 1, bias=True, groups=embed_dim))
self.s = s
def forward(self, x, D, H, W):
(B, N, C) = x.shape
feat... |
@BACKBONES.register_module()
class DualFormer(nn.Module):
def __init__(self, pretrained=None, pretrained2d=True, video_size=(32, 224, 224), patch_size=(2, 4, 4), in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512], num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate... |
def build_backbone(cfg):
    """Construct a backbone network from its config dict."""
    backbone = BACKBONES.build(cfg)
    return backbone
|
def build_head(cfg):
    """Construct a classification head from its config dict."""
    head = HEADS.build(cfg)
    return head
|
def build_recognizer(cfg, train_cfg=None, test_cfg=None):
'Build recognizer.'
if ((train_cfg is not None) or (test_cfg is not None)):
warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model. Details see this PR: https://github.com/open-mmlab/mmaction2/pull/629', UserWarning)
... |
def build_loss(cfg):
    """Construct a loss module from its config dict."""
    loss = LOSSES.build(cfg)
    return loss
|
def build_localizer(cfg):
    """Construct a localizer model from its config dict."""
    localizer = LOCALIZERS.build(cfg)
    return localizer
|
def build_model(cfg, train_cfg=None, test_cfg=None):
'Build model.'
args = cfg.copy()
obj_type = args.pop('type')
if (obj_type in LOCALIZERS):
return build_localizer(cfg)
if (obj_type in RECOGNIZERS):
return build_recognizer(cfg, train_cfg, test_cfg)
if (obj_type in DETECTORS):... |
def build_neck(cfg):
    """Construct a neck module from its config dict."""
    neck = NECKS.build(cfg)
    return neck
|
@CONV_LAYERS.register_module()
class Conv2plus1d(nn.Module):
'(2+1)d Conv module for R(2+1)d backbone.\n\n https://arxiv.org/pdf/1711.11248.pdf.\n\n Args:\n in_channels (int): Same as nn.Conv3d.\n out_channels (int): Same as nn.Conv3d.\n kernel_size (int | tuple[int]): Same as nn.Conv3d... |
@CONV_LAYERS.register_module()
class ConvAudio(nn.Module):
"Conv2d module for AudioResNet backbone.\n\n <https://arxiv.org/abs/2001.08740>`_.\n\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int | tuple[int]): Same as nn.Conv2... |
class LFB():
"Long-Term Feature Bank (LFB).\n\n LFB is proposed in `Long-Term Feature Banks for Detailed Video\n Understanding <https://arxiv.org/abs/1812.05038>`_\n\n The ROI features of videos are stored in the feature bank. The feature bank\n was generated by inferring with a lfb infer config.\n\n ... |
class TAM(nn.Module):
'Temporal Adaptive Module(TAM) for TANet.\n\n This module is proposed in `TAM: TEMPORAL ADAPTIVE MODULE FOR VIDEO\n RECOGNITION <https://arxiv.org/pdf/2005.06803>`_\n\n Args:\n in_channels (int): Channel num of input features.\n num_segments (int): Number of frame segm... |
@HEADS.register_module()
class AudioTSNHead(BaseHead):
"Classification head for TSN on audio.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building loss.\n Default: dict... |
class AvgConsensus(nn.Module):
'Average consensus module.\n\n Args:\n dim (int): Decide which dim consensus function to apply.\n Default: 1.\n '
def __init__(self, dim=1):
super().__init__()
self.dim = dim
def forward(self, x):
'Defines the computation per... |
class BaseHead(nn.Module, metaclass=ABCMeta):
"Base class for head.\n\n All Head should subclass it.\n All subclass should overwrite:\n - Methods:``init_weights``, initializing weights in some modules.\n - Methods:``forward``, supporting to forward both for training and testing.\n\n Args:\n ... |
@HEADS.register_module()
class I3DHead(BaseHead):
"Classification head for I3D.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building loss.\n Default: dict(type='CrossEn... |
@HEADS.register_module()
class I3DHead_Teacher(BaseHead):
"Classification head for I3D. Used for teacher model in token labelling.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for buil... |
@HEADS.register_module()
class I3DHead_TL(BaseHead):
"Classification head for I3D. Used for student model in token labelling.\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building lo... |
@HEADS.register_module()
class SlowFastHead(BaseHead):
"The classification head for SlowFast.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building loss.\n Default: dict... |
@HEADS.register_module()
class TPNHead(TSNHead):
"Class head for TPN.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building loss.\n Default: dict(type='CrossEntropyLoss'... |
@HEADS.register_module()
class TSMHead(BaseHead):
"Class head for TSM.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n num_segments (int): Number of frame segments. Default: 8.\n loss_cls (dict): Config f... |
@HEADS.register_module()
class TSNHead(BaseHead):
"Class head for TSN.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building loss.\n Default: dict(type='CrossEntropyLoss... |
@HEADS.register_module()
class X3DHead(BaseHead):
"Classification head for I3D.\n\n Args:\n num_classes (int): Number of classes to be classified.\n in_channels (int): Number of channels in input feature.\n loss_cls (dict): Config for building loss.\n Default: dict(type='CrossEn... |
class BaseLocalizer(nn.Module, metaclass=ABCMeta):
'Base class for localizers.\n\n All localizers should subclass it. All subclass should overwrite:\n Methods:``forward_train``, supporting to forward when training.\n Methods:``forward_test``, supporting to forward when testing.\n '
def __init__(s... |
class BaseWeightedLoss(nn.Module, metaclass=ABCMeta):
'Base class for loss.\n\n All subclass should overwrite the ``_forward()`` method which returns the\n normal loss without loss weights.\n\n Args:\n loss_weight (float): Factor scalar multiplied on the loss.\n Default: 1.0.\n '
... |
def binary_logistic_regression_loss(reg_score, label, threshold=0.5, ratio_range=(1.05, 21), eps=1e-05):
'Binary Logistic Regression Loss.'
label = label.view((- 1)).to(reg_score.device)
reg_score = reg_score.contiguous().view((- 1))
pmask = (label > threshold).float().to(reg_score.device)
num_pos... |
@LOSSES.register_module()
class BinaryLogisticRegressionLoss(nn.Module):
'Binary Logistic Regression Loss.\n\n It will calculate binary logistic regression loss given reg_score and\n label.\n '
def forward(self, reg_score, label, threshold=0.5, ratio_range=(1.05, 21), eps=1e-05):
'Calculate ... |
@LOSSES.register_module()
class CrossEntropyLoss(BaseWeightedLoss):
"Cross Entropy Loss.\n\n Support two kinds of labels and their corresponding loss type. It's worth\n mentioning that loss type will be detected by the shape of ``cls_score``\n and ``label``.\n 1) Hard label: This label is an integer a... |
@LOSSES.register_module()
class BCELossWithLogits(BaseWeightedLoss):
'Binary Cross Entropy Loss with logits.\n\n Args:\n loss_weight (float): Factor scalar multiplied on the loss.\n Default: 1.0.\n class_weight (list[float] | None): Loss weight for each class. If set\n as No... |
@LOSSES.register_module()
class SoftCE_TL(nn.Module):
'\n Cross entropy loss for token labelling.\n '
def __init__(self):
super().__init__()
self.CE = CrossEntropyLoss()
self.k = 5
def forward(self, cls_score, target, **kwargs):
loss_model = self.CE(cls_score, targe... |
@LOSSES.register_module()
class NLLLoss(BaseWeightedLoss):
'NLL Loss.\n\n It will calculate NLL loss given cls_score and label.\n '
def _forward(self, cls_score, label, **kwargs):
'Forward function.\n\n Args:\n cls_score (torch.Tensor): The class score.\n label (tor... |
class Identity(nn.Module):
    """Identity mapping: a no-op module that returns its input unchanged."""

    def forward(self, x):
        """Pass *x* through untouched and return it."""
        return x
|
class DownSample(nn.Module):
"DownSample modules.\n\n It uses convolution and maxpooling to downsample the input feature,\n and specifies downsample position to determine `pool-conv` or `conv-pool`.\n\n Args:\n in_channels (int): Channel number of input features.\n out_channels (int): Chann... |
class LevelFusion(nn.Module):
"Level Fusion module.\n\n This module is used to aggregate the hierarchical features dynamic in\n visual tempos and consistent in spatial semantics. The top/bottom features\n for top-down/bottom-up flow would be combined to achieve two additional\n options, namely 'Cascad... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.