code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def loss(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]], data_samples: SampleList, **kwargs) -> Dict: """Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features f...
Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data sample...
loss
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def loss_by_feat(self, cls_scores: torch.Tensor, data_samples: SampleList) -> Dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size,...
Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). data_samples (list[:obj:`ActionDataSample`]): The batch data samples. ...
loss_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def predict(self, feats: Union[torch.Tensor, Tuple[torch.Tensor]], data_samples: SampleList, **kwargs) -> SampleList: """Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (torch.Tensor | tuple[tor...
Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch ...
predict
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def predict_by_feat(self, cls_scores: torch.Tensor, data_samples: SampleList) -> SampleList: """Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape ...
Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data o...
predict_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def average_clip(self, cls_scores: torch.Tensor, num_segs: int = 1) -> torch.Tensor: """Averaging class scores over multiple clips. Using different averaging types ('score' or 'prob' or None, which defined in test_cfg) to computed the final averaged ...
Averaging class scores over multiple clips. Using different averaging types ('score' or 'prob' or None, which defined in test_cfg) to computed the final averaged class score. Only called in test mode. Args: cls_scores (torch.Tensor): Class scores to be averaged. ...
average_clip
python
open-mmlab/mmaction2
mmaction/models/heads/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/base.py
Apache-2.0
def forward(self, x: Tensor, num_segs: Optional[int] = None, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): For 2D backbone. Number of segments into which ...
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): For 2D backbone. Number of segments into which a video is divided. Defaults to None. Returns: Tensor: The output features after pooling.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/feature_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/feature_head.py
Apache-2.0
def predict_by_feat(self, feats: Union[Tensor, Tuple[Tensor]], data_samples) -> Tensor: """Integrate multi-view features into one tensor. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[...
Integrate multi-view features into one tensor. Args: feats (torch.Tensor | tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: Tensor: The integrated mul...
predict_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/feature_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/feature_head.py
Apache-2.0
def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor: """Forward features from the upstream network. Args: x (torch.Tensor): Features from the upstream network. Returns: torch.Tensor: Classification scores with shape (B, num_classes). """ N, M, ...
Forward features from the upstream network. Args: x (torch.Tensor): Features from the upstream network. Returns: torch.Tensor: Classification scores with shape (B, num_classes).
forward
python
open-mmlab/mmaction2
mmaction/models/heads/gcn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/gcn_head.py
Apache-2.0
def forward(self, x: Tensor, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples. """ # [N, in_channels, 4, 7, 7] if self.avg_pool...
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/i3d_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/i3d_head.py
Apache-2.0
def pre_logits(self, feats: Tuple[List[Tensor]]) -> Tensor: """The process before the final classification head. The input ``feats`` is a tuple of list of tensor, and each tensor is the feature of a backbone stage. """ if self.with_cls_token: _, cls_token = feats[-1]...
The process before the final classification head. The input ``feats`` is a tuple of list of tensor, and each tensor is the feature of a backbone stage.
pre_logits
python
open-mmlab/mmaction2
mmaction/models/heads/mvit_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/mvit_head.py
Apache-2.0
def forward(self, x: Tuple[List[Tensor]], **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tuple[List[Tensor]]): The input data. Returns: Tensor: The classification scores for input samples. """ x = self.pre_logits(x) ...
Defines the computation performed at every call. Args: x (Tuple[List[Tensor]]): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/mvit_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/mvit_head.py
Apache-2.0
def loss_by_feat(self, cls_scores: Union[Tensor, Tuple[Tensor]], data_samples: SampleList) -> dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (Tensor): Classification prediction results of all class, has shape (b...
Calculate the loss based on the features extracted by the head. Args: cls_scores (Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). data_samples (List[:obj:`ActionDataSample`]): The batch data samples. ...
loss_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/omni_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/omni_head.py
Apache-2.0
def loss(self, feats: Tuple[torch.Tensor], data_samples: SampleList, **kwargs) -> Dict: """Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. da...
Perform forward propagation of head and loss calculation on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: ...
loss
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def loss_by_feat(self, cls_scores: Dict[str, torch.Tensor], data_samples: SampleList) -> Dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, d...
Calculate the loss based on the features extracted by the head. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns: dict: A dict...
loss_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def loss_by_scores(self, cls_scores: torch.Tensor, labels: torch.Tensor) -> Dict: """Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size,...
Calculate the loss based on the features extracted by the head. Args: cls_scores (torch.Tensor): Classification prediction results of all class, has shape (batch_size, num_classes). labels (torch.Tensor): The labels used to calculate the loss. Returns: ...
loss_by_scores
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def predict(self, feats: Tuple[torch.Tensor], data_samples: SampleList, **kwargs) -> SampleList: """Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream ...
Perform forward propagation of head and predict recognition results on the features of the upstream network. Args: feats (tuple[torch.Tensor]): Features from upstream network. data_samples (list[:obj:`ActionDataSample`]): The batch data samples. Returns:...
predict
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def predict_by_feat(self, cls_scores: Dict[str, torch.Tensor], data_samples: SampleList) -> SampleList: """Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (dict[str, torch.Tensor]): The dict of ...
Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (dict[str, torch.Tensor]): The dict of classification scores, data_samples (list[:obj:`ActionDataSample`]): The annotation data of every samples...
predict_by_feat
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def predict_by_scores(self, cls_scores: torch.Tensor, data_samples: SampleList) -> torch.Tensor: """Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape ...
Transform a batch of output features extracted from the head into prediction results. Args: cls_scores (torch.Tensor): Classification scores, has a shape (B*num_segs, num_classes) data_samples (list[:obj:`ActionDataSample`]): The annotation data o...
predict_by_scores
python
open-mmlab/mmaction2
mmaction/models/heads/rgbpose_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/rgbpose_head.py
Apache-2.0
def forward(self, x: Tuple[Tensor], **kwargs) -> None: """Defines the computation performed at every call. Args: x (tuple[torch.Tensor]): The input data. Returns: Tensor: The classification scores for input samples. """ # ([N, channel_slow, T1, H, W], [(...
Defines the computation performed at every call. Args: x (tuple[torch.Tensor]): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/slowfast_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/slowfast_head.py
Apache-2.0
def forward(self, x, num_segs: Optional[int] = None, fcn_test: bool = False, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int, optional): Numbe...
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int, optional): Number of segments into which a video is divided. Defaults to None. fcn_test (bool): Whether to apply full convolution (fcn) testing. ...
forward
python
open-mmlab/mmaction2
mmaction/models/heads/tpn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tpn_head.py
Apache-2.0
def forward(self, x): """Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples. """ # [N, num_segs * hidden_dim] x = x.view(x.size(0), -1) x = self.c...
Defines the computation performed at every call. Args: x (Tensor): The input data. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/trn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/trn_head.py
Apache-2.0
def forward(self, x, num_segs, **kwargs): """Defines the computation performed at every call. Args: x (torch.Tensor): The input data. num_segs (int): Useless in TRNHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is ...
Defines the computation performed at every call. Args: x (torch.Tensor): The input data. num_segs (int): Useless in TRNHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is automatically generated in Recognizer forward phas...
forward
python
open-mmlab/mmaction2
mmaction/models/heads/trn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/trn_head.py
Apache-2.0
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Useless in TSMHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which...
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Useless in TSMHead. By default, `num_segs` is equal to `clip_len * num_clips * num_crops`, which is automatically generated in Recognizer forward phase and ...
forward
python
open-mmlab/mmaction2
mmaction/models/heads/tsm_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsm_head.py
Apache-2.0
def forward(self, x: Tensor, num_segs: int, **kwargs) -> Tensor: """Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Number of segments into which a video is divided. Returns: Tensor: The class...
Defines the computation performed at every call. Args: x (Tensor): The input data. num_segs (int): Number of segments into which a video is divided. Returns: Tensor: The classification scores for input samples.
forward
python
open-mmlab/mmaction2
mmaction/models/heads/tsn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/heads/tsn_head.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs): """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a commo...
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictio...
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def loss(self, batch_inputs, batch_data_samples, **kwargs): """Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj...
Calculate losses from a batch of inputs and data samples. Args: batch_inputs (Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. batch_data_samples (List[:obj:`ActionDataSample`]): The batch data samples. It usual...
loss
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def predict(self, batch_inputs, batch_data_samples, **kwargs): """Define the computation performed at every call when testing.""" confidence_map, start, end = self._forward(batch_inputs) start_scores = start[0].cpu().numpy() end_scores = end[0].cpu().numpy() cls_confidence = (con...
Define the computation performed at every call when testing.
predict
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _get_interp1d_bin_mask(seg_tmin, seg_tmax, tscale, num_samples, num_samples_per_bin): """Generate sample mask for a boundary-matching pair.""" plen = float(seg_tmax - seg_tmin) plen_sample = plen / (num_samples * num_samples_per_bin - 1.0) total_samples...
Generate sample mask for a boundary-matching pair.
_get_interp1d_bin_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _get_interp1d_mask(self): """Generate sample mask for each point in Boundary-Matching Map.""" mask_mat = [] for start_index in range(self.tscale): mask_mat_vector = [] for duration_index in range(self.tscale): if start_index + duration_index < self.tsc...
Generate sample mask for each point in Boundary-Matching Map.
_get_interp1d_mask
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _temporal_anchors(self, tmin_offset=0., tmax_offset=1.): """Generate temporal anchors. Args: tmin_offset (int): Offset for the minimum value of temporal anchor. Default: 0. tmax_offset (int): Offset for the maximum value of temporal anchor. De...
Generate temporal anchors. Args: tmin_offset (int): Offset for the minimum value of temporal anchor. Default: 0. tmax_offset (int): Offset for the maximum value of temporal anchor. Default: 1. Returns: tuple[Sequence[float]]: The minim...
_temporal_anchors
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def _forward(self, x): """Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module. """ # x.shape [batch_size, self.feat_dim, self.tscale] base_feature = self.x_1d_b(x)...
Define the computation performed at every call. Args: x (torch.Tensor): The input data. Returns: torch.Tensor: The output of the module.
_forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bmn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bmn.py
Apache-2.0
def forward(self, inputs, data_samples, mode, **kwargs): """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a commo...
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictio...
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/bsn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/bsn.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor: """Forward call for LGTE. Args: x (torch.Tensor): The input tensor with shape (B, C, L) """ x = x.permute(2, 0, 1) mask = self.mask.repeat(x.size(1), 1, 1, 1) L = x.shape[0] x = self.atten(x, attn_mask=mask.resh...
Forward call for LGTE. Args: x (torch.Tensor): The input tensor with shape (B, C, L)
forward
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def StartEndRegressor(sample_num: int, feat_dim: int) -> nn.Module: """Start and End Regressor in the Temporal Boundary Regressor. Args: sample_num (int): number of samples for the start & end. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and...
Start and End Regressor in the Temporal Boundary Regressor. Args: sample_num (int): number of samples for the start & end. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, ...
StartEndRegressor
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def CenterWidthRegressor(temporal_len: int, feat_dim: int) -> nn.Module: """Center Width in the Temporal Boundary Regressor. Args: temporal_len (int): temporal dimension of the inputs. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regr...
Center Width in the Temporal Boundary Regressor. Args: temporal_len (int): temporal dimension of the inputs. feat_dim (int): feature dimension. Returns: A pytorch module that works as the start and end regressor. The input of the module should have a shape of (B, feat_dim, temp...
CenterWidthRegressor
python
open-mmlab/mmaction2
mmaction/models/localizers/tcanet.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/tcanet.py
Apache-2.0
def generate_candidate_proposals(video_list, video_infos, tem_results_dir, temporal_scale, peak_threshold, tem_results_ext='.csv', ...
Generate Candidate Proposals with given temporal evaluation results. Each proposal file will contain: 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'. Args: video_list (list[int]): List of video indexes to generate proposals. video_infos (list[dict]): List of video_info dict tha...
generate_candidate_proposals
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/bsn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py
Apache-2.0
def generate_bsp_feature(video_list, video_infos, tem_results_dir, pgm_proposals_dir, top_k=1000, bsp_boundary_ratio=0.2, num_sample_start=8, num...
Generate Boundary-Sensitive Proposal Feature with given proposals. Args: video_list (list[int]): List of video indexes to generate bsp_feature. video_infos (list[dict]): List of video_info dict that contains 'video_name'. tem_results_dir (str): Directory to load temporal evaluat...
generate_bsp_feature
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/bsn_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/bsn_utils.py
Apache-2.0
def temporal_iou(proposal_min, proposal_max, gt_min, gt_max): """Compute IoU score between a groundtruth bbox and the proposals. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal b...
Compute IoU score between a groundtruth bbox and the proposals. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal anchor max. gt_min (float): Groundtruth temporal box min. gt_max (float): Groundtruth temporal box max. Ret...
temporal_iou
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
def temporal_iop(proposal_min, proposal_max, gt_min, gt_max): """Compute IoP score between a groundtruth bbox and the proposals. Compute the IoP which is defined as the overlap ratio with groundtruth proportional to the duration of this proposal. Args: proposal_min (list[float]): List of tempor...
Compute IoP score between a groundtruth bbox and the proposals. Compute the IoP which is defined as the overlap ratio with groundtruth proportional to the duration of this proposal. Args: proposal_min (list[float]): List of temporal anchor min. proposal_max (list[float]): List of temporal a...
temporal_iop
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
def soft_nms(proposals, alpha, low_threshold, high_threshold, top_k): """Soft NMS for temporal proposals. Args: proposals (np.ndarray): Proposals generated by network. alpha (float): Alpha value of Gaussian decaying function. low_threshold (float): Low threshold for soft nms. hi...
Soft NMS for temporal proposals. Args: proposals (np.ndarray): Proposals generated by network. alpha (float): Alpha value of Gaussian decaying function. low_threshold (float): Low threshold for soft nms. high_threshold (float): High threshold for soft nms. top_k (int): Top k...
soft_nms
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
def post_processing(result, video_info, soft_nms_alpha, soft_nms_low_threshold, soft_nms_high_threshold, post_process_top_k, feature_extraction_interval): """Post process for temporal proposals generation. Args: result (np.ndarray): Proposals generated by network....
Post process for temporal proposals generation. Args: result (np.ndarray): Proposals generated by network. video_info (dict): Meta data of video. Required keys are 'duration_frame', 'duration_second'. soft_nms_alpha (float): Alpha value of Gaussian decaying function. soft...
post_processing
python
open-mmlab/mmaction2
mmaction/models/localizers/utils/proposal_utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/localizers/utils/proposal_utils.py
Apache-2.0
def forward(self, *args, **kwargs): """Defines the computation performed at every call. Args: *args: The positional arguments for the corresponding loss. **kwargs: The keyword arguments for the corresponding loss. Returns: tor...
Defines the computation performed at every call. Args: *args: The positional arguments for the corresponding loss. **kwargs: The keyword arguments for the corresponding loss. Returns: torch.Tensor: The calculated loss.
forward
python
open-mmlab/mmaction2
mmaction/models/losses/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/base.py
Apache-2.0
def forward(self, reg_score, label, threshold=0.5, ratio_range=(1.05, 21), eps=1e-5): """Calculate Binary Logistic Regression Loss. Args: reg_score (torch.Tensor): Predicted score by model. l...
Calculate Binary Logistic Regression Loss. Args: reg_score (torch.Tensor): Predicted score by model. label (torch.Tensor): Groundtruth labels. threshold (float): Threshold for positive instances. Default: 0.5. ratio_range (tupl...
forward
python
open-mmlab/mmaction2
mmaction/models/losses/binary_logistic_regression_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/binary_logistic_regression_loss.py
Apache-2.0
def tem_loss(pred_start, pred_end, gt_start, gt_end): """Calculate Temporal Evaluation Module Loss. This function calculate the binary_logistic_regression_loss for start and end respectively and returns the sum of their losses. Args: pred_start (torch.Tensor): Predicted sta...
Calculate Temporal Evaluation Module Loss. This function calculate the binary_logistic_regression_loss for start and end respectively and returns the sum of their losses. Args: pred_start (torch.Tensor): Predicted start score by BMN model. pred_end (torch.Tensor): Predi...
tem_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def pem_reg_loss(pred_score, gt_iou_map, mask, high_temporal_iou_threshold=0.7, low_temporal_iou_threshold=0.3): """Calculate Proposal Evaluation Module Regression Loss. Args: pred_score (torch.Tensor): Pred...
Calculate Proposal Evaluation Module Regression Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. high_temporal_iou_threshold (float)...
pem_reg_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def pem_cls_loss(pred_score, gt_iou_map, mask, threshold=0.9, ratio_range=(1.05, 21), eps=1e-5): """Calculate Proposal Evaluation Module Classification Loss. Args: pred_score (torch.Tens...
Calculate Proposal Evaluation Module Classification Loss. Args: pred_score (torch.Tensor): Predicted temporal_iou score by BMN. gt_iou_map (torch.Tensor): Groundtruth temporal_iou score. mask (torch.Tensor): Boundary-Matching mask. threshold (float): Threshold of...
pem_cls_loss
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def forward(self, pred_bm, pred_start, pred_end, gt_iou_map, gt_start, gt_end, bm_mask, weight_tem=1.0, weight_pem_reg=10.0, weight_pem_cls=1.0): """Cal...
Calculate Boundary Matching Network Loss. Args: pred_bm (torch.Tensor): Predicted confidence score for boundary matching map. pred_start (torch.Tensor): Predicted confidence score for start. pred_end (torch.Tensor): Predicted confidence score for end. ...
forward
python
open-mmlab/mmaction2
mmaction/models/losses/bmn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/bmn_loss.py
Apache-2.0
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor, **kwargs) -> torch.Tensor: """Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to...
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate CrossEntropy loss. Returns: torch.Tensor: The returned CrossEntropy loss...
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
def _forward(self, cls_score: torch.Tensor, label: torch.Tensor, **kwargs) -> torch.Tensor: """Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to...
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. kwargs: Any keyword argument to be used to calculate bce loss with logits. Returns: torch.Tensor: The returned bce loss with ...
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/cross_entropy_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/cross_entropy_loss.py
Apache-2.0
def _forward(self, cls_score, label, mask, category_mask): """Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. mask (torch.Tensor): The mask of tags. 0 indicates that the category of this...
Forward function. Args: cls_score (torch.Tensor): The class score. label (torch.Tensor): The ground truth label. mask (torch.Tensor): The mask of tags. 0 indicates that the category of this tag is missing in the label of the video. category_mask (...
_forward
python
open-mmlab/mmaction2
mmaction/models/losses/hvu_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/hvu_loss.py
Apache-2.0
def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size): """Calculate OHEM hinge loss. Args: pred (torch.Tensor): Predicted completeness score. labels (torch.Tensor): Groundtruth class label. is_positive (int): Set to 1 when proposals are positive and ...
Calculate OHEM hinge loss. Args: pred (torch.Tensor): Predicted completeness score. labels (torch.Tensor): Groundtruth class label. is_positive (int): Set to 1 when proposals are positive and set to -1 when proposals are incomplete. ohem_ratio (fl...
forward
python
open-mmlab/mmaction2
mmaction/models/losses/ohem_hinge_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py
Apache-2.0
def backward(ctx, grad_output): """Defines a formula for differentiating the operation with backward mode automatic differentiation.""" labels = ctx.labels slopes = ctx.slopes grad_in = torch.zeros(ctx.shape, device=ctx.slopes.device) for group in range(ctx.num_groups):...
Defines a formula for differentiating the operation with backward mode automatic differentiation.
backward
python
open-mmlab/mmaction2
mmaction/models/losses/ohem_hinge_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ohem_hinge_loss.py
Apache-2.0
def forward(self, activity_score, completeness_score, bbox_pred, proposal_type, labels, bbox_targets, train_cfg): """Calculate Boundary Matching Network Loss. Args: activity_score (torch.Tensor): Predicted activity score. completeness_score (torch.Tensor): Predic...
Calculate Boundary Matching Network Loss. Args: activity_score (torch.Tensor): Predicted activity score. completeness_score (torch.Tensor): Predicted completeness score. bbox_pred (torch.Tensor): Predicted interval center and span of positive proposals. ...
forward
python
open-mmlab/mmaction2
mmaction/models/losses/ssn_loss.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/losses/ssn_loss.py
Apache-2.0
def forward(self, hidden_states: torch.Tensor): """forward function. Args: hidden_states (torch.Tensor): The input. Shape: [b,t,l,c] Returns: TODO """ b = hidden_states.shape[0] output = einops.rearrange(hidden_states, 'b t l c -> (b l) t c') output ...
forward function. Args: hidden_states (torch.Tensor): The input. Shape: [b,t,l,c] Returns: TODO
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.BoolTensor] = None ) -> torch.Tensor: """ Args: pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W]. """ t ...
Args: pixel_values (torch.Tensor): The input image patches. Shape: [B, T, C, H, W].
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/beit3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/beit3d.py
Apache-2.0
def forward(self, x: torch.Tensor): """forward. Args: x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w Returns: features after adapter. The same shape as input. """ if x.shape[1] == 1: # for single frame, return itself. ...
forward. Args: x (torch.Tensor): input features. Shape: [bs, nframes, l, c]. l = 1 + h*w Returns: features after adapter. The same shape as input.
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def __init__(self, input_dim=768, droppath_rate=0.1): """ Kwargs: input_dim (int): The input feature dimension. """ super().__init__() self._input_dim = input_dim self.temporal_attn = MultiheadAttention( input_dim, num_heads=input_dim // 64) ...
Kwargs: input_dim (int): The input feature dimension.
__init__
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/temporal_model.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/temporal_model.py
Apache-2.0
def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]: """Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens....
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids...
build_inputs_with_special_tokens
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/tokenizer.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/tokenizer.py
Apache-2.0
def interpolate_pos_embed_beit(state_dict, new_model): """interpolate the positional embeddings. The spatial pe is relative and temporal pe is absolute. additional temporal pe is padded with 0. Args: state_dict (dict): The state_dict. new_model (nn.Module): The created model. Returns: ...
interpolate the positional embeddings. The spatial pe is relative and temporal pe is absolute. additional temporal pe is padded with 0. Args: state_dict (dict): The state_dict. new_model (nn.Module): The created model. Returns: dict. The state_dict with updated positional embeddings.
interpolate_pos_embed_beit
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def load_temp_embed_with_mismatch(temp_embed_old, temp_embed_new, add_zero=True): """Add/Remove extra temporal_embeddings as needed. https://arxiv.org/abs/2104.00650 shows adding zero paddings works. temp_embed_old: (1, num_frames_old, 1, ...
Add/Remove extra temporal_embeddings as needed. https://arxiv.org/abs/2104.00650 shows adding zero paddings works. temp_embed_old: (1, num_frames_old, 1, d) temp_embed_new: (1, num_frames_new, 1, d) add_zero: bool, if True, add zero, else, interpolate trained embeddings.
load_temp_embed_with_mismatch
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def interpolate_pos_relative_bias_beit(state_dict_old, state_dict_new, patch_shape_new): """ Args: state_dict_old: loaded state dict state_dict_new: state dict for model with new image size patch_shape_new: new model patch_shape ref: https://git...
Args: state_dict_old: loaded state dict state_dict_new: state dict for model with new image size patch_shape_new: new model patch_shape ref: https://github.com/microsoft/unilm/blob/master/beit/run_class_finetuning.py # noqa: E501
interpolate_pos_relative_bias_beit
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/utils.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/utils.py
Apache-2.0
def forward(self, inputs, data_samples, mode: str = 'loss'): """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a c...
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictio...
forward
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0
def encode_vision(self, image): """encode image / videos as features. Args: image (torch.Tensor): The input images. Returns: tuple. - vision_embeds (torch.Tensor): The features of all patches. Shape: [B,T,L,C]. - pooled_vision_embeds (torch.T...
encode image / videos as features. Args: image (torch.Tensor): The input images. Returns: tuple. - vision_embeds (torch.Tensor): The features of all patches. Shape: [B,T,L,C]. - pooled_vision_embeds (torch.Tensor): The pooled features. ...
encode_vision
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0
def encode_text(self, text): """encode text. Args: text (dict): The output of huggingface's `PreTrainedTokenizer`. contains keys: - input_ids (torch.Tensor): Token ids to be fed to a model. Shape: [B,L]. - attention_mask (to...
encode text. Args: text (dict): The output of huggingface's `PreTrainedTokenizer`. contains keys: - input_ids (torch.Tensor): Token ids to be fed to a model. Shape: [B,L]. - attention_mask (torch.Tensor): The mask indicate padded to...
encode_text
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu.py
Apache-2.0
def loss( self, inputs: torch.Tensor, data_samples: Optional[List[ActionDataSample]] = None, ) -> Dict[str, torch.tensor]: """Calculate losses from a batch of inputs and data samples. Args: inputs (dict): A batch of inputs. The input tensor with of ...
Calculate losses from a batch of inputs and data samples. Args: inputs (dict): A batch of inputs. The input tensor with of at least one modality. For image, the value is a tensor of shape (N, C, ...) in general. For text, the value is a dict of tokeni...
loss
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def extract_feat( self, images: torch.Tensor = None, data_samples: List[ActionDataSample] = None, return_texts=True, ) -> Dict[str, torch.Tensor]: """Extract features from the input dict. Args: images (tensor, optional): The images to extract features. ...
Extract features from the input dict. Args: images (tensor, optional): The images to extract features. Defaults to None. data_samples (list, optional): The data samples containing texts to extract features. Defaults to None. return_texts (bool...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def compute_score_matrix_i2t(self, img_feats, img_embeds, text_feats, text_embeds, text_atts): """Compare the score matrix for image-to-text retrieval. Every image should compare to all the text features. Args: img_feats (torch.Tensor): The input img...
Compare the score matrix for image-to-text retrieval. Every image should compare to all the text features. Args: img_feats (torch.Tensor): The input img feats tensor with shape (M, C). M stands for numbers of samples on a single GPU. img_embeds (torch.Tensor): Th...
compute_score_matrix_i2t
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def compute_score_matrix_t2i(self, img_feats, img_embeds, text_feats, text_embeds, text_atts): """Compare the score matrix for text-to-image retrieval. Every text should compare to all the image features. Args: img_feats (torch.Tensor): The input img...
Compare the score matrix for text-to-image retrieval. Every text should compare to all the image features. Args: img_feats (torch.Tensor): The input img feats tensor with shape (M, C). M stands for numbers of samples on a single GPU. img_embeds (torch.Tensor): Th...
compute_score_matrix_t2i
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def _get_predictions(self, result: torch.Tensor, data_samples: List[ActionDataSample], mode: str = 'i2t'): """Post-process the output of retriever. Args: result (torch.Tensor): Score matrix of single retrieve, ...
Post-process the output of retriever. Args: result (torch.Tensor): Score matrix of single retrieve, either from image or text. data_samples (List[ActionDataSample], optional): The annotation data of every samples. mode (str): Retrieve mode, ei...
_get_predictions
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret.py
Apache-2.0
def predict(self, inputs, data_samples, **kwargs): """Predict captions from a batch of inputs. Args: images (torch.Tensor): The input images tensor with shape (N, C, ...) in general. data_samples (List[DataSample], optional): The annotation data o...
Predict captions from a batch of inputs. Args: images (torch.Tensor): The input images tensor with shape (N, C, ...) in general. data_samples (List[DataSample], optional): The annotation data of every samples. Defaults to None. **kwargs: Other...
predict
python
open-mmlab/mmaction2
mmaction/models/multimodal/vindlu/vindlu_ret_mc.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/multimodal/vindlu/vindlu_ret_mc.py
Apache-2.0
def init_weights(self) -> None: """Default init_weights for conv(msra) and norm in ConvModule.""" for m in self.modules(): if isinstance(m, nn.Conv3d): xavier_init(m, distribution='uniform') if isinstance(m, nn.BatchNorm3d): constant_init(m, 1) ...
Default init_weights for conv(msra) and norm in ConvModule.
init_weights
python
open-mmlab/mmaction2
mmaction/models/necks/tpn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/necks/tpn.py
Apache-2.0
def loss(self, inputs: torch.Tensor, data_samples: SampleList, **kwargs) -> dict: """Calculate losses from a batch of inputs and data samples. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. ...
Calculate losses from a batch of inputs and data samples. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch data samples. It usually includ...
loss
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def predict(self, inputs: torch.Tensor, data_samples: SampleList, **kwargs) -> SampleList: """Predict results from a batch of inputs and data samples with post- processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usuall...
Predict results from a batch of inputs and data samples with post- processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. These should usually be mean centered and std scaled. data_samples (List[``ActionDataSample``]): The batch da...
predict
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def _forward(self, inputs: torch.Tensor, stage: str = 'backbone', **kwargs) -> ForwardResults: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Ra...
Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features f...
_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/base.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/base.py
Apache-2.0
def extract_feat(self, inputs: torch.Tensor, stage: str = 'neck', data_samples: SampleList = None, test_mode: bool = False) -> tuple: """Extract features of different stages. Args: inputs (Tensor): The input...
Extract features of different stages. Args: inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``neck``. data_samples (List[:obj:`ActionDataSample`]): Action data samples, which are only needed in trai...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer2d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer2d.py
Apache-2.0
def extract_feat(self, inputs: Tensor, stage: str = 'neck', data_samples: OptSampleList = None, test_mode: bool = False) -> tuple: """Extract features of different stages. Args: inputs (torch.Tensor): The in...
Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'neck'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are o...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d.py
Apache-2.0
def extract_feat(self, inputs: Dict[str, torch.Tensor], stage: str = 'backbone', data_samples: OptSampleList = None, test_mode: bool = False) -> Tuple: """Extract features. Args: inputs (dict[str, torch.Tens...
Extract features. Args: inputs (dict[str, torch.Tensor]): The multi-modal input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, whic...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer3d_mm.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer3d_mm.py
Apache-2.0
def extract_feat(self, batch_inputs: Tensor, stage: str = 'backbone', **kwargs) -> tuple: """Extract features of different stages. Args: batch_inputs (Tensor): The input data. stage (str): Which stage to output the f...
Extract features of different stages. Args: batch_inputs (Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``backbone``. Returns: Tensor: The extracted features. dict: A dict recording the kwargs for do...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_audio.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_audio.py
Apache-2.0
def extract_feat(self, inputs: torch.Tensor, stage: str = 'backbone', **kwargs) -> Tuple: """Extract features at the given stage. Args: inputs (torch.Tensor): The input skeleton with shape of `(B, num_clips, num_...
Extract features at the given stage. Args: inputs (torch.Tensor): The input skeleton with shape of `(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`. stage (str): The stage to output the features. Defaults to ``'backbone'``. Returns: ...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_gcn.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_gcn.py
Apache-2.0
def forward(self, *data_samples, mode: str, **kwargs) -> ForwardResults: """The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing,...
The unified entry for a forward process in both training and test. The method should accept three modes: - ``tensor``: Forward the whole network and return tensor or tuple of tensor without any post-processing, same as a common nn.Module. - ``predict``: Forward and return the predictio...
forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def loss(self, data_samples: Sequence[SampleList]) -> dict: """Calculate losses from a batch of inputs and data samples. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: ...
Calculate losses from a batch of inputs and data samples. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: dict: A dictionary of loss components.
loss
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def predict(self, data_samples: Sequence[SampleList]) -> SampleList: """Predict results from a batch of inputs and data samples with post- processing. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the ...
Predict results from a batch of inputs and data samples with post- processing. Args: data_samples (Sequence[SampleList]): a sequence of SampleList. Each SampleList contains data samples from the same data source. Returns: List[``ActionDataSample``]: Retu...
predict
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def _forward(self, inputs: torch.Tensor, stage: str = 'backbone', **kwargs) -> ForwardResults: """Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Ra...
Network forward process. Usually includes backbone, neck and head forward without any post-processing. Args: inputs (torch.Tensor): Raw Inputs of the recognizer. stage (str): Which stage to output the features. Returns: Union[tuple, torch.Tensor]: Features f...
_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def _run_forward(self, data: Union[dict, tuple, list], mode: str) -> Union[Dict[str, torch.Tensor], list]: """Unpacks data for :meth:`forward` Args: data (dict or tuple or list): Data sampled from dataset. mode (str): Mode of forward. Returns: ...
Unpacks data for :meth:`forward` Args: data (dict or tuple or list): Data sampled from dataset. mode (str): Mode of forward. Returns: dict or list: Results of training or testing mode.
_run_forward
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def extract_feat(self, inputs: torch.Tensor, stage: str = 'backbone', test_mode: bool = False) -> tuple: """Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage ...
Extract features of different stages. Args: inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. Defaults to ``'backbone'``. test_mode (bool): Whether in test mode. Defaults to False. Returns: torch.T...
extract_feat
python
open-mmlab/mmaction2
mmaction/models/recognizers/recognizer_omni.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/recognizers/recognizer_omni.py
Apache-2.0
def loss(self, x: Union[Tensor, Tuple[Tensor]], rpn_results_list: InstanceList, data_samples: SampleList, **kwargs) -> dict: """Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: ...
Perform forward propagation and loss calculation of the detection roi on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rpn_results_list (List[:obj:`InstanceData`]): List of region ...
loss
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def _bbox_forward(self, x: Union[Tensor, Tuple[Tensor]], rois: Tensor, batch_img_metas: List[dict], **kwargs) -> dict: """Box head forward function used in both training and testing. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the...
Box head forward function used in both training and testing. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rois (Tensor): RoIs with the shape (n, 5) where the first column indicates batch id of each RoI. ...
_bbox_forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def bbox_loss(self, x: Union[Tensor, Tuple[Tensor]], sampling_results: List[SamplingResult], batch_img_metas: List[dict], **kwargs) -> dict: """Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: ...
Perform forward propagation and loss calculation of the bbox head on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. sampling_results (List[SamplingResult]): Sampling results. ...
bbox_loss
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def predict(self, x: Union[Tensor, Tuple[Tensor]], rpn_results_list: InstanceList, data_samples: SampleList, **kwargs) -> InstanceList: """Perform forward propagation of the roi head and predict detection results on the features of the upstream network. ...
Perform forward propagation of the roi head and predict detection results on the features of the upstream network. Args: x (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. rpn_results_list (List[:obj:`InstanceData`]): list of regio...
predict
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def predict_bbox(self, x: Tuple[Tensor], batch_img_metas: List[dict], rpn_results_list: InstanceList, rcnn_test_cfg: ConfigType) -> InstanceList: """Perform forward propagation of the bbox head and predict detection results on the features of the upstream networ...
Perform forward propagation of the bbox head and predict detection results on the features of the upstream network. Args: x (tuple[Tensor]): Feature maps of all scale level. batch_img_metas (list[dict]): List of image information. rpn_results_list (list[:obj:`Instanc...
predict_bbox
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_head.py
Apache-2.0
def forward(self, x: Tensor) -> Tensor: """Computes the classification logits given ROI features.""" if self.dropout_before_pool and self.dropout_ratio > 0: x = self.dropout(x) x = self.temporal_pool(x) x = self.spatial_pool(x) if not self.dropout_before_pool and se...
Computes the classification logits given ROI features.
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def get_recall_prec(pred_vec: Tensor, target_vec: Tensor) -> tuple: """Computes the Recall/Precision for both multi-label and single label scenarios. Note that the computation calculates the micro average. Note, that in both cases, the concept of correct/incorrect is the same. ...
Computes the Recall/Precision for both multi-label and single label scenarios. Note that the computation calculates the micro average. Note, that in both cases, the concept of correct/incorrect is the same. Args: pred_vec (tensor[N x C]): each element is either 0 or 1 ...
get_recall_prec
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def topk_accuracy(self, pred: Tensor, target: Tensor, thr: float = 0.5) -> tuple: """Computes the Top-K Accuracies for both single and multi-label scenarios.""" # Define Target vector: target_bool = target > 0.5 #...
Computes the Top-K Accuracies for both single and multi-label scenarios.
topk_accuracy
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def loss_and_target(self, cls_score: Tensor, rois: Tensor, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, **kwargs) -> dict: """Calculate the loss based on the features extracted by the bbox head. Args: cls_score (Tensor):...
Calculate the loss based on the features extracted by the bbox head. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, num_classes) rois (Tensor): RoIs with the shape ...
loss_and_target
python
open-mmlab/mmaction2
mmaction/models/roi_heads/bbox_heads/bbox_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/bbox_heads/bbox_head.py
Apache-2.0
def forward(self, feat: Union[Tensor, Tuple[Tensor]], rois: Tensor) -> tuple: """Forward function for extract roi features. Args: feat (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. The shape of feat is N, C, T, H, W. ...
Forward function for extract roi features. Args: feat (Tensor or Tuple[Tensor]): The image features extracted by the upstream network. The shape of feat is N, C, T, H, W. rois (Tensor): Input RoIs, shape (k, 5). Returns: tuple: A tuple of roi feature...
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/roi_extractors/single_straight3d.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/roi_extractors/single_straight3d.py
Apache-2.0
def forward(self, x, feat, rois, **kwargs): """Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. feat (torch.Tensor): The context feature. rois (torch.Tensor): The regions of interest. Returns: to...
Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. feat (torch.Tensor): The context feature. rois (torch.Tensor): The regions of interest. Returns: torch.Tensor: The RoI features that have interacted with ...
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/acrn_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/acrn_head.py
Apache-2.0
def sample_lfb(self, rois, img_metas): """Sample long-term features for each ROI feature.""" inds = rois[:, 0].type(torch.int64) lt_feat_list = [] for ind in inds: lt_feat_list.append(self.lfb[img_metas[ind]['img_key']]) lt_feat = torch.stack(lt_feat_list, dim=0) ...
Sample long-term features for each ROI feature.
sample_lfb
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/fbo_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/fbo_head.py
Apache-2.0
def __getitem__(self, img_key): """Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb` is an instance of class LFB.""" video_id, timestamp = img_key.split(',') return self.sample_long_term_features(video_id, int(timestamp))
Sample long term features like `lfb['0f39OWEqJ24,0902']` where `lfb` is an instance of class LFB.
__getitem__
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb.py
Apache-2.0
def forward(self, x, rois, img_metas, **kwargs): """Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. rois (torch.Tensor): The regions of interest. img_metas (List[dict]): The meta information of the data. Re...
Defines the computation performed at every call. Args: x (torch.Tensor): The extracted RoI feature. rois (torch.Tensor): The regions of interest. img_metas (List[dict]): The meta information of the data. Returns: torch.Tensor: The RoI features that have ...
forward
python
open-mmlab/mmaction2
mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/roi_heads/shared_heads/lfb_infer_head.py
Apache-2.0
def _freeze_stages(self) -> None: """Prevent all the parameters from being optimized before ``self.frozen_layers``.""" if self.frozen_layers >= 0: top_layers = [ 'ln_final', 'text_projection', 'logit_scale', 'visual.ln_post', 'visual.proj' ...
Prevent all the parameters from being optimized before ``self.frozen_layers``.
_freeze_stages
python
open-mmlab/mmaction2
mmaction/models/similarity/clip_similarity.py
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/models/similarity/clip_similarity.py
Apache-2.0