code
stringlengths
17
6.64M
def Ucf101(
    data_path: str,
    clip_sampler: ClipSampler,
    transform: Callable[[dict[str, Any]], dict[str, Any]] | None = None,
    video_path_prefix: str = '',
    split_id: int = 1,
    split_type: str = 'train',
    decode_audio: bool = True,
    decoder: str = 'pyav',
    decoder_args: DictConfig | None = None,
) -> LabeledVideoDataset:
    """A helper function to create ``LabeledVideoDataset`` object for the Ucf101 dataset.

    Args:
        data_path: Path to the data. The path type defines how the data
            should be read:

            * For a file path, the file is read and each line is parsed into a
              video path and label.
            * For a directory, the directory structure defines the classes
              (i.e. each subdirectory is a class).

        clip_sampler: Defines how clips should be sampled from each
            video. See the clip sampling documentation for more information.

        transform: This callable is evaluated on the clip output before
            the clip is returned. It can be used for user defined preprocessing and
            augmentations to the clips. See the ``LabeledVideoDataset`` class for clip
            output format.

        video_path_prefix: Path to root directory with the videos that are
            loaded in ``LabeledVideoDataset``. All the video paths before loading
            are prefixed with this path.

        split_id: Fold id to be loaded. Options are: :math:`1`, :math:`2` or :math:`3`.

        split_type: Split/Fold type to be loaded. Options are: ``'train'`` or ``'test'``.

        decode_audio: If ``True``, also decode audio from video.

        decoder: Defines what type of decoder used to decode a video.

        decoder_args: Arguments to configure the decoder. Defaults to an empty
            config when ``None``.

    Returns:
        The dataset instantiated.
    """
    # Bug fix: the default used to be a shared mutable `{}`; use None and
    # create a fresh mapping per call instead.
    if decoder_args is None:
        decoder_args = {}
    torch._C._log_api_usage_once('PYTORCHVIDEO.dataset.Ucf101')
    labeled_video_paths = Ucf101LabeledVideoPaths.from_path(
        data_path,
        split_id=split_id,
        split_type=split_type,
        # The frame decoder reads directories of extracted frames instead of
        # encoded containers, so the path list must be built accordingly.
        frames=(DecoderType(decoder) == DecoderType.FRAME),
    )
    labeled_video_paths.path_prefix = video_path_prefix
    dataset = LabeledVideoDataset(
        labeled_video_paths, clip_sampler, transform, decode_audio, decoder, decoder_args
    )
    return dataset
def get_time_difference_indices(indices: NDArray, add_last_indice: bool=False, max_last_indice: int=(- 1)) -> Tuple[(NDArray, NDArray)]:
    """Get the indices of frames when time difference is desired.

    Time difference requires consecutive frame indices to work. This function
    inserts the missing successor indices (marked as not kept) while avoiding
    duplicates, and returns a mask selecting the original entries once the
    differences have been computed.

    Args:
        indices: The base indices.
        add_last_indice: Whether to append a successor to the last index to
            make the approximation.
        max_last_indice: When appending a successor to the last index, make
            sure not to produce an index beyond this maximum.

    Returns:
        The new indices and the boolean mask of indices to keep after time
        difference is computed.
    """
    indices = np.asarray(indices)
    # A successor frame must be inserted after every index whose direct
    # successor is not already the next entry of the array.
    needs_successor = indices != (np.roll(indices, -1) - 1)
    needs_successor[-1] = add_last_indice
    out_indices = []
    keep = []
    for index, insert_next in zip(indices, needs_successor):
        out_indices.append(index)
        keep.append(True)
        if insert_next:
            out_indices.append(index + 1)
            keep.append(False)
    if add_last_indice:
        if indices[-1] + 1 > max_last_indice:
            # The successor would overflow the video: reuse the last index
            # itself as the throwaway frame instead.
            out_indices[-1] = indices[-1]
            keep[-1], keep[-2] = True, False
    else:
        # No successor wanted after the last index: pair it with its
        # predecessor instead so the difference can still be taken.
        out_indices.insert(-1, indices[-1] - 1)
        keep[-1] = True
        keep.insert(-1, False)
    return np.array(out_indices), np.array(keep)
def get_video_to_frame_path_fn(fn_type: str='idx', zeros: int=8, incr: int=1) -> Callable:
    """Get the function to get video frame paths.

    Args:
        fn_type: The function to use. Options are:

            * idx: it retrieves a video path frame from video path and the index of the frame.

        zeros: The number of zeroes (zero-padding width) used to name the frame path.
        incr: Offset added to the frame index before formatting; frame files
            are commonly 1-based on disk while indices are 0-based.

    Raises:
        NotImplementedError: If ``fn_type`` is not supported.

    Returns:
        The function to retrieve frames from a video path.
    """
    if fn_type == 'idx':
        def fn(video_path, frame_idx):
            # e.g. zeros=8, incr=1, frame_idx=0 -> '<video_path>/00000001.jpg'
            return f'{video_path}/{(frame_idx + incr):0{zeros}d}.jpg'
        return fn
    raise NotImplementedError(f'{fn_type} unknown.')
def random_subsample(x: List, num_samples: int=8, time_difference: bool=False) -> Tuple[NDArray]:
    """Randomly subsample a list of indices.

    Args:
        x: The list to subsample.
        num_samples: The number of samples to keep.
        time_difference: If ``True``, retrieve indices to be able to apply time difference.

    Returns:
        The indices and the boolean mask of indices to keep after time difference is computed.
    """
    t = len(x)
    assert (num_samples > 0) and (t > 0) and (t >= num_samples)
    # Bug fix: the previous implementation called
    # `np.random.choice(indices, replace=False)` without a `size`, which
    # returns a 0-d scalar and made the following `np.sort` raise an
    # AxisError — the function could never return `num_samples` elements.
    # Draw `num_samples` distinct positions and keep temporal order.
    positions = np.sort(np.random.choice(t, size=num_samples, replace=False))
    indices = np.array(x)[positions]
    if time_difference:
        indices, keep_indices = get_time_difference_indices(indices)
    else:
        keep_indices = np.ones(len(indices), dtype=bool)
    return (indices, keep_indices)
def uniform_subsample(x: List, num_samples: int=8, time_difference: bool=False) -> Tuple[NDArray]:
    """Uniformly subsample a list of indices.

    Args:
        x: The list to subsample.
        num_samples: The number of samples to keep.
        time_difference: If ``True``, retrieve indices to be able to apply time difference.

    Returns:
        The indices and the boolean mask of indices to keep after time difference is computed.
    """
    length = len(x)
    assert (num_samples > 0) and (length > 0)
    # Evenly spaced positions over [0, length - 1]; duplicates may occur when
    # num_samples exceeds the available length.
    positions = np.linspace(0, length - 1, num_samples)
    positions = np.clip(positions, 0, length - 1).astype(int)
    sampled = np.array(x)[positions]
    if not time_difference:
        return sampled, np.ones(len(sampled), dtype=bool)
    return get_time_difference_indices(sampled)
def get_subsample_fn(subsample_type: str='uniform', num_samples: int=8, time_difference: bool=False) -> Callable:
    """Get the function to subsample video frame indices.

    Args:
        subsample_type: The function to use. Options are: ``'uniform'``, ``'random'``.
        num_samples: The number of samples to keep.
        time_difference: If ``True``, retrieve indices to be able to apply time_difference.

    Raises:
        NotImplementedError: If ``subsample_type`` is not supported.

    Returns:
        The function to subsample indices.
    """
    # Resolve the base sampler first, then bind the shared keyword arguments.
    if subsample_type == 'uniform':
        base_fn = uniform_subsample
    elif subsample_type == 'random':
        base_fn = random_subsample
    else:
        raise NotImplementedError(f'{subsample_type} unknown.')
    return partial(base_fn, num_samples=num_samples, time_difference=time_difference)
def remove_suffix(file: Path) -> Path:
    """Remove the suffix from a path.

    Args:
        file: The path.

    Returns:
        The path without the suffix.
    """
    # Path() also accepts plain strings, so callers may pass either.
    path = Path(file)
    return path.with_suffix('')
def get_raw_video_duration(root_folder: str, raw_video: str) -> int:
    """Get the video duration from a video extracted in frames folder.

    The duration is the number of frame files listed in the directory that
    shares the video's name without its extension.

    Args:
        root_folder: The root folder of the videos.
        raw_video: The video path.

    Returns:
        The video duration (frame count).
    """
    frame_dir = remove_suffix(Path(root_folder) / raw_video)
    return len(g_pathmgr.ls(frame_dir))
def get_shard_indices(dataset_size: int, shuffle_shards=True, seed: int=42) -> List[int]:
    'Retrieve the indices for the shard.\n\n    Args:\n        dataset_size: Complete size of the dataset.\n        shuffle_shards: Whether to shuffle before sharding.\n        seed: Seed for sharding.\n\n    Raises:\n        NotImplementedError: If called without distributing initialized.\n\n    Returns:\n        The shard indices.\n    '
    # Single-process run: the (only) shard owns every index.
    if (get_world_size() == 1):
        return list(torch.arange(dataset_size, dtype=torch.int32))
    if ((not dist.is_available()) or (not dist.is_initialized())):
        raise NotImplementedError('Sharding should only be performed during distributed training.')
    num_shards = dist.get_world_size()
    shard_id = dist.get_rank()
    if shuffle_shards:
        # Same seed on every rank -> identical permutation -> disjoint shards.
        g = torch.Generator()
        g.manual_seed(seed)
        global_indices = torch.randperm(dataset_size, generator=g, dtype=torch.int32)
    else:
        global_indices = torch.arange(dataset_size, dtype=torch.int32)
    # Every shard gets `dataset_size // num_shards` indices; the first
    # `dataset_size % num_shards` shards each receive one extra index.
    indices_per_shard = (dataset_size // num_shards)
    remainder_indices = (dataset_size % num_shards)
    # Inclusive [start, end] bounds for shard 0, then walked forward up to
    # this rank's shard: shard `s` is enlarged by one when `s < remainder`.
    start_indice_shard = 0
    end_indice_shard = ((indices_per_shard - 1) + (1 if (remainder_indices > 0) else 0))
    for id in range(0, shard_id):
        start_indice_shard = (end_indice_shard + 1)
        end_indice_shard = (((start_indice_shard + indices_per_shard) - 1) + (1 if ((id + 1) < remainder_indices) else 0))
    indices = global_indices[torch.arange(start_indice_shard, (end_indice_shard + 1))]
    # NOTE(review): this returns a list of 0-d int32 tensors, not Python
    # ints, despite the `List[int]` annotation — confirm whether callers
    # rely on tensor elements before changing.
    return list(indices)
class VideoPathHandler():
    'Utility class that handles all deciphering and caching of video paths for encoded and frame videos.'

    def __init__(self) -> None:
        # Maps frame-video directories to their ordered frame paths so that
        # repeated loads of the same video skip re-listing the directory.
        self.path_order_cache = {}

    def video_from_path(self, filepath: str, decode_audio=False, decoder='pyav', num_frames: Optional[int]=None, **kwargs) -> Video:
        """Retrieve a video from the specified path.

        Args:
            filepath: The path to the video.
            decode_audio: If True, decode audio.
            decoder: The decoder to use. Options are:

                * ``'pyav'``
                * ``'torchvision'``
                * ``'frame'``

            num_frames: If not ``None``, number of frames in the video for frame decoder.
            **kwargs: additional parameters given to the decoders.

        Returns:
            The decoded video object for ``filepath``.

        Raises:
            NotImplementedError: If the decoder type is not supported.
        """
        if (DecoderType(decoder) == DecoderType.PYAV):
            from pytorchvideo.data.encoded_video_pyav import EncodedVideoPyAV
            # Read the whole container into memory; g_pathmgr abstracts the
            # underlying storage (local file system or remote).
            with g_pathmgr.open(filepath, 'rb') as fh:
                video_file = io.BytesIO(fh.read())
            return EncodedVideoPyAV(video_file, pathlib.Path(filepath).name, decode_audio, **kwargs)
        elif (DecoderType(decoder) == DecoderType.TORCHVISION):
            from pytorchvideo.data.encoded_video_torchvision import EncodedVideoTorchVision
            with g_pathmgr.open(filepath, 'rb') as fh:
                video_file = io.BytesIO(fh.read())
            return EncodedVideoTorchVision(video_file, pathlib.Path(filepath).name, decode_audio, **kwargs)
        elif (DecoderType(decoder) == DecoderType.FRAME):
            from eztorch.datasets.decoders.frame_video import FrameVideo
            # Frame directories carry no audio stream.
            assert (not decode_audio), 'decode_audio must be False when using FrameVideo'
            return FrameVideo.from_directory(filepath, path_order_cache=self.path_order_cache, num_frames=num_frames, **kwargs)
        else:
            raise NotImplementedError(f'Unknown decoder type {decoder}')
class LinearClassifierEvaluation(pl.LightningModule):
    """Linear classifier evaluation for self-supervised learning.

    The pretrained trunk is loaded, frozen and kept in eval mode; only the
    classifier head is trained.

    Args:
        trunk: Config to build a trunk.
        classifier: Config to build a classifier.
        optimizer: Config to build an optimizer.
        pretrained_trunk_path: Path to the pretrained trunk file.
        trunk_pattern: Pattern to retrieve the trunk model in checkpoint state_dict and delete the key.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        val_time_augmentation: If not ``None``, ensembling method for test time augmentation used at validation.
        test_time_augmentation: If not ``None``, ensembling method for test time augmentation used at test.
    """

    def __init__(self, trunk: DictConfig, classifier: DictConfig, optimizer: DictConfig, pretrained_trunk_path: str, trunk_pattern: str='^(trunk\\.)', train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, val_time_augmentation: Optional[DictConfig]=None, test_time_augmentation: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.optimizer_cfg = optimizer
        # Extract the trunk weights from the checkpoint, then strip the
        # pattern prefix so the keys match a freshly instantiated trunk.
        trunk_state_dict = get_sub_state_dict_from_pl_ckpt(checkpoint_path=pretrained_trunk_path, pattern=trunk_pattern)
        trunk_state_dict = remove_pattern_in_keys_from_dict(d=trunk_state_dict, pattern=trunk_pattern)
        self.trunk = hydra.utils.instantiate(trunk)
        self.trunk.load_state_dict(trunk_state_dict)
        # Linear evaluation protocol: the trunk is frozen.
        for param in self.trunk.parameters():
            param.requires_grad = False
        self.classifier = hydra.utils.instantiate(classifier)
        self.train_transform = hydra.utils.instantiate(train_transform) if train_transform is not None else None
        self.val_transform = hydra.utils.instantiate(val_transform) if val_transform is not None else None
        self.test_transform = hydra.utils.instantiate(test_transform) if test_transform is not None else None
        self.val_time_augmentation = get_test_time_augmentation_fn(**val_time_augmentation) if val_time_augmentation else None
        self.test_time_augmentation = get_test_time_augmentation_fn(**test_time_augmentation) if test_time_augmentation else None

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters (classifier only; the trunk is frozen)."""
        return list(self.classifier.parameters())

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return self.classifier.num_layers

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        # Bug fix: the layer id was computed but never returned.
        return self.classifier.get_param_layer_id(name[len('classifier.'):])

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        """Total training steps inferred from datamodule and devices."""
        if self.trainer.datamodule is not None:
            return self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size
        return None

    def on_fit_start(self) -> None:
        # Metrics are created here because the number of classes is only
        # known once the datamodule is attached to the trainer.
        num_classes = self.trainer.datamodule.num_classes
        task = 'binary' if num_classes <= 2 else 'multiclass'
        self.train_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.train_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)
        self.val_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.val_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def on_test_start(self) -> None:
        num_classes = self.trainer.datamodule.num_classes
        task = 'binary' if num_classes <= 2 else 'multiclass'
        self.test_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.test_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def forward(self, x: Tensor) -> Dict[str, Any]:
        """Extract frozen trunk features and classify them."""
        with torch.no_grad():
            h = self.trunk(x)
        preds = self.classifier(h)
        return {'preds': preds, 'h': h}

    def configure_optimizers(self) -> Dict[Any, Any]:
        """Instantiate optimizer and optional scheduler from the config."""
        optimizer, scheduler = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.training_steps_per_epoch, model=self)
        if scheduler is None:
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def shared_step(self, x: Tensor):
        """Compute classifier logits on frozen trunk features."""
        with torch.no_grad():
            h = self.trunk(x)
        preds = self.classifier(h)
        return preds

    def on_train_epoch_start(self) -> None:
        # Keep the frozen trunk in eval mode so BatchNorm/Dropout stay fixed.
        self.trunk.eval()

    def _eval_forward(self, batch, transform, time_augmentation):
        """Shared val/test path: optional transform, frozen forward and
        optional test-time-augmentation ensembling. Returns (preds, targets)."""
        if time_augmentation is not None:
            x, targets, ids = batch['input'], batch['label'], batch['idx']
            if transform is not None:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = transform(x)
            preds = self.shared_step(x)
            # Ensemble per-view probabilities of the same sample id.
            preds = preds.softmax(-1)
            preds, targets, ids = time_augmentation(preds, targets, ids)
        else:
            x, targets = batch['input'], batch['label']
            if transform is not None:
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = transform(x)
            preds = self.shared_step(x)
        return preds, targets

    def training_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        x, targets = batch['input'], batch['label']
        if self.train_transform is not None:
            # Transforms run without autograd and in fp32.
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.train_transform(x)
        preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        acc_1 = self.train_acc_1(preds, targets)
        acc_5 = self.train_acc_5(preds, targets)
        self.log('train/loss', loss, on_epoch=True)
        self.log('train/acc_1', acc_1, on_epoch=True, prog_bar=True)
        self.log('train/acc_5', acc_5, on_epoch=True)
        return loss

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        preds, targets = self._eval_forward(batch, self.val_transform, self.val_time_augmentation)
        loss = nn.functional.cross_entropy(preds, targets)
        self.val_acc_1(preds, targets)
        self.val_acc_5(preds, targets)
        self.log('val/loss', loss)
        self.log('val/acc_1', self.val_acc_1, prog_bar=True)
        self.log('val/acc_5', self.val_acc_5)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        preds, targets = self._eval_forward(batch, self.test_transform, self.test_time_augmentation)
        loss = nn.functional.cross_entropy(preds, targets)
        self.test_acc_1(preds, targets)
        self.test_acc_5(preds, targets)
        self.log('test/loss', loss)
        self.log('test/acc_1', self.test_acc_1, prog_bar=True)
        self.log('test/acc_5', self.test_acc_5)
        return loss
def compute_moco_loss(q: Tensor, k: Tensor, k_global: Tensor, use_keys: bool, queue: Tensor, temp: float=0.2, rank: int=0) -> Tensor:
    """Compute the MoCo (InfoNCE) loss.

    Args:
        q: The representations of the queries.
        k: The representations of the keys.
        k_global: The global representations of the keys.
        use_keys: Whether to use the non-positive elements from key.
        queue: The queue of negative representations of shape ``(dim, num_negatives)``,
            or ``None`` (only valid when ``use_keys`` is ``True``).
        temp: Temperature applied to the query similarities.
        rank: Rank of the device for positive labels.

    Returns:
        The loss.
    """
    # NOTE: the docstring previously claimed this computed the SCE loss —
    # it is the standard MoCo contrastive cross-entropy.
    batch_size = q.shape[0]
    if use_keys:
        # The positive of query i is the key at this rank's offset in the
        # global key batch; all other keys (and the queue) are negatives.
        labels = torch.arange(batch_size, dtype=torch.long, device=q.device) + batch_size * rank
        sim_k = torch.einsum('nc,mc->nm', [q, k_global])
        if queue is not None:
            sim_queue = torch.einsum('nc,ck->nk', [q, queue])
            sim = torch.cat([sim_k, sim_queue], dim=1)
        else:
            sim = sim_k
    else:
        # Only the paired key is positive, stored in column 0; all the
        # negatives come from the queue.
        sim_k = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        sim_queue = torch.einsum('nc,ck->nk', [q, queue])
        sim = torch.cat([sim_k, sim_queue], dim=1)
        labels = torch.zeros(batch_size, dtype=torch.long, device=q.device)
    logits = sim / temp
    loss = nn.functional.cross_entropy(logits, labels)
    return loss
def compute_mocov3_loss(q: Tensor, k: Tensor, temp: float=1.0, rank: int=0) -> Tensor:
    """Compute the MoCov3 loss.

    Args:
        q: The representations of the queries.
        k: The global representations of the keys.
        temp: Temperature for softmax.
        rank: Rank of the device for positive labels.

    Returns:
        The loss.
    """
    num_local = q.shape[0]
    # The positive for query i is the key at this rank's offset in the
    # global key batch.
    targets = torch.arange(num_local, dtype=torch.long, device=q.device) + num_local * rank
    logits = torch.einsum('nc,mc->nm', [q, k]) / temp
    ce = nn.functional.cross_entropy(logits, targets)
    # MoCov3 rescales the loss by 2 * temperature.
    return ce * (2 * temp)
def compute_ressl_mask(batch_size: int, num_negatives: int, use_keys: bool=True, rank: int=0, world_size: int=1, device: Any='cpu') -> (Tensor | None): 'Precompute the mask for ReSSL.\n\n Args:\n batch_size: The local batch size.\n num_negatives: The number of negatives besides the non-positive key elements.\n use_keys: Whether to use the non-positive elements from the key as negatives.\n rank: Rank of the current process.\n world_size: Number of processes that perform training.\n device: Device that performs training.\n\n Returns:\n The mask.\n ' if (not use_keys): return None target_batch_size = (batch_size * world_size) labels = (torch.arange(batch_size, dtype=torch.long, device=device) + (batch_size * rank)) mask = nn.functional.one_hot(labels, (target_batch_size + num_negatives)) return mask
def compute_ressl_loss(q: Tensor, k: Tensor, k_global: Tensor, use_keys: bool, queue: Tensor, mask: (Tensor | None), temp: float=0.1, temp_m: float=0.04, LARGE_NUM: float=1000000000.0) -> Tensor: 'Compute the RESSL loss.\n\n Args:\n q: The representations of the queries.\n k: The representations of the keys.\n k_global: The global representations of the keys.\n use_keys: Whether to use the non-positive elements from key.\n queue: The queue of representations.\n mask: Mask of positives for the query.\n temp: Temperature applied to the query similarities.\n temp_m: Temperature applied to the keys similarities.\n LARGE_NUM: Large number to mask elements.\n\n Returns:\n The loss.\n ' batch_size = q.shape[0] if use_keys: sim_k_kglobal = torch.einsum('nc,kc->nk', [k, k_global]) sim_q_kglobal = torch.einsum('nc,kc->nk', [q, k_global]) else: sim_q_kglobal = torch.einsum('nc,nc->n', [q, k]).unsqueeze((- 1)) sim_k_kglobal = torch.zeros(batch_size, device=q.device).unsqueeze((- 1)) if (queue is not None): sim_k_queue = torch.einsum('nc,ck->nk', [k, queue]) sim_q_queue = torch.einsum('nc,ck->nk', [q, queue]) if use_keys: logits_k = torch.cat([sim_k_kglobal, sim_k_queue], dim=1) logits_q = torch.cat([sim_q_kglobal, sim_q_queue], dim=1) else: logits_k = sim_k_queue logits_q = sim_q_queue else: logits_k = sim_k_kglobal logits_q = sim_q_kglobal if use_keys: logits_k -= (LARGE_NUM * mask) logits_q -= (LARGE_NUM * mask) logits_q /= temp logits_k /= temp_m loss = (- torch.sum((nn.functional.softmax(logits_k.detach(), dim=1) * nn.functional.log_softmax(logits_q, dim=1)), dim=1).mean(dim=0)) return loss
def compute_sce_mask(batch_size: int, num_negatives: int, use_keys: bool=True, rank: int=0, world_size: int=1, device: Any='cpu') -> Tensor:
    """Precompute the mask for SCE.

    Args:
        batch_size: The local batch size.
        num_negatives: The number of negatives besides the non-positive key elements.
        use_keys: Whether to use the non-positive elements from the key as negatives.
        rank: Rank of the current process.
        world_size: Number of processes that perform training.
        device: Device that performs training.

    Returns:
        The mask.
    """
    if use_keys:
        # Positive sits at this rank's offset within the global key batch.
        num_key_positions = batch_size * world_size
        positions = torch.arange(batch_size, dtype=torch.long, device=device) + batch_size * rank
    else:
        # Positive is the single paired key, stored in column 0.
        num_key_positions = 1
        positions = torch.zeros(batch_size, dtype=torch.long, device=device)
    return nn.functional.one_hot(positions, num_key_positions + num_negatives)
def compute_sce_loss(q: Tensor, k: Tensor, k_global: Tensor, use_keys: bool, queue: Tensor, mask: Tensor, coeff: Tensor, temp: float=0.1, temp_m: float=0.07, LARGE_NUM: float=1000000000.0) -> Tensor:
    """Compute the SCE loss.

    The target distribution mixes the one-hot contrastive positives with the
    relational key similarity distribution, weighted by ``coeff``.

    Args:
        q: The representations of the queries.
        k: The representations of the keys.
        k_global: The global representations of the keys.
        use_keys: Whether to use the non-positive elements from key.
        queue: The queue of representations.
        mask: Mask of positives for the query.
        coeff: Coefficient between the contrastive and relational aspects.
        temp: Temperature applied to the query similarities.
        temp_m: Temperature applied to the keys similarities.
        LARGE_NUM: Large number to mask elements.

    Returns:
        The loss.
    """
    batch_size = q.shape[0]
    if use_keys:
        sim_k_keys = torch.einsum('nc,kc->nk', [k, k_global])
        sim_q_keys = torch.einsum('nc,kc->nk', [q, k_global])
    else:
        sim_q_keys = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
        sim_k_keys = torch.zeros(batch_size, device=q.device).unsqueeze(-1)
    if queue is not None:
        sim_k_keys = torch.cat([sim_k_keys, torch.einsum('nc,ck->nk', [k, queue])], dim=1)
        sim_q_keys = torch.cat([sim_q_keys, torch.einsum('nc,ck->nk', [q, queue])], dim=1)
    if use_keys:
        # Only the key-side logits have their positive removed; the query
        # keeps its positive for the contrastive term.
        sim_k_keys = sim_k_keys - LARGE_NUM * mask
    logits_q = sim_q_keys / temp
    logits_k = sim_k_keys / temp_m
    prob_k = nn.functional.softmax(logits_k, dim=1)
    # Target: convex combination of one-hot positives and key relations,
    # re-normalized to a probability distribution.
    target = nn.functional.normalize(coeff * mask + (1 - coeff) * prob_k, p=1, dim=1)
    return -(target * nn.functional.log_softmax(logits_q, dim=1)).sum(dim=1).mean(dim=0)
class DummyModel(EztorchBaseModule, ABC):
    """Dummy model to perform tests such as profiling dataloading.

    Args:
        input_shape: The input shape of the data (sequence of dimension sizes).
        transform: The configuration of a transform to apply to the data.
    """

    # Annotation fix: `input_shape` was typed `int`, but `math.prod(input_shape)`
    # requires an iterable of ints (a shape), not a scalar.
    def __init__(self, input_shape: Sequence[int], transform: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.transform = hydra.utils.instantiate(transform) if transform is not None else None
        self.save_hyperparameters()
        input_dim = math.prod(input_shape)
        self.layer = nn.Linear(input_dim, 1)

    def configure_optimizers(self) -> Dict[Any, Any]:
        """Single Adam optimizer over all parameters."""
        return torch.optim.Adam(self.parameters(), 0.0001)

    def forward(self, x: Tensor) -> Tensor:
        """Flatten the input and project it to a single logit."""
        x = torch.flatten(x, 1)
        x = self.layer(x)
        return x

    def training_step(self, batch: Iterable[Any], batch_idx: int):
        """Dummy step: BCE of the logit against an all-ones target, so the
        pipeline (loading + transform + forward) is fully exercised."""
        x = batch['input']
        if self.transform is not None:
            # Transforms run without autograd and in fp32.
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.transform(x)
        # Multi-view batches arrive as a list; only the first view is used.
        if type(x) is list:
            pred = self.forward(x[0])
        else:
            pred = self.forward(x)
        return torch.nn.functional.binary_cross_entropy_with_logits(pred, torch.ones_like(pred))
class EztorchBaseModule(pl.LightningModule):
    """Base LightningModule that routes all gradient clipping through :meth:`clip_gradients`."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    def configure_gradient_clipping(self, optimizer: Optimizer, gradient_clip_val: ((int | float) | None)=None, gradient_clip_algorithm: (str | None)=None) -> None:
        # Delegate to clip_gradients so subclasses customize clipping in one place.
        return self.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)

    def clip_gradients(self, optimizer: Optimizer, gradient_clip_val: Optional[Union[(int, float)]]=None, gradient_clip_algorithm: Optional[str]=None) -> None:
        """Handles gradient clipping internally.

        Note:
            - Do not override this method. If you want to customize gradient clipping, consider using
              :meth:`configure_gradient_clipping` method.
            - For manual optimization (``self.automatic_optimization = False``), if you want to use
              gradient clipping, consider calling
              ``self.clip_gradients(opt, gradient_clip_val=0.5, gradient_clip_algorithm="norm")``
              manually in the training step.

        Args:
            optimizer: Current optimizer being used.
            gradient_clip_val: The value at which to clip gradients.
            gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
                to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm.
        """
        # When running under Fabric, clipping is delegated to it entirely:
        # "value" mode passes clip_val, otherwise the value is used as max_norm.
        if (self.fabric is not None):
            self.fabric.clip_gradients(self, optimizer, clip_val=(gradient_clip_val if (gradient_clip_algorithm == GradClipAlgorithmType.VALUE) else None), max_norm=(None if (gradient_clip_algorithm == GradClipAlgorithmType.VALUE) else gradient_clip_val), error_if_nonfinite=False)
            return
        # Fall back to the Trainer-level setting; reject conflicting explicit values.
        if (gradient_clip_val is None):
            gradient_clip_val = (self.trainer.gradient_clip_val or 0.0)
        elif ((self.trainer.gradient_clip_val is not None) and (self.trainer.gradient_clip_val != gradient_clip_val)):
            raise MisconfigurationException(f'You have set `Trainer(gradient_clip_val={self.trainer.gradient_clip_val!r})` and have passed `clip_gradients(gradient_clip_val={gradient_clip_val!r})`. Please use only one of them.')
        # Same reconciliation for the algorithm name (case-insensitive).
        if (gradient_clip_algorithm is None):
            gradient_clip_algorithm = (self.trainer.gradient_clip_algorithm or 'norm')
        else:
            gradient_clip_algorithm = gradient_clip_algorithm.lower()
            if ((self.trainer.gradient_clip_algorithm is not None) and (self.trainer.gradient_clip_algorithm != gradient_clip_algorithm)):
                raise MisconfigurationException(f'You have set `Trainer(gradient_clip_algorithm={self.trainer.gradient_clip_algorithm.value!r})` and have passed `clip_gradients(gradient_clip_algorithm={gradient_clip_algorithm!r}) Please use only one of them.')
        if (not isinstance(gradient_clip_val, (int, float))):
            raise TypeError(f'`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.')
        if (not GradClipAlgorithmType.supported_type(gradient_clip_algorithm.lower())):
            raise MisconfigurationException(f'`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. Allowed algorithms: {GradClipAlgorithmType.supported_types()}.')
        gradient_clip_algorithm = GradClipAlgorithmType(gradient_clip_algorithm)
        # The precision plugin performs the actual clipping (handles AMP scaling).
        self.trainer.precision_plugin.clip_gradients(optimizer, gradient_clip_val, gradient_clip_algorithm)
class FinetuningModel(EztorchBaseModule):
    """Fine-tuning training on top of a pretrained trunk.

    Args:
        trunk: Config to build a trunk.
        classifier: Config to build a classifier.
        optimizer: Config to build an optimizer for trunk.
        pretrained_trunk_path: Path to the pretrained trunk file.
        trunk_pattern: Pattern to retrieve the trunk model in checkpoint state_dict and delete the key.
        two_groups: If ``True``, use two groups of parameters for optimizer, the trunk and the head.
        freeze_trunk: If ``True``, freeze the trunk.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        val_time_augmentation: Ensembling method for test time augmentation used at validation.
        test_time_augmentation: Ensembling method for test time augmentation used at test.
        update_bn_momentum: If ``True`` update batch norm statistics according to :math:`max(1 - 10/steps\\_per\\_epoch, 0.9)`.
        freeze_bn_layers: If ``True``, freeze the batch norm layers.

    Example::

        trunk = {...}  # config to build a trunk
        classifier = {...}  # config to build a classifier
        optimizer = {...}  # config to build an optimizer
        pretrained_trunk_path = ...  # path where the trunk has been saved

        model = FinetuningModel(trunk, classifier, optimizer, pretrained_trunk_path)
    """

    def __init__(
        self,
        trunk: DictConfig,
        classifier: DictConfig,
        optimizer: DictConfig,
        pretrained_trunk_path: str,
        trunk_pattern: str = '^(trunk\\.)',
        two_groups: bool = False,
        freeze_trunk: bool = False,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        val_time_augmentation: Optional[DictConfig] = None,
        test_time_augmentation: Optional[DictConfig] = None,
        update_bn_momentum: bool = False,
        freeze_bn_layers: bool = False,
    ) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.optimizer_cfg = optimizer
        self.two_groups = two_groups
        self.freeze_trunk = freeze_trunk

        # Restore the pretrained trunk weights, stripping the checkpoint prefix.
        trunk_state_dict = get_sub_state_dict_from_pl_ckpt(
            checkpoint_path=pretrained_trunk_path, pattern=trunk_pattern)
        trunk_state_dict = remove_pattern_in_keys_from_dict(
            d=trunk_state_dict, pattern=trunk_pattern)
        self.trunk = hydra.utils.instantiate(trunk)
        self.trunk.load_state_dict(trunk_state_dict)
        self.classifier = hydra.utils.instantiate(classifier)

        self.train_transform = (hydra.utils.instantiate(train_transform)
                                if train_transform is not None else None)
        self.val_transform = (hydra.utils.instantiate(val_transform)
                              if val_transform is not None else None)
        self.test_transform = (hydra.utils.instantiate(test_transform)
                               if test_transform is not None else None)
        self.val_time_augmentation = (get_test_time_augmentation_fn(**val_time_augmentation)
                                      if val_time_augmentation else None)
        self.test_time_augmentation = (get_test_time_augmentation_fn(**test_time_augmentation)
                                       if test_time_augmentation else None)
        self.update_bn_momentum = update_bn_momentum
        self.freeze_bn_layers = freeze_bn_layers

        if self.freeze_trunk:
            for param in self.trunk.parameters():
                param.requires_grad = False
        if self.freeze_bn_layers:
            for module in self._bn_modules():
                for param in module.parameters():
                    param.requires_grad = False

    def _bn_modules(self):
        """Yield every batch-norm submodule of the model.

        BUGFIX: the previous check ``module in _BN_LAYERS`` compared module
        instances against the ``str`` keys of the ``_BN_LAYERS`` dict and
        therefore never matched any module, so ``freeze_bn_layers`` and
        ``update_bn_momentum`` were silently inoperative. ``isinstance``
        against the registered batch norm classes is used instead.
        """
        bn_types = tuple(_BN_LAYERS.values())
        for module in self.modules():
            if isinstance(module, bn_types):
                yield module

    @staticmethod
    def _apply_transform(x: Tensor, transform: Optional[Callable]) -> Tensor:
        """Apply an optional transform without gradient tracking and in full precision."""
        if transform is None:
            return x
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=False):
                return transform(x)

    @property
    def learnable_params(self) -> List[Parameter]:
        """Learnable parameters of the model."""
        params = []
        if not self.freeze_trunk:
            params.extend(self.trunk.parameters())
        params.extend(self.classifier.parameters())
        return params

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return self.trunk.num_layers + self.classifier.num_layers

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        if name.startswith('trunk.'):
            return self.trunk.get_param_layer_id(name[len('trunk.'):])
        elif name.startswith('classifier.'):
            # Classifier layers are numbered after all trunk layers.
            return self.trunk.num_layers + self.classifier.get_param_layer_id(name[len('classifier.'):])
        else:
            raise NotImplementedError(f'{name} should not have been used.')

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        """Total training steps inferred from datamodule and devices."""
        if (self.trainer.datamodule is not None
                and self.trainer.datamodule.train_num_samples > 0):
            return (self.trainer.datamodule.train_num_samples
                    // self.trainer.datamodule.train_global_batch_size)
        else:
            return None

    def on_fit_start(self) -> None:
        if self.update_bn_momentum:
            # Heuristic from the original code: shorten BN momentum for short epochs.
            new_value = max(1 - 10 / self.training_steps_per_epoch, 0.9)
            rank_zero_info(f'Update Batch normalization momentum value to {new_value}.')
            for module in self._bn_modules():
                module.momentum = new_value
        num_classes = self.trainer.datamodule.num_classes
        task = 'binary' if num_classes <= 2 else 'multiclass'
        self.train_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.train_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)
        self.val_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.val_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def on_test_start(self) -> None:
        num_classes = self.trainer.datamodule.num_classes
        task = 'binary' if num_classes <= 2 else 'multiclass'
        self.test_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.test_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def on_train_epoch_start(self) -> None:
        # Keep frozen parts in eval mode so BN running stats are not updated.
        if self.freeze_trunk:
            self.trunk.eval()
        if self.freeze_bn_layers:
            for module in self._bn_modules():
                module.eval()

    def forward(self, x: Tensor) -> Dict[str, Any]:
        h = self.trunk(x)
        preds = self.classifier(h)
        return {'preds': preds, 'h': h}

    def configure_optimizers(self) -> Dict[Any, Any]:
        num_steps = self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch)
        if self.two_groups:
            # Separate parameter groups for trunk and head.
            optimizer, scheduler = hydra.utils.instantiate(
                self.optimizer_cfg, num_steps_per_epoch=num_steps,
                model1=self.trunk, model2=self.classifier)
        else:
            optimizer, scheduler = hydra.utils.instantiate(
                self.optimizer_cfg, num_steps_per_epoch=num_steps, model=self)
        if scheduler is None:
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def shared_step(self, x: Tensor) -> Tensor:
        """Compute classifier predictions, without trunk gradients if frozen."""
        if self.freeze_trunk:
            with torch.no_grad():
                h = self.trunk(x)
        else:
            h = self.trunk(x)
        preds = self.classifier(h)
        return preds

    def training_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        x, targets = batch['input'], batch['label']
        x = self._apply_transform(x, self.train_transform)
        preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        acc_1 = self.train_acc_1(preds, targets)
        acc_5 = self.train_acc_5(preds, targets)
        self.log('train/loss', loss, on_epoch=True)
        self.log('train/acc_1', acc_1, on_epoch=True, prog_bar=True)
        self.log('train/acc_5', acc_5, on_epoch=True)
        return loss

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if self.val_time_augmentation is not None:
            x, targets, idx = batch['input'], batch['label'], batch['idx']
            x = self._apply_transform(x, self.val_transform)
            preds = self.shared_step(x)
            # Ensemble the per-clip softmax scores across augmented views.
            preds = preds.softmax(-1)
            preds, targets, idx = self.val_time_augmentation(preds, targets, idx)
        else:
            x, targets = batch['input'], batch['label']
            x = self._apply_transform(x, self.val_transform)
            preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.val_acc_1(preds, targets)
        self.val_acc_5(preds, targets)
        self.log('val/loss', loss)
        self.log('val/acc_1', self.val_acc_1, prog_bar=True)
        self.log('val/acc_5', self.val_acc_5)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if self.test_time_augmentation is not None:
            x, targets, idx = batch['input'], batch['label'], batch['idx']
            x = self._apply_transform(x, self.test_transform)
            preds = self.shared_step(x)
            preds = preds.softmax(-1)
            preds, targets, idx = self.test_time_augmentation(preds, targets, idx)
        else:
            x, targets = batch['input'], batch['label']
            x = self._apply_transform(x, self.test_transform)
            preds = self.shared_step(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.test_acc_1(preds, targets)
        self.test_acc_5(preds, targets)
        self.log('test/loss', loss)
        self.log('test/acc_1', self.test_acc_1, prog_bar=True)
        self.log('test/acc_5', self.test_acc_5)
        return loss
class LinearHead(Module):
    """Build a Linear head with optional dropout and normalization.

    Args:
        affine: Use affine in normalization layer.
        bias: Use bias in linear layer. If ``norm_layer``, set to ``False``.
        dropout: Dropout probability, if :math:`0`, no dropout layer.
        dropout_inplace: Use inplace operation in dropout.
        input_dim: Input dimension for the linear head.
        norm_layer: Normalization layer after the linear layer, if ``str`` lookup for the module in ``_BN_LAYERS`` dictionary.
        output_dim: Output dimension for the linear head.
        init_normal: If ``True``, make normal initialization for linear layer.
        init_mean: Mean for the initialization.
        init_std: STD for the initialization.
        zero_bias: If ``True``, put zeros to bias for the initialization.

    Raises:
        NotImplementedError: If ``norm_layer`` is not supported.
    """

    def __init__(self, affine: bool = True, bias: bool = True, dropout: float = 0.0,
                 dropout_inplace: bool = False, input_dim: int = 2048,
                 norm_layer: Optional[Union[str, Module]] = None, output_dim: int = 1000,
                 init_normal: bool = True, init_mean: float = 0.0, init_std: float = 0.01,
                 zero_bias: bool = True) -> None:
        super().__init__()
        if norm_layer is not None:
            norm = True
            if type(norm_layer) is str:
                norm_layer = _BN_LAYERS[norm_layer]
            if norm_layer in [BatchNorm2d, BatchNorm3d]:
                # BUGFIX: the f-string prefix was missing, so the error message
                # printed the literal text '{norm_layer}'.
                raise NotImplementedError(f'{norm_layer} not supported in LinearHead')
        else:
            norm = False

        layers = []
        if dropout > 0.0:
            layers.append(Dropout(p=dropout, inplace=dropout_inplace))
        # The bias is dropped when a norm layer follows (the norm has its own shift).
        linear_layer = Linear(input_dim, output_dim, bias=bias and not norm)
        if init_normal:
            linear_layer.weight.data.normal_(mean=init_mean, std=init_std)
            if zero_bias and (bias and not norm):
                linear_layer.bias.data.zero_()
        layers.append(linear_layer)
        if norm:
            layers.append(norm_layer(num_features=output_dim, affine=affine))
        self.layers = Sequential(*layers)

    def forward(self, x: Tensor) -> Tensor:
        return self.layers(x)

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return 1

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        return 0

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters."""
        params = list(self.parameters())
        return params
class Linear3DHead(Module):
    """Linear 3D head. This layer performs an optional pooling operation followed by an
    optional normalization, an optional dropout and a fully-connected projection.

    ::

        Pool3d
          ↓
        Normalization
          ↓
        Dropout
          ↓
        Projection

    Args:
        input_dim: Input channel size of the resnet head.
        pool: Pooling module.
        dropout: Dropout module.
        bn: Batch normalization module.
        proj: Projection module.
        norm: If ``True``, normalize features along first dimension.
        init_std: Init std for weights from pytorchvideo.
        view: If ``True``, apply reshape view to :math:`(-1, num\\ features)`.
    """

    def __init__(self, input_dim: int, pool: Module = None, dropout: Module = None,
                 bn: Module = None, proj: Module = None, norm: bool = False,
                 init_std: float = 0.01, view: bool = True) -> None:
        super().__init__()
        self.input_dim = input_dim
        self.pool = pool
        self.dropout = dropout
        self.bn = bn
        self.proj = proj
        self.norm = norm
        self.view = view
        self.init_std = init_std
        self._init_weights()

    def forward(self, x: Tensor) -> Tensor:
        if self.pool is not None:
            x = self.pool(x)
        if self.view:
            x = x.view(-1, self.input_dim)
        if self.norm:
            x = F.normalize(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.dropout is not None:
            x = self.dropout(x)
        if self.proj is not None:
            x = self.proj(x)
        return x

    def _init_weights(self):
        """Normal init for the projection weights, zeros for biases, identity for BN."""
        # BUGFIX: guard against proj=None (the default), which previously raised
        # AttributeError on self.proj.named_parameters() at construction time.
        if self.proj is not None:
            for name, param in self.proj.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0.0)
                elif 'weight' in name:
                    nn.init.normal_(param, mean=0.0, std=self.init_std)
        if self.bn is not None:
            self.bn.weight.data.fill_(1)
            self.bn.bias.data.zero_()

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return 1

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        return 0

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters."""
        params = list(self.parameters())
        return params
def create_linear3d_head(*, in_features: int, num_classes: int = 400,
                         bn: Union[str, Callable] = None, norm: bool = False,
                         pool: Union[str, Callable] = AvgPool3d,
                         pool_kernel_size: Tuple[int] = (1, 7, 7),
                         pool_stride: Tuple[int] = (1, 1, 1),
                         pool_padding: Tuple[int] = (0, 0, 0),
                         output_size: Tuple[int] = (1, 1, 1),
                         dropout_rate: float = 0.5, view: bool = True) -> Module:
    """Create a linear 3D head: optional 3D pooling, optional batch norm,
    optional dropout and a fully-connected projection.

    ::

        Pool3d
          ↓
        Normalization
          ↓
        Dropout
          ↓
        Projection

    Pool3d examples include: AvgPool3d, MaxPool3d, AdaptiveAvgPool3d, and None.

    Args:
        in_features: Input channel size of the resnet head.
        num_classes: Output channel size of the resnet head.
        bn: A callable that constructs a batch norm layer.
        norm: If ``True``, normalize features along first dimension.
        pool: A callable that constructs the head pooling layer, or ``None``
            for no pooling.
        pool_kernel_size: Pooling kernel size(s) when not using adaptive pooling.
        pool_stride: Pooling stride size(s) when not using adaptive pooling.
        pool_padding: Pooling padding size(s) when not using adaptive pooling.
        output_size: Spatial temporal output size when using adaptive pooling.
        dropout_rate: Dropout rate.
        view: Whether to apply reshape view to :math:`(-1, num\\ features)`.
    """
    # Resolve string aliases to layer classes.
    if type(pool) is str:
        pool = _POOL_LAYERS[pool]
    if type(bn) is str:
        bn = _BN_LAYERS[bn]

    bn_module = bn(in_features) if bn is not None else None

    if pool is None:
        pool_module = None
    elif pool == AdaptiveAvgPool3d:
        # Adaptive pooling takes a target size instead of kernel parameters.
        pool_module = pool(output_size)
    else:
        pool_module = pool(kernel_size=pool_kernel_size, stride=pool_stride,
                           padding=pool_padding)

    dropout_module = Dropout(dropout_rate) if dropout_rate > 0 else None
    return Linear3DHead(in_features, bn=bn_module, norm=norm,
                        proj=Linear(in_features, num_classes),
                        pool=pool_module, dropout=dropout_module, view=view)
class MLPHead(Module):
    """Build a MLP head with optional dropout and normalization.

    Args:
        activation_inplace: Inplace operation for activation layers.
        activation_layer: Activation layer, if str lookup for the module in ``_ACTIVATION_LAYERS`` dictionary.
        affine: If ``True``, use affine in normalization layer.
        bias: If ``True``, use bias in linear layer. If ``norm_layer``, set to ``False``.
        dropout: Dropout probability, if :math:`0`, no dropout layer.
        dropout_inplace: If ``True``, use inplace operation in dropout.
        hidden_dims: dimension of the hidden layers :math:`(num\\_layers - 1)`. If int, used for all hidden layers.
        input_dim: Input dimension for the MLP head.
        norm_layer: Normalization layer after the linear layer, if str lookup for the module in ``_BN_LAYERS`` dictionary.
        num_layers: Number of layers :math:`(number\\ of\\ hidden\\ layers + 1)`.
        last_bias: If ``True``, use bias in output layer. If ``last_norm`` and ``norm_layer`` set to ``False``.
        last_norm: If ``True``, Apply normalization to the last layer if ``norm_layer``.
        last_affine: If ``True``, use affine in output normalization layer.
        output_dim: Output dimension for the MLP head.
        last_init_normal: If ``True``, make normal initialization for last layer.
        init_mean: Mean for the last initialization.
        init_std: STD for the last initialization.
        zero_bias: If ``True``, put zeros to bias for the last initialization.

    Raises:
        NotImplementedError: If ``norm_layer`` is not supported.
    """

    def __init__(self, activation_inplace: bool = True,
                 activation_layer: Union[str, Module] = ReLU,
                 affine: bool = True, bias: bool = True,
                 dropout: Union[float, Iterable[float]] = 0.0,
                 dropout_inplace: bool = False,
                 hidden_dims: Union[int, Iterable[int]] = 2048,
                 input_dim: int = 2048,
                 norm_layer: Optional[Union[str, Module]] = None,
                 num_layers: int = 2,
                 last_bias: bool = True,
                 last_norm: bool = False,
                 last_affine: bool = False,
                 output_dim: int = 128,
                 last_init_normal: bool = False,
                 init_mean: float = 0.0,
                 init_std: float = 0.01,
                 zero_bias: bool = True) -> None:
        super().__init__()
        # Broadcast scalar hyper-parameters to one value per layer.
        if type(dropout) is float:
            dropout = [dropout] * num_layers
        if type(activation_layer) is str:
            activation_layer = _ACTIVATION_LAYERS[activation_layer]
        if type(hidden_dims) is int:
            hidden_dims = [hidden_dims] * (num_layers - 1)

        if norm_layer is not None:
            norm = True
            if type(norm_layer) is str:
                norm_layer = _BN_LAYERS[norm_layer]
            if norm_layer not in [BatchNorm1d, SyncBatchNorm]:
                # BUGFIX: the f-string prefix was missing, so the error message
                # printed the literal text '{norm_layer}'.
                raise NotImplementedError(f'{norm_layer} not supported in MLPHead')
        else:
            norm = False

        assert len(hidden_dims) == (num_layers - 1)
        assert len(dropout) == num_layers

        layers = []
        for i in range(num_layers):
            is_last = i == (num_layers - 1)
            dim_in = input_dim if i == 0 else hidden_dims[i - 1]
            dim_out = output_dim if is_last else hidden_dims[i]
            # The last layer has its own norm/affine/bias switches.
            use_norm = norm and (not is_last or last_norm)
            use_affine = (affine and not is_last) or (last_affine and is_last)
            # A norm layer provides the shift, so the linear bias is dropped.
            use_bias = ((bias and not is_last) or (last_bias and is_last)) and not use_norm

            if dropout[i] > 0.0:
                layers.append(Dropout(p=dropout[i], inplace=dropout_inplace))
            layers.append(Linear(dim_in, dim_out, bias=use_bias))
            if is_last and last_init_normal:
                layers[-1].weight.data.normal_(mean=init_mean, std=init_std)
                if zero_bias and use_bias:
                    layers[-1].bias.data.zero_()
            if use_norm:
                layers.append(norm_layer(num_features=dim_out, affine=use_affine))
            if not is_last:
                # Hidden layers are followed by an activation.
                if activation_inplace:
                    layers.append(activation_layer(inplace=True))
                else:
                    layers.append(activation_layer())
        self.layers = Sequential(*layers)

    def forward(self, x: Tensor) -> Tensor:
        return self.layers(x)

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters."""
        params = list(self.parameters())
        return params

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return len(self.layers)

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        if name.startswith('layers.'):
            layer_id = int(name.split('.')[1])
            return layer_id
        else:
            return self.num_layers - 1
class VideoResNetHead(Module):
    """ResNet basic head. Performs an optional pooling, an optional dropout, an
    optional fully-connected projection, an optional activation, and an optional
    global spatiotemporal averaging followed by flattening.

    ::

        Pool3d
          ↓
        Dropout
          ↓
        Projection
          ↓
        Activation
          ↓
        Averaging

    The builder can be found in :func:`create_video_resnet_head`.

    Args:
        pool: Pooling module.
        dropout: Dropout module.
        proj: Projection module.
        activation: Activation module.
        output_pool: Pooling module applied on the output.
        init_std: Init std for weights from pytorchvideo.
    """

    def __init__(self, pool: Module = None, dropout: Module = None,
                 proj: Module = None, activation: Module = None,
                 output_pool: Module = None, init_std: float = 0.01) -> None:
        super().__init__()
        self.pool = pool
        self.dropout = dropout
        self.proj = proj
        self.activation = activation
        self.output_pool = output_pool
        # Skip the projection when it is absent or an explicit Identity.
        self.do_proj = (proj is not None) and (type(proj) is not Identity)
        init_net_weights(self, init_std, 'resnet')

    def forward(self, x: Tensor) -> Tensor:
        for stage in (self.pool, self.dropout):
            if stage is not None:
                x = stage(x)
        if self.do_proj:
            # Move channels last for the linear projection, then restore layout.
            x = self.proj(x.permute((0, 2, 3, 4, 1))).permute((0, 4, 1, 2, 3))
        if self.activation is not None:
            x = self.activation(x)
        if self.output_pool is not None:
            # Global averaging followed by flattening to (batch, features).
            x = self.output_pool(x)
            x = x.view(x.shape[0], -1)
        return x

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return 1

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        return 0

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters."""
        return list(self.parameters())
class VideoResNetTemporalHead(Module):
    """ResNet basic head for keeping temporal dimension. This layer performs an
    initial spatial average pooling and reshapes the output.

    Args:
        init_std: init std for weights from pytorchvideo.
    """

    def __init__(self, init_std: float = 0.01) -> None:
        super().__init__()
        init_net_weights(self, init_std, 'resnet')

    def forward(self, x: Tensor) -> Tensor:
        b, d, t, h, w = x.shape
        # Average over the spatial dimensions only; the temporal one is kept.
        x = torch.nn.functional.adaptive_avg_pool3d(x, [t, 1, 1])
        # BUGFIX: was `self.view(b, t, d)` — Module has no `view`, so forward
        # always raised AttributeError; reshape the pooled tensor instead.
        # NOTE(review): `view` on a (b, d, t, 1, 1) tensor reinterprets memory
        # rather than transposing; if a true (batch, time, channel) order is
        # intended, a permute may be required — confirm against callers.
        x = x.view(b, t, d)
        return x
def create_video_resnet_head(*, in_features: int, num_classes: int = 400,
                             pool: Union[str, Callable] = AvgPool3d,
                             output_size: Tuple[int] = (1, 1, 1),
                             pool_kernel_size: Tuple[int] = (1, 7, 7),
                             pool_stride: Tuple[int] = (1, 1, 1),
                             pool_padding: Tuple[int] = (0, 0, 0),
                             dropout_rate: float = 0.5,
                             activation: Optional[Union[str, Callable]] = None,
                             output_with_global_average: bool = True) -> Module:
    """Create a ResNet basic head: optional pooling, optional dropout, a
    fully-connected projection, an optional activation and a global
    spatiotemporal averaging.

    ::

        Pooling
          ↓
        Dropout
          ↓
        Projection
          ↓
        Activation
          ↓
        Averaging

    Activation examples include: ``ReLU``, ``Softmax``, ``Sigmoid``, and ``None``.
    Pool3d examples include: ``AvgPool3d``, ``MaxPool3d``, ``AdaptiveAvgPool3d``, and ``None``.

    Args:
        in_features: Input channel size of the resnet head.
        num_classes: Output channel size of the resnet head.
        pool: A callable that constructs the head pooling layer, or ``None``
            for no pooling.
        pool_kernel_size: Pooling kernel size(s) when not using adaptive pooling.
        pool_stride: Pooling stride size(s) when not using adaptive pooling.
        pool_padding: Pooling padding size(s) when not using adaptive pooling.
        output_size: Spatial temporal output size when using adaptive pooling.
        activation: A callable that constructs the activation layer, or ``None``
            for no activation.
        dropout_rate: Dropout rate.
        output_with_global_average: If ``True``, perform global averaging on
            temporal and spatial dimensions and reshape output to
            :math:`batch\\_size \times out\\_features`.
    """
    # Resolve string aliases to layer classes.
    if type(activation) is str:
        activation = _ACTIVATION_LAYERS[activation]
    if type(pool) is str:
        pool = _POOL_LAYERS[pool]

    if activation is None:
        activation_model = None
    elif activation == Softmax:
        # Softmax needs the channel dimension made explicit.
        activation_model = activation(dim=1)
    else:
        activation_model = activation()

    if pool is None:
        pool_model = None
    elif pool == AdaptiveAvgPool3d:
        pool_model = pool(output_size)
    else:
        pool_model = pool(kernel_size=pool_kernel_size, stride=pool_stride,
                          padding=pool_padding)

    output_pool = AdaptiveAvgPool3d(1) if output_with_global_average else None
    proj = Linear(in_features, num_classes) if num_classes > 0 else None
    return VideoResNetHead(proj=proj, activation=activation_model,
                           pool=pool_model,
                           dropout=Dropout(dropout_rate) if dropout_rate > 0 else None,
                           output_pool=output_pool)
def create_video_resnet_temporal_head() -> Module:
    """Create a ResNet basic head that keeps the temporal dimension.

    The head spatially pools its input and reshapes the output.
    """
    return VideoResNetTemporalHead()
class SiameseBaseModel(EztorchBaseModule, ABC):
    """Abstract class to represent siamese models.

    Subclasses should implement training_step method.

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a project.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements
            of each batch.
        num_local_crops: Number of local crops which are the last elements
            of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of
            features of each split.
        mutual_pass: If ``True``, perform one pass per crop resolution.
    """

    def __init__(self, trunk: DictConfig, optimizer: DictConfig,
                 projector: Optional[DictConfig] = None,
                 predictor: Optional[DictConfig] = None,
                 train_transform: Optional[DictConfig] = None,
                 val_transform: Optional[DictConfig] = None,
                 test_transform: Optional[DictConfig] = None,
                 normalize_outputs: bool = True,
                 num_global_crops: int = 2,
                 num_local_crops: int = 0,
                 num_splits: int = 0,
                 num_splits_per_combination: int = 2,
                 mutual_pass: bool = False) -> None:
        super().__init__()
        assert (not (mutual_pass and (num_splits > 0))), 'mutual_pass is not supported with num_splits > 0.'
        self.save_hyperparameters()
        self.trunk = hydra.utils.instantiate(trunk)
        self.projector = (hydra.utils.instantiate(projector) if (projector is not None) else None)
        # A predictor is only instantiated on top of an existing projector.
        self.predictor = (hydra.utils.instantiate(predictor) if ((predictor is not None) and (self.projector is not None)) else None)
        self.train_transform = (hydra.utils.instantiate(train_transform) if (train_transform is not None) else None)
        self.val_transform = (hydra.utils.instantiate(val_transform) if (val_transform is not None) else None)
        self.test_transform = (hydra.utils.instantiate(test_transform) if (test_transform is not None) else None)
        self.optimizer_cfg = optimizer
        self.normalize_outputs = normalize_outputs
        self.num_global_crops = num_global_crops
        self.num_local_crops = num_local_crops
        self.num_crops = (self.num_global_crops + self.num_local_crops)
        self.num_splits = num_splits
        self.num_splits_per_combination = num_splits_per_combination
        self.use_split = (num_splits > 0)
        self.mutual_pass = mutual_pass

    @property
    def learnable_params(self) -> List[Parameter]:
        """List of learnable parameters."""
        params = []
        params.extend(self.trunk.parameters())
        if (self.projector is not None):
            params.extend(self.projector.parameters())
        if (self.predictor is not None):
            params.extend(self.predictor.parameters())
        return params

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        """Total training steps inferred from datamodule and devices."""
        if ((self.trainer.datamodule is not None) and (self.trainer.datamodule.train_num_samples > 0)):
            return (self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size)
        else:
            return None

    def configure_optimizers(self) -> Dict[(Any, Any)]:
        # Build optimizer (and optional scheduler) from the hydra config.
        (optimizer, scheduler) = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch), model=self)
        if (scheduler is None):
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def forward(self, x: Tensor) -> Tensor:
        # trunk -> (optional) projector -> (optional) predictor.
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        q = (self.predictor(z) if (self.predictor is not None) else z)
        return q

    @torch.no_grad()
    def local_split(self, x):
        # Cut each crop into a num_splits x num_splits grid of patches and
        # stack the patches along the batch dimension.
        side_indent = ((x.size((- 2)) // self.num_splits), (x.size((- 1)) // self.num_splits))
        col_splits = x.split(side_indent[1], dim=(- 1))
        x = [split for col_split in col_splits for split in col_split.split(side_indent[0], dim=(- 2))]
        x = torch.cat(x, dim=0)
        return x

    def multi_crop_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        # Dispatch between a single batched pass (mutual_pass) and one pass per crop.
        if self.mutual_pass:
            if (self.num_local_crops > 0):
                return self.multi_crop_with_local_shared_step(x)
            else:
                return self.multi_crop_global_shared_step(x)
        else:
            return [self.shared_step(x_i) for x_i in x]

    def multi_crop_global_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        # All crops share the same resolution: concatenate them into one batch,
        # run a single shared_step and chunk the outputs back per crop.
        outputs = [{} for _ in range(len(x))]
        global_tensor = torch.cat(x)
        global_output = self.shared_step(global_tensor)
        dict_keys = global_output.keys()
        for key in dict_keys:
            # 'z'/'q' that merely alias 'h'/'z' are filled in below instead of chunked.
            if (((key == 'z') and (self.projector is None)) or ((key == 'q') and (self.predictor is None))):
                continue
            global_key_output = global_output[key]
            chunked_global_key_output = global_key_output.chunk(len(x))
            for i in range(len(x)):
                outputs[i][key] = chunked_global_key_output[i]
        if (self.projector is None):
            for i in range(len(x)):
                outputs[i]['z'] = outputs[i]['h']
        if (self.predictor is None):
            for i in range(len(x)):
                outputs[i]['q'] = outputs[i]['z']
        return outputs

    def multi_crop_with_local_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        # Group crops by resolution (consecutive crops with equal last dim) and
        # run one trunk pass per resolution group.
        idx_crops = torch.cumsum(torch.unique_consecutive(torch.tensor([inp.shape[(- 1)] for inp in x]), return_counts=True)[1], 0)
        (start_idx, h) = (0, torch.empty(0, device=x[0].device))
        for (_, end_idx) in enumerate(idx_crops):
            h_output = self.trunk(torch.cat(x[start_idx:end_idx]))
            start_idx = end_idx
            h = torch.cat((h, h_output))
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            q = z
        # Redistribute the concatenated outputs to one dict per crop.
        outputs = [{} for _ in range(len(x))]
        global_output = {'h': h, 'z': z, 'q': q}
        dict_keys = global_output.keys()
        for key in dict_keys:
            if (((key == 'z') and (self.projector is None)) or ((key == 'q') and (self.predictor is None))):
                continue
            global_key_output = global_output[key]
            chunked_global_key_output = global_key_output.chunk(len(x))
            for i in range(len(x)):
                outputs[i][key] = chunked_global_key_output[i]
        if (self.projector is None):
            for i in range(len(x)):
                outputs[i]['z'] = outputs[i]['h']
        if (self.predictor is None):
            for i in range(len(x)):
                outputs[i]['q'] = outputs[i]['z']
        return outputs

    def shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Shared step that pass the input tensor in transforms, the trunk, projector and predictor.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        if self.use_split:
            batch_size = x.size(0)
            # Replace each crop by its grid of local patches (stacked on batch dim).
            x = self.local_split(x)
        h = self.trunk(x)
        if self.use_split:
            # Average trunk features over combinations of the local splits.
            h_splits = list(h.split((h.size(0) // (self.num_splits ** 2)), dim=0))
            h = torch.cat(list(map((lambda x: (sum(x) / self.num_splits_per_combination)), list(combinations(h_splits, r=self.num_splits_per_combination)))), dim=0)
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
            if self.use_split:
                q_split = q.split(batch_size, dim=0)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            if self.use_split:
                q_split = z.split(batch_size, dim=0)
            q = z
        if self.use_split:
            return {'h': h, 'z': z, 'q': q, 'q_split': q_split}
        return {'h': h, 'z': z, 'q': q}

    def val_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Validation shared step that pass the input tensor in the trunk, projector and predictor.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            q = z
        return {'h': h, 'z': z, 'q': q}

    def test_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Test shared step that pass the input tensor in the trunk, projector and predictor.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        if (self.predictor is not None):
            q = self.predictor(z)
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
                q = nn.functional.normalize(q, dim=1)
        else:
            if self.hparams.normalize_outputs:
                z = nn.functional.normalize(z, dim=1)
            q = z
        return {'h': h, 'z': z, 'q': q}

    def up_to_projector_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Shared step that pass the input tensor in the trunk and projector.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        h = self.trunk(x)
        z = (self.projector(h) if (self.projector is not None) else h)
        if self.hparams.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}

    @abstractmethod
    def compute_loss(self):
        # Subclasses define the self-supervised loss.
        pass

    @abstractmethod
    def training_step(self, batch: Iterable[Any], batch_idx: int):
        # Subclasses define the training loop body.
        pass

    def validation_step(self, batch: Iterable[Any], batch_idx: int):
        x = batch['input']
        if (self.val_transform is not None):
            # Transforms run without gradients and in full precision.
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.val_transform(x)
        return self.val_shared_step(x)

    def test_step(self, batch: Iterable[Any], batch_idx: int):
        x = batch['input']
        if (self.test_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.test_transform(x)
        return self.test_shared_step(x)

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return ((self.trunk.num_layers + self.projector.num_layers) + (self.predictor.num_layers if (self.predictor is not None) else 0))

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        # Layers are numbered trunk first, then projector, then predictor.
        if name.startswith('trunk.'):
            return self.trunk.get_param_layer_id(name[len('trunk.'):])
        elif name.startswith('projector.'):
            return (self.trunk.num_layers + self.projector.get_param_layer_id(name[len('projector.'):]))
        elif name.startswith('predictor.'):
            return ((self.trunk.num_layers + self.projector.num_layers) + self.predictor.get_param_layer_id(name[len('predictor.'):]))
        else:
            raise NotImplementedError(f'{name} not found.')
class MoCoModel(ShuffleMomentumQueueBaseModel):
    """MoCo model with version 1, 2, 2+, 3 that can be configured.

    References:
        - MoCo: https://arxiv.org/abs/1911.05722
        - MoCov2: https://arxiv.org/abs/2003.04297
        - MoCov2+: https://arxiv.org/abs/2011.10566
        - MoCov3: https://arxiv.org/abs/2104.02057

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per branch per crop resolution.
        initial_momentum: Initial value for the momentum update.
        scheduler_momentum: Rule to update the momentum value.
        shuffle_bn: If ``True``, apply shuffle normalization trick from MoCo.
        num_devices: Number of devices used to train the model in each node.
        simulate_n_devices: Number of devices to simulate to apply shuffle trick.
            Requires ``shuffle_bn`` to be ``True`` and ``num_devices`` to be :math:`1`.
        queue: Config to build a queue.
        sym: If ``True``, symmetrised the loss.
        use_keys: If ``True``, add keys to negatives.
        temp: Temperature parameter to scale the online similarities.
    """

    def __init__(
        self,
        trunk: DictConfig,
        optimizer: DictConfig,
        projector: Optional[DictConfig] = None,
        predictor: Optional[DictConfig] = None,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        normalize_outputs: bool = True,
        num_global_crops: int = 2,
        num_local_crops: int = 0,
        num_splits: int = 0,
        num_splits_per_combination: int = 2,
        mutual_pass: bool = False,
        # Annotation fixed from `int` to `float`: the default 0.999 is a float.
        initial_momentum: float = 0.999,
        scheduler_momentum: str = 'constant',
        shuffle_bn: bool = True,
        num_devices: int = 1,
        simulate_n_devices: int = 8,
        queue: Optional[DictConfig] = None,
        sym: bool = False,
        use_keys: bool = False,
        temp: float = 0.2,
    ) -> None:
        super().__init__(
            trunk=trunk,
            optimizer=optimizer,
            projector=projector,
            predictor=predictor,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            normalize_outputs=normalize_outputs,
            num_global_crops=num_global_crops,
            num_local_crops=num_local_crops,
            num_splits=num_splits,
            num_splits_per_combination=num_splits_per_combination,
            mutual_pass=mutual_pass,
            initial_momentum=initial_momentum,
            scheduler_momentum=scheduler_momentum,
            shuffle_bn=shuffle_bn,
            num_devices=num_devices,
            simulate_n_devices=simulate_n_devices,
            queue=queue,
            sym=sym,
            use_keys=use_keys,
        )
        self.save_hyperparameters()
        self.temp = temp

    def compute_loss(self, q: Tensor, k: Tensor, k_global: Tensor, queue: (Tensor | None)) -> Tensor:
        """Compute the MoCo loss.

        Args:
            q: The representations of the queries.
            k: The representations of the keys.
            k_global: The global representations of the keys.
            queue: The queue of representations.

        Returns:
            The loss.
        """
        return compute_moco_loss(q, k, k_global, self.use_keys, queue, self.temp, self.global_rank)

    def on_train_epoch_start(self) -> None:
        # Log the (constant) temperature once per epoch for bookkeeping.
        super().on_train_epoch_start()
        self.log('pretrain/temp', self.temp, on_step=False, on_epoch=True)
class MoCov3Model(MomentumSiameseBaseModel):
    """MoCov3 that can be configured as in the paper.

    References:
        - MoCov3: https://arxiv.org/abs/2104.02057

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per branch per crop resolution.
        initial_momentum: Initial value for the momentum update.
        scheduler_momentum: Rule to update the momentum value.
        temp: Temperature parameter to scale the online similarities.
    """

    def __init__(
        self,
        trunk: DictConfig,
        optimizer: DictConfig,
        projector: Optional[DictConfig] = None,
        predictor: Optional[DictConfig] = None,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        normalize_outputs: bool = True,
        num_global_crops: int = 2,
        num_local_crops: int = 0,
        num_splits: int = 0,
        num_splits_per_combination: int = 2,
        mutual_pass: bool = False,
        # Annotation fixed from `int` to `float`: the default 0.99 is a float.
        initial_momentum: float = 0.99,
        scheduler_momentum: str = 'cosine',
        temp: float = 1.0,
    ) -> None:
        super().__init__(
            trunk=trunk,
            optimizer=optimizer,
            projector=projector,
            predictor=predictor,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            normalize_outputs=normalize_outputs,
            num_global_crops=num_global_crops,
            num_local_crops=num_local_crops,
            num_splits=num_splits,
            num_splits_per_combination=num_splits_per_combination,
            mutual_pass=mutual_pass,
            initial_momentum=initial_momentum,
            scheduler_momentum=scheduler_momentum,
        )
        self.save_hyperparameters()
        assert (not self.use_split), 'Splits not supported for MoCov3'
        self.temp = temp

    def compute_loss(self, q: Tensor, k: Tensor) -> Tensor:
        """Compute the MoCo loss.

        Args:
            q: The representations of the queries.
            k: The representations of the keys.

        Returns:
            The loss.
        """
        # Keys are gathered across processes (no gradient) so every device
        # contrasts its queries against the full global set of keys.
        k_global = concat_all_gather_without_backprop(k)
        return compute_mocov3_loss(q, k_global, self.device, self.temp, self.global_rank)

    def on_train_epoch_start(self) -> None:
        super().on_train_epoch_start()
        self.log('pretrain/temp', self.temp, on_step=False, on_epoch=True)

    def training_step(self, batch: Iterable[Any], batch_idx: int) -> Dict[(str, Tensor)]:
        X = batch['input']
        X = ([X] if isinstance(X, Tensor) else X)
        assert (len(X) == self.num_crops)
        if (self.train_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    # BUG FIX: the original called `self.transform(X)`, but the
                    # attribute configured in __init__ is `train_transform`
                    # (see SimCLRModel.training_step for the same pattern).
                    X = self.train_transform(X)
        outs_online = self.multi_crop_shared_step(X)
        # Momentum branch only sees the global crops.
        outs_momentum = self.multi_crop_momentum_shared_step(X[:self.num_global_crops])
        tot_loss = 0
        for i in range(self.num_global_crops):
            for j in range(self.num_crops):
                if (i == j):
                    continue
                loss = self.compute_loss(outs_online[j]['q'], outs_momentum[i]['z'])
                tot_loss += loss
        outputs = {'loss': tot_loss}
        outputs.update(outs_online[0])
        # Detach everything except the loss so no graph leaks through outputs.
        for (name_output, output) in outputs.items():
            if (name_output != 'loss'):
                outputs[name_output] = output.detach()
        self.log('pretrain/loss', outputs['loss'], prog_bar=True, on_step=True, on_epoch=True)
        return outputs
class MomentumSiameseBaseModel(SiameseBaseModel, ABC):
    """Abstract class to represent siamese models with a momentum branch.

    Subclasses should implement training_step method.

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per branch per crop resolution.
        initial_momentum: Initial value for the momentum update.
        scheduler_momentum: Rule to update the momentum value.
    """

    def __init__(
        self,
        trunk: DictConfig,
        optimizer: DictConfig,
        projector: Optional[DictConfig] = None,
        predictor: Optional[DictConfig] = None,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        normalize_outputs: bool = True,
        num_global_crops: int = 2,
        num_local_crops: int = 0,
        num_splits: int = 0,
        num_splits_per_combination: int = 2,
        mutual_pass: bool = False,
        # Annotation fixed from `int` to `float`: the default 0.996 is a float.
        initial_momentum: float = 0.996,
        scheduler_momentum: str = 'cosine',
    ) -> None:
        super().__init__(
            trunk=trunk,
            optimizer=optimizer,
            projector=projector,
            predictor=predictor,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            normalize_outputs=normalize_outputs,
            num_global_crops=num_global_crops,
            num_local_crops=num_local_crops,
            num_splits=num_splits,
            num_splits_per_combination=num_splits_per_combination,
            mutual_pass=mutual_pass,
        )
        self.save_hyperparameters()
        # Momentum branch: a frozen copy of the online trunk/projector that
        # is updated by EMA instead of gradients.
        self.momentum_trunk = hydra.utils.instantiate(trunk)
        self.momentum_projector = (hydra.utils.instantiate(projector) if (projector is not None) else None)
        self.initial_momentum = initial_momentum
        self.scheduler_momentum = scheduler_momentum
        self.current_momentum = initial_momentum
        for param in self.momentum_trunk.parameters():
            param.requires_grad = False
        self.momentum_trunk.load_state_dict(self.trunk.state_dict())
        if (self.momentum_projector is not None):
            for param in self.momentum_projector.parameters():
                param.requires_grad = False
            self.momentum_projector.load_state_dict(self.projector.state_dict())

    def _update_momentum(self) -> float:
        # Compute the next momentum value according to the configured schedule.
        if (self.scheduler_momentum == 'constant'):
            return self.current_momentum
        elif (self.scheduler_momentum == 'cosine'):
            # Per-step cosine increase from initial_momentum towards 1.
            max_steps = ((self.training_steps_per_epoch * self.trainer.max_epochs) - 1)
            momentum = (1 - (((1 - self.initial_momentum) * (math.cos(((math.pi * self.global_step) / max_steps)) + 1)) / 2))
            return momentum
        elif (self.scheduler_momentum == 'cosine_epoch'):
            # Same cosine rule, but stepped once per epoch.
            max_steps = (self.trainer.max_epochs - 1)
            momentum = (1 - (((1 - self.initial_momentum) * (math.cos(((math.pi * self.current_epoch) / max_steps)) + 1)) / 2))
            return momentum
        else:
            raise NotImplementedError(f'{self.scheduler_momentum} is not supported.')

    @torch.no_grad()
    def _update_weights(self, online: Union[(Module, Tensor)], target: Union[(Module, Tensor)]) -> None:
        # EMA update: target = m * target + (1 - m) * online.
        for ((_, online_p), (_, target_p)) in zip(online.named_parameters(), target.named_parameters()):
            target_p.data = ((self.current_momentum * target_p.data) + ((1 - self.current_momentum) * online_p.data))

    @torch.no_grad()
    def momentum_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Momentum shared step that pass the input tensor in the momentum trunk and momentum projector.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        h = self.momentum_trunk(x)
        z = (self.momentum_projector(h) if (self.momentum_projector is not None) else h)
        # NOTE(review): this reads `self.normalize_outputs` while sibling code
        # uses `self.hparams.normalize_outputs` — presumably the base class
        # exposes both; confirm against SiameseBaseModel.
        if self.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}

    @torch.no_grad()
    def multi_crop_momentum_shared_step(self, x: List[Tensor]) -> List[Dict[(str, Tensor)]]:
        if self.mutual_pass:
            # Single forward pass on the concatenated crops, then split the
            # outputs back into per-crop dictionaries.
            outputs = [{} for _ in range(len(x))]
            global_tensor = torch.cat(x)
            global_output = self.momentum_shared_step(global_tensor)
            dict_keys = global_output.keys()
            for key in dict_keys:
                # When there is no projector, 'z' aliases 'h'; skip chunking it
                # and alias per-crop below instead.
                if ((key == 'z') and (self.momentum_projector is None)):
                    continue
                global_key_output = global_output[key]
                chunked_global_key_output = global_key_output.chunk(len(x))
                for i in range(len(x)):
                    outputs[i][key] = chunked_global_key_output[i]
            if (self.momentum_projector is None):
                for i in range(len(x)):
                    outputs[i]['z'] = outputs[i]['h']
            return outputs
        else:
            return [self.momentum_shared_step(x_i) for x_i in x]

    def on_train_batch_end(self, outputs: Sequence, batch: Sequence, batch_idx: int) -> None:
        # After each optimization step, EMA-update the momentum branch and
        # advance the momentum schedule.
        self._update_weights(self.trunk, self.momentum_trunk)
        if (self.projector is not None):
            self._update_weights(self.projector, self.momentum_projector)
        self.log('pretrain/momentum_value', self.current_momentum, on_step=True, on_epoch=False)
        self.current_momentum = self._update_momentum()
class ReSSLModel(ShuffleMomentumQueueBaseModel):
    """ReSSL model.

    References:
        - ReSSL: https://proceedings.neurips.cc/paper/2021/file/14c4f36143b4b09cbc320d7c95a50ee7-Paper.pdf

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per branch per crop resolution.
        initial_momentum: Initial value for the momentum update.
        scheduler_momentum: Rule to update the momentum value.
        shuffle_bn: If ``True``, apply shuffle normalization trick from MoCo.
        num_devices: Number of devices used to train the model in each node.
        simulate_n_devices: Number of devices to simulate to apply shuffle trick.
            Requires ``shuffle_bn`` to be ``True`` and ``num_devices`` to be :math:`1`.
        queue: Config to build a queue.
        sym: If ``True``, symmetrised the loss.
        use_keys: If ``True``, add keys to negatives.
        temp: Temperature parameter to scale the online similarities.
        temp_m: Temperature parameter to scale the target similarities.
            Initial value if warmup applied.
        initial_temp_m: Initial value of the target temperature.
    """

    def __init__(
        self,
        trunk: DictConfig,
        optimizer: DictConfig,
        projector: Optional[DictConfig] = None,
        predictor: Optional[DictConfig] = None,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        normalize_outputs: bool = True,
        num_global_crops: int = 2,
        num_local_crops: int = 0,
        num_splits: int = 0,
        num_splits_per_combination: int = 2,
        mutual_pass: bool = False,
        # Annotation fixed from `int` to `float`: the default 0.999 is a float.
        initial_momentum: float = 0.999,
        scheduler_momentum: str = 'constant',
        shuffle_bn: bool = True,
        num_devices: int = 1,
        simulate_n_devices: int = 8,
        queue: Optional[DictConfig] = None,
        sym: bool = False,
        use_keys: bool = False,
        temp: float = 0.1,
        temp_m: float = 0.04,
        initial_temp_m: float = 0.04,
    ) -> None:
        super().__init__(
            trunk=trunk,
            optimizer=optimizer,
            projector=projector,
            predictor=predictor,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            normalize_outputs=normalize_outputs,
            num_global_crops=num_global_crops,
            num_local_crops=num_local_crops,
            num_splits=num_splits,
            num_splits_per_combination=num_splits_per_combination,
            mutual_pass=mutual_pass,
            initial_momentum=initial_momentum,
            scheduler_momentum=scheduler_momentum,
            shuffle_bn=shuffle_bn,
            num_devices=num_devices,
            simulate_n_devices=simulate_n_devices,
            queue=queue,
            sym=sym,
            use_keys=use_keys,
        )
        self.save_hyperparameters()
        self.temp = temp
        self.temp_m = temp_m
        self.initial_temp_m = initial_temp_m
        self.final_temp_m = temp_m

    def _precompute_mask(self) -> None:
        # The negatives mask only depends on the local batch size, queue size
        # and world topology, so it is built once before training starts.
        batch_size = self.trainer.datamodule.train_local_batch_size
        self.mask = compute_ressl_mask(
            batch_size=batch_size,
            num_negatives=(self.queue.shape[1] if (self.queue is not None) else 0),
            use_keys=self.use_keys,
            rank=self.global_rank,
            world_size=self.trainer.world_size,
            device=self.device,
        )

    def on_fit_start(self) -> None:
        super().on_fit_start()
        self._precompute_mask()

    def compute_loss(self, q: Tensor, k: Tensor, k_global: Tensor, queue: (Tensor | None)) -> Tensor:
        """Compute the ReSSL loss.

        Args:
            q: The representations of the queries.
            k: The representations of the keys.
            k_global: The global representations of the keys.
            queue: The queue of representations if not None.

        Returns:
            The loss.
        """
        k_loss = (k_global if self.use_keys else k)
        loss = compute_ressl_loss(
            q=q,
            k=k,
            k_global=k_loss,
            use_keys=self.use_keys,
            queue=queue,
            mask=self.mask,
            temp=self.temp,
            temp_m=self.temp_m,
            LARGE_NUM=LARGE_NUM,
        )
        return loss
class SCEModel(ShuffleMomentumQueueBaseModel):
    """SCE model.

    References:
        - SCE: https://arxiv.org/pdf/2111.14585.pdf

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per branch per crop resolution.
        initial_momentum: Initial value for the momentum update.
        scheduler_momentum: Rule to update the momentum value.
        shuffle_bn: If ``True``, apply shuffle normalization trick from MoCo.
        num_devices: Number of devices used to train the model in each node.
        simulate_n_devices: Number of devices to simulate to apply shuffle trick.
            Requires ``shuffle_bn`` to be ``True`` and ``num_devices`` to be :math:`1`.
        queue: Config to build a queue.
        sym: If ``True``, symmetrised the loss.
        use_keys: If ``True``, add keys to negatives.
        temp: Temperature parameter to scale the online similarities.
        temp_m: Temperature parameter to scale the target similarities. Initial value if warmup applied.
        start_warmup_temp_m: Initial temperature parameter to scale the target similarities in case of warmup.
        warmup_epoch_temp_m: Number of warmup epochs for the target temperature.
        warmup_scheduler_temp_m: Type of scheduler for warming up the target temperature.
            Options are: ``'linear'``, ``'cosine'``.
        coeff: Coeff parameter between InfoNCE and relational aspects.
        warmup_scheduler_coeff: Type of scheduler for warming up the coefficient. Options are: ``'linear'``, ``'cosine'``.
        warmup_epoch_coeff: Number of warmup epochs for coefficient.
        start_warmup_coeff: Starting value of coefficient for warmup.
        scheduler_coeff: Type of scheduler for coefficient after warmup. Options are: ``'linear'``, ``'cosine'``.
        final_scheduler_coeff: Final value of scheduler coefficient.
    """

    def __init__(
        self,
        trunk: DictConfig,
        optimizer: DictConfig,
        projector: Optional[DictConfig] = None,
        predictor: Optional[DictConfig] = None,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        normalize_outputs: bool = True,
        num_global_crops: int = 2,
        num_local_crops: int = 0,
        num_splits: int = 0,
        num_splits_per_combination: int = 2,
        mutual_pass: bool = False,
        # Annotation fixed from `int` to `float`: the default 0.999 is a float.
        initial_momentum: float = 0.999,
        scheduler_momentum: str = 'constant',
        shuffle_bn: bool = True,
        num_devices: int = 1,
        simulate_n_devices: int = 8,
        queue: Optional[DictConfig] = None,
        sym: bool = False,
        use_keys: bool = False,
        temp: float = 0.1,
        temp_m: float = 0.05,
        start_warmup_temp_m: float = 0.05,
        warmup_epoch_temp_m: int = 0,
        # Annotations fixed from `Optional[int]` to `Optional[str]`: the
        # defaults are scheduler names ('cosine' / 'linear').
        warmup_scheduler_temp_m: Optional[str] = 'cosine',
        coeff: float = 0.5,
        warmup_scheduler_coeff: Optional[str] = 'linear',
        warmup_epoch_coeff: int = 0,
        start_warmup_coeff: float = 1.0,
        scheduler_coeff: Optional[str] = None,
        final_scheduler_coeff: float = 0.0,
    ) -> None:
        super().__init__(
            trunk=trunk,
            optimizer=optimizer,
            projector=projector,
            predictor=predictor,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            normalize_outputs=normalize_outputs,
            num_global_crops=num_global_crops,
            num_local_crops=num_local_crops,
            num_splits=num_splits,
            num_splits_per_combination=num_splits_per_combination,
            mutual_pass=mutual_pass,
            initial_momentum=initial_momentum,
            scheduler_momentum=scheduler_momentum,
            shuffle_bn=shuffle_bn,
            num_devices=num_devices,
            simulate_n_devices=simulate_n_devices,
            queue=queue,
            sym=sym,
            use_keys=use_keys,
        )
        self.save_hyperparameters()
        self.temp = temp
        self.temp_m = temp_m
        self.start_warmup_temp_m = start_warmup_temp_m
        self.final_temp_m = temp_m
        self.warmup_scheduler_temp_m = warmup_scheduler_temp_m
        self.warmup_epoch_temp_m = warmup_epoch_temp_m
        self.coeff = coeff
        self.initial_coeff = coeff
        self.warmup_scheduler_coeff = warmup_scheduler_coeff
        self.warmup_epoch_coeff = warmup_epoch_coeff
        self.start_warmup_coeff = start_warmup_coeff
        self.scheduler_coeff = scheduler_coeff
        self.final_scheduler_coeff = final_scheduler_coeff

    def _precompute_mask(self) -> None:
        # The mask only depends on batch size, queue size and world topology,
        # so it is built once before training starts.
        batch_size = self.trainer.datamodule.train_local_batch_size
        self.mask = compute_sce_mask(
            batch_size=batch_size,
            num_negatives=(self.queue.shape[1] if (self.queue is not None) else 0),
            use_keys=self.use_keys,
            rank=self.global_rank,
            world_size=self.trainer.world_size,
            device=self.device,
        )

    def on_fit_start(self) -> None:
        super().on_fit_start()
        self._precompute_mask()

    def compute_loss(self, q: Tensor, k: Tensor, k_global: Tensor, queue: (Tensor | None)) -> Tensor:
        """Compute the SCE loss for several tokens as output.

        Args:
            q: The representations of the queries.
            k: The representations of the keys.
            k_global: The global representations of the keys.
            queue: The queue of representations if not None.

        Returns:
            The loss.
        """
        k_loss = (k_global if self.use_keys else k)
        loss = compute_sce_loss(
            q=q,
            k=k,
            k_global=k_loss,
            use_keys=self.use_keys,
            queue=queue,
            mask=self.mask,
            coeff=self.coeff,
            temp=self.temp,
            temp_m=self.temp_m,
            LARGE_NUM=LARGE_NUM,
        )
        return loss

    def on_train_batch_start(self, batch: Any, batch_idx: int) -> None:
        # Warmup of the target temperature: ramp from start value to final
        # value over the warmup epochs, then hold.
        if (self.warmup_epoch_temp_m > 0):
            if (self.current_epoch >= self.warmup_epoch_temp_m):
                self.temp_m = self.final_temp_m
            else:
                self.temp_m = scheduler_value(
                    self.warmup_scheduler_temp_m,
                    self.start_warmup_temp_m,
                    self.final_temp_m,
                    self.global_step,
                    ((self.warmup_epoch_temp_m * self.training_steps_per_epoch) - 1),
                )
        # Warmup of the coefficient, same scheme.
        if (self.warmup_epoch_coeff > 0):
            if (self.current_epoch >= self.warmup_epoch_coeff):
                self.coeff = self.initial_coeff
            else:
                self.coeff = scheduler_value(
                    self.warmup_scheduler_coeff,
                    self.start_warmup_coeff,
                    self.initial_coeff,
                    self.global_step,
                    ((self.warmup_epoch_coeff * self.training_steps_per_epoch) - 1),
                )
        # Post-warmup decay of the coefficient towards final_scheduler_coeff.
        # The schedule's step counter is offset by the warmup steps when a
        # warmup was configured.
        if (self.scheduler_coeff is not None):
            if (self.warmup_epoch_coeff > 0):
                if (self.current_epoch >= self.warmup_epoch_coeff):
                    self.coeff = scheduler_value(
                        self.scheduler_coeff,
                        self.initial_coeff,
                        self.final_scheduler_coeff,
                        (self.global_step - (self.warmup_epoch_coeff * self.training_steps_per_epoch)),
                        (((self.trainer.max_epochs - self.warmup_epoch_coeff) * self.training_steps_per_epoch) - 1),
                    )
            else:
                self.coeff = scheduler_value(
                    self.scheduler_coeff,
                    self.initial_coeff,
                    self.final_scheduler_coeff,
                    self.global_step,
                    ((self.trainer.max_epochs * self.training_steps_per_epoch) - 1),
                )
        self.log('pretrain/temp', self.temp, on_step=True, on_epoch=True)
        self.log('pretrain/temp_m', self.temp_m, on_step=True, on_epoch=True)
        self.log('pretrain/coeff', self.coeff, on_step=True, on_epoch=True)
        return
class ShuffleMomentumSiameseBaseModel(MomentumSiameseBaseModel, ABC):
    """Abstract class to represent siamese models with a momentum branch and possibility to shuffle input elements
    in momentum branch to apply normalization trick from MoCo.

    Subclasses should implement training_step method.

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        predictor: Config to build a predictor.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per branch per crop resolution.
        initial_momentum: Initial value for the momentum update.
        scheduler_momentum: Rule to update the momentum value.
        shuffle_bn: If ``True``, apply shuffle normalization trick from MoCo.
        num_devices: Number of devices used to train the model in each node.
        simulate_n_devices: Number of devices to simulate to apply shuffle trick.
            Requires ``shuffle_bn`` to be ``True`` and ``num_devices`` to be :math:`1`.
    """

    def __init__(
        self,
        trunk: DictConfig,
        optimizer: DictConfig,
        projector: Optional[DictConfig] = None,
        predictor: Optional[DictConfig] = None,
        train_transform: Optional[DictConfig] = None,
        val_transform: Optional[DictConfig] = None,
        test_transform: Optional[DictConfig] = None,
        normalize_outputs: bool = True,
        num_global_crops: int = 2,
        num_local_crops: int = 0,
        num_splits: int = 0,
        num_splits_per_combination: int = 2,
        mutual_pass: bool = False,
        # Annotation fixed from `int` to `float`: the default 0.996 is a float.
        initial_momentum: float = 0.996,
        scheduler_momentum: str = 'cosine',
        shuffle_bn: bool = True,
        num_devices: int = 1,
        simulate_n_devices: int = 8,
    ) -> None:
        super().__init__(
            trunk=trunk,
            optimizer=optimizer,
            projector=projector,
            predictor=predictor,
            train_transform=train_transform,
            val_transform=val_transform,
            test_transform=test_transform,
            normalize_outputs=normalize_outputs,
            num_global_crops=num_global_crops,
            num_local_crops=num_local_crops,
            num_splits=num_splits,
            num_splits_per_combination=num_splits_per_combination,
            mutual_pass=mutual_pass,
            initial_momentum=initial_momentum,
            scheduler_momentum=scheduler_momentum,
        )
        self.save_hyperparameters()
        self.num_devices = num_devices
        self.shuffle_bn = shuffle_bn
        self.simulate_n_devices = simulate_n_devices
        if ((self.num_devices == (- 1)) and self.shuffle_bn):
            rank_zero_info(f'In {__class__.__name__} when num_devices=-1, it is assumed that there are more than one device.')
        elif ((self.num_devices <= 1) and self.shuffle_bn):
            if (self.simulate_n_devices <= 1):
                # BUG FIX: the original constructed this exception without
                # raising it, so the invalid configuration went undetected.
                raise AttributeError('if num_devices is 1 and shuffle_bn is True, the simulate_n_devices attribute should be superior to 1')
            # On a single device, simulate multi-device batch norm by
            # splitting batch-norm statistics across virtual devices.
            self.momentum_trunk = convert_to_split_batchnorm(self.momentum_trunk, self.simulate_n_devices)
            if (self.momentum_projector is not None):
                self.momentum_projector = convert_to_split_batchnorm(self.momentum_projector, self.simulate_n_devices)

    def on_train_start(self):
        # Resolve the actual device count from the trainer (handles the -1
        # "auto" value passed at construction time).
        old_num_devices = self.num_devices
        self.num_devices = get_num_devices_in_trainer(self.trainer)
        if (old_num_devices != self.num_devices):
            rank_zero_info(f'Num devices passed to {__class__.__name__}: {old_num_devices} has been updated to {self.num_devices}.')

    @torch.no_grad()
    def momentum_shared_step(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Momentum shared step that call either '_momentum_shared_step_n_devices' or
        '_momentum_shared_step_single_device' depending the number of devices used for training.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        if (self.num_devices > 1):
            return self._momentum_shared_step_n_devices(x)
        else:
            return self._momentum_shared_step_single_device(x)

    @torch.no_grad()
    def _momentum_shared_step_n_devices(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Momentum shared step with several devices passing input tensor in momentum trunk and momentum projector.
        If shuffle_bn is True, it gathers and shuffles the input tensors across devices following MoCo batch norm
        trick.

        *** Only support DistributedDataParallel (DDP) model. ***

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        if self.shuffle_bn:
            (x, idx_unshuffle) = self._batch_shuffle_ddp(x)
        h = self.momentum_trunk(x)
        z = (self.momentum_projector(h) if (self.momentum_projector is not None) else h)
        if self.shuffle_bn:
            z = self._batch_unshuffle_ddp(z, idx_unshuffle)
        if self.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}

    @torch.no_grad()
    def _momentum_shared_step_single_device(self, x: Tensor) -> Dict[(str, Tensor)]:
        """Momentum shared step with one device passing input tensor in momentum trunk and momentum projector. If
        shuffle_bn is True, it shuffles the input tensor across device following MoCo batch norm trick which is
        simulated in the trunk.

        Args:
            x: The input tensor.

        Returns:
            The computed representations.
        """
        if self.shuffle_bn:
            (x, idx_unshuffle) = self._batch_shuffle_single_device(x)
        h = self.momentum_trunk(x)
        z = (self.momentum_projector(h) if (self.momentum_projector is not None) else h)
        if self.shuffle_bn:
            z = self._batch_unshuffle_single_device(z, idx_unshuffle)
        if self.normalize_outputs:
            z = nn.functional.normalize(z, dim=1)
        return {'h': h, 'z': z}

    @torch.no_grad()
    def _batch_shuffle_ddp(self, x: Tensor) -> Tuple[(Tensor, Tensor)]:
        """Shuffle the input tensor along first dimension across devices.

        *** Only support DistributedDataParallel (DDP) model. ***

        Args:
            x: The input tensor.

        Returns:
            The shuffled tensor and the unshuffle indices.
        """
        # Gather the full global batch, draw one shared permutation on rank 0
        # and broadcast it so every rank applies the same shuffle.
        x_gather = concat_all_gather_without_backprop(x)
        batch_size_all = x_gather.shape[0]
        idx_shuffle = torch.randperm(batch_size_all, device=self.device)
        torch.distributed.broadcast(idx_shuffle, src=0)
        idx_unshuffle = torch.argsort(idx_shuffle)
        gpu_idx = torch.distributed.get_rank()
        # Each rank keeps its slice of the shuffled global batch.
        idx_this = idx_shuffle.view(self.num_devices, (- 1))[gpu_idx]
        return (x_gather[idx_this], idx_unshuffle)

    @torch.no_grad()
    def _batch_unshuffle_ddp(self, x: Tensor, idx_unshuffle: Tensor) -> Tensor:
        """Unshuffle the shuffled tensor along first dimension across devices.

        *** Only support DistributedDataParallel (DDP) model. ***

        Args:
            x: The shuffled tensor.
            idx_unshuffle: The unshuffle indices to retrieve original tensor before its shuffling.

        Returns:
            The unshuffled tensor.
        """
        x_gather = concat_all_gather_without_backprop(x)
        gpu_idx = torch.distributed.get_rank()
        idx_this = idx_unshuffle.view(self.num_devices, (- 1))[gpu_idx]
        return x_gather[idx_this]

    @torch.no_grad()
    def _batch_shuffle_single_device(self, x: Tensor) -> Tuple[(Tensor, Tensor)]:
        """Shuffle the input tensor along first dimension on current device.

        Args:
            x: The input tensor.

        Returns:
            The shuffled tensor and the unshuffle indices.
        """
        batch_size = x.shape[0]
        idx_shuffle = torch.randperm(batch_size, device=self.device)
        idx_unshuffle = torch.argsort(idx_shuffle)
        return (x[idx_shuffle], idx_unshuffle)

    @torch.no_grad()
    def _batch_unshuffle_single_device(self, x: Tensor, idx_unshuffle: Tensor) -> Tensor:
        """Unshuffle the shuffled tensor along first dimension on current device.

        Args:
            x: The shuffled tensor.
            idx_unshuffle: The unshuffle indices to retrieve original tensor before its shuffling.

        Returns:
            The unshuffled tensor.
        """
        return x[idx_unshuffle]
class SimCLRModel(SiameseBaseModel):
    """SimCLR model with version 1, 2 that can be configured.

    References:
        - SimCLR: https://arxiv.org/abs/2002.05709
        - SimCLRv2: https://arxiv.org/abs/2006.10029

    Args:
        trunk: Config to build a trunk.
        optimizer: Config to build optimizers and schedulers.
        projector: Config to build a projector.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        normalize_outputs: If ``True``, normalize outputs.
        num_global_crops: Number of global crops which are the first elements of each batch.
        num_local_crops: Number of local crops which are the last elements of each batch.
        num_splits: Number of splits to apply to each crops.
        num_splits_per_combination: Number of splits used for combinations of features of each split.
        mutual_pass: If ``True``, perform one pass per crop resolution.
        temp: Temperature parameter to scale the online similarities.
    """

    def __init__(self, trunk: DictConfig, optimizer: DictConfig, projector: Optional[DictConfig]=None, train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, normalize_outputs: bool=True, num_global_crops: int=2, num_local_crops: int=0, num_splits: int=0, num_splits_per_combination: int=2, mutual_pass: bool=False, temp: float=0.1) -> None:
        # SimCLR has no predictor head: pass predictor=None to the siamese base.
        super().__init__(trunk=trunk, optimizer=optimizer, projector=projector, train_transform=train_transform, val_transform=val_transform, test_transform=test_transform, num_global_crops=num_global_crops, num_local_crops=num_local_crops, num_splits=num_splits, num_splits_per_combination=num_splits_per_combination, mutual_pass=mutual_pass, predictor=None, normalize_outputs=normalize_outputs)
        # Feature-split training is not part of the SimCLR objective.
        assert (not self.use_split), 'Splits not supported for SimCLR'
        self.temp = temp
        self.save_hyperparameters()

    def _precompute_mask(self) -> None:
        """Precompute the positive/negative pair masks for the InfoNCE loss.

        Depends on the per-device batch size, number of crops and the process
        rank, so it must run after the trainer/datamodule are attached.
        """
        batch_size = self.trainer.datamodule.train_local_batch_size
        (self.pos_mask, self.neg_mask) = compute_simclr_masks(batch_size=batch_size, num_crops=self.num_crops, rank=self.global_rank, world_size=self.trainer.world_size, device=self.device)

    def on_fit_start(self) -> None:
        """Hook: build the contrastive masks once the trainer context exists."""
        super().on_fit_start()
        self._precompute_mask()

    def compute_loss(self, z: Tensor, z_global: Tensor) -> Tensor:
        """Compute the SimCLR loss.

        ``z_global`` is provided and not computed in the loss to prevent multiple
        gathering of ``z`` that require synchronisation among processes.

        Args:
            z: The representations of all crops.
            z_global: The global representations of all crops. Aggregated on all devices.

        Returns:
            The loss.
        """
        return compute_simclr_loss(z, z_global, self.pos_mask, self.neg_mask, self.temp)

    def training_step(self, batch: Iterable[Any], batch_idx: int) -> Dict[str, Tensor]:
        """One pretraining step: encode all crops, gather across devices, compute InfoNCE."""
        X = batch['input']
        # One tensor per crop is expected (global crops first, then local crops).
        assert (len(X) == self.num_crops)
        if (self.train_transform is not None):
            # GPU-side augmentations are applied without gradients and in fp32
            # (autocast disabled) to keep color/normalization ops numerically stable.
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    X = self.train_transform(X)
        outs_online = self.multi_crop_shared_step(X)
        # Concatenate the projections of every crop into a single tensor.
        z = torch.cat([out_online['z'] for out_online in outs_online])
        # Differentiable all-gather so negatives come from every device.
        z_global = concat_all_gather_with_backprop(z)
        loss = self.compute_loss(z, z_global)
        outputs = {'loss': loss}
        # Expose the first crop's outputs (h, z) for downstream callbacks,
        # detached so they do not keep the graph alive.
        outputs.update(outs_online[0])
        for (name_output, output) in outputs.items():
            if (name_output != 'loss'):
                outputs[name_output] = output.detach()
        self.log('pretrain/loss', outputs['loss'], prog_bar=True, on_step=True, on_epoch=True)
        return outputs
class SoccerNetSpottingModel(EztorchBaseModule):
    """Model to perform action spotting on SoccerNet.

    Args:
        trunk: Config to build a trunk.
        head_class: Config to build a head for classification.
        optimizer: Config to build an optimizer for trunk.
        pretrained_path: Path to the pretrained model.
        pretrained_trunk_path: Path to the pretrained trunk file.
        loss_fn_args: Arguments for the loss function.
        prediction_args: Arguments to configure predictions.
        eval_step_timestamp: Step between each timestamp.
        trunk_pattern: Pattern to retrieve the trunk model in checkpoint state_dict and delete the key.
        freeze_trunk: Whether to freeze the trunk.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        save_val_preds_path: Path to store the validation predictions.
        save_test_preds_path: Path to store the test predictions.
        NMS_args: Arguments to configure the NMS.
        evaluation_args: Arguments to configure the evaluation.
        do_compile: If ``True``, wrap the trunk and the loss with ``torch.compile``.

    Example::

        trunk = {...}  # config to build a trunk
        head_class = {...}  # config to build a head for classification
        optimizer = {...}  # config to build an optimizer
        pretrained_trunk_path = ...  # path where the trunk has been saved

        model = SoccerNetSpottingModel(trunk, head_class, optimizer, pretrained_trunk_path)
    """

    def __init__(self, trunk: DictConfig, head_class: DictConfig, optimizer: DictConfig, pretrained_path: (str | None) = None, pretrained_trunk_path: (str | None) = None, loss_fn_args: DictConfig = DictConfig({}), prediction_args: DictConfig = DictConfig({}), eval_step_timestamp: float = 1.0, trunk_pattern: str = '^(trunk\\.)', freeze_trunk: bool = False, train_transform: (DictConfig | None) = None, val_transform: (DictConfig | None) = None, test_transform: (DictConfig | None) = None, save_val_preds_path: (str | Path) = 'val_preds/', save_test_preds_path: (str | Path) = 'test_preds/', NMS_args: DictConfig = DictConfig({'window': 10, 'threshold': 0.001}), evaluation_args: DictConfig = DictConfig({}), do_compile: bool = False) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.optimizer_cfg = optimizer
        self.freeze_trunk = freeze_trunk
        self.trunk: nn.Module = hydra.utils.instantiate(trunk)
        if pretrained_trunk_path is not None:
            # Extract only the trunk weights from the Lightning checkpoint and
            # strip the 'trunk.' prefix so keys match the instantiated module.
            trunk_state_dict = get_sub_state_dict_from_pl_ckpt(checkpoint_path=pretrained_trunk_path, pattern=trunk_pattern)
            trunk_state_dict = remove_pattern_in_keys_from_dict(d=trunk_state_dict, pattern=trunk_pattern)
            missing_keys, unexpected_keys = self.trunk.load_state_dict(trunk_state_dict, strict=False)
            rank_zero_info(f'''Loaded {__class__.__name__} from pretrained trunk weights model.
missing_keys:{missing_keys} unexpected_keys:{unexpected_keys}''')
        self.head_class: nn.Module = hydra.utils.instantiate(head_class)
        self.loss_fn = compute_spot_loss_fn
        if do_compile:
            # NOTE(review): unlike SpottingModel, head_class is not compiled
            # here — confirm this asymmetry is intentional.
            self.trunk = torch.compile(self.trunk)
            self.loss_fn = torch.compile(self.loss_fn)
        self.train_transform = hydra.utils.instantiate(train_transform) if train_transform is not None else None
        self.val_transform = hydra.utils.instantiate(val_transform) if val_transform is not None else None
        self.test_transform = hydra.utils.instantiate(test_transform) if test_transform is not None else None
        self.loss_fn_args = dict(loss_fn_args)
        # Predictions closer than this (in seconds) to a clip border are discarded
        # at inference; -1.0 disables the filtering.
        self.remove_inference_prediction_seconds = prediction_args.get('remove_inference_prediction_seconds', (- 1.0))
        self.merge_predictions_type = prediction_args.get('merge_predictions_type', 'max')
        self.eval_step_timestamp = eval_step_timestamp
        self.save_val_preds_path = Path(save_val_preds_path)
        self.save_test_preds_path = Path(save_test_preds_path)
        self.NMS_args = NMS_args
        # Shallow copy (as done for loss_fn_args) so neither the caller's config
        # nor the shared mutable default DictConfig({}) is mutated below.
        self.evaluation_args = dict(evaluation_args)
        # 'metric' is superseded by 'metrics': drop it if present. The previous
        # unconditional pop raised KeyError on configs without the key.
        self.evaluation_args.pop('metric', None)
        # The evaluation toolkit expects the split name 'valid', not 'val'.
        if self.evaluation_args.get('split', None) == 'val':
            self.evaluation_args['split'] = 'valid'
        if pretrained_path is not None:
            state_dict = torch.load(pretrained_path, map_location='cpu')
            if 'state_dict' in state_dict:
                # Unwrap Lightning checkpoints. (Bug fix: previously assigned the
                # literal list ['state_dict'] instead of indexing the mapping.)
                state_dict = state_dict['state_dict']
            missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)
            rank_zero_info(f'''Loaded {__class__.__name__} from pretrained torch model.
missing_keys:{missing_keys} unexpected_keys:{unexpected_keys}''')
            rank_zero_info(f'{__class__.__name__} loaded trunk weights from {pretrained_trunk_path}.')
        if self.freeze_trunk:
            for param in self.trunk.parameters():
                param.requires_grad = False

    @property
    def learnable_params(self) -> List[Parameter]:
        """Learnable parameters of the model (head, plus trunk unless frozen)."""
        params: List[Parameter] = []
        if not self.freeze_trunk:
            params.extend(self.trunk.parameters())
        params.extend(self.head_class.parameters())
        return params

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        """Total training steps inferred from datamodule and devices."""
        if (self.trainer.datamodule is not None) and (self.trainer.datamodule.train_num_samples > 0):
            return self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size
        return None

    def on_fit_start(self) -> None:
        """Resolve per-class loss weights from the training dataset (needs device)."""
        class_weights = self.loss_fn_args.get('class_weights', {'type': None})
        self.loss_fn_args['class_weights'] = get_soccernet_weights(self.trainer.datamodule.train_dataset, device=self.device, **class_weights)

    def on_validation_epoch_start(self) -> None:
        """Allocate the per-rank prediction buffers for the validation videos of this shard."""
        shard_indices = get_shard_indices(self.trainer.datamodule.val_dataset.num_videos, shuffle_shards=False)
        if len(shard_indices) == 0:
            # This rank holds no videos: sentinel indices, no buffer.
            self.min_video_index = self.max_video_index = -1
            self.predictions = None
            return
        self.min_video_index = min(shard_indices)
        self.max_video_index = max(shard_indices)
        self.predictions = initialize_predictions(self.trainer.datamodule.val_dataset, self.eval_step_timestamp, self.max_video_index, self.min_video_index, self.device)
        return

    def on_test_start(self) -> None:
        """Allocate the per-rank prediction buffers for the test videos of this shard."""
        shard_indices = get_shard_indices(self.trainer.datamodule.test_dataset.num_videos, shuffle_shards=False)
        if len(shard_indices) == 0:
            self.min_video_index = self.max_video_index = -1
            self.predictions = None
            return
        self.min_video_index = min(shard_indices)
        self.max_video_index = max(shard_indices)
        self.predictions = initialize_predictions(self.trainer.datamodule.test_dataset, self.eval_step_timestamp, self.max_video_index, self.min_video_index, self.device)
        return

    def on_train_epoch_start(self) -> None:
        # Keep frozen trunk in eval mode (e.g. frozen batch-norm statistics).
        if self.freeze_trunk:
            self.trunk.eval()

    def forward(self, x: Tensor) -> Dict[str, Any]:
        """Encode the input and classify; returns features 'h' and logits 'class_preds'."""
        h = self.trunk(x)
        class_preds = self.head_class(h)
        return {'class_preds': class_preds, 'h': h}

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return self.num_layers_class

    @property
    def num_layers_class(self) -> int:
        """Number of layers of trunk plus classification head."""
        return self.trunk.num_layers + self.head_class.num_layers

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.
        """
        if name.startswith('trunk.'):
            return self.trunk.get_param_layer_id(name[len('trunk.'):])
        elif name.startswith('head_class.'):
            # Head layers are numbered after all trunk layers.
            return self.trunk.num_layers + self.head_class.get_param_layer_id(name[len('head_class.'):])
        else:
            raise NotImplementedError(f'{name} should not have been used.')

    def configure_optimizers(self) -> Dict[Any, Any]:
        """Instantiate optimizer (and optional scheduler) from the hydra config."""
        optimizer, scheduler = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch), model=self)
        if scheduler is None:
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def shared_step(self, x: Tensor, inversed_temporal_masked_indices: (Tensor | None)):
        """Run trunk (without grad if frozen) then the classification head."""
        if self.freeze_trunk:
            with torch.no_grad():
                if inversed_temporal_masked_indices is not None:
                    h: Tensor = self.trunk(x, inversed_temporal_masked_indices=inversed_temporal_masked_indices)
                else:
                    h = self.trunk(x)
        elif inversed_temporal_masked_indices is not None:
            h = self.trunk(x, inversed_temporal_masked_indices=inversed_temporal_masked_indices)
        else:
            h = self.trunk(x)
        if h.ndim == 2:
            # Add a temporal dimension of size 1 for heads expecting (B, T, C).
            h = h.reshape(h.shape[0], 1, h.shape[1])
        class_preds: Tensor = self.head_class(h)
        return class_preds

    def training_step(self, batch: Dict[Any, Any], batch_idx: int) -> Tensor:
        if self.train_transform is not None:
            # GPU augmentations without grad, in fp32 (autocast disabled).
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    batch = self.train_transform(batch)
        x, labels, has_label, ignore_class = (batch['input'], batch['labels'], batch['has_label'], batch['ignore_class'])
        inversed_temporal_masked_indices: (Tensor | None) = batch.get('inversed_temporal_masked_indices', None)
        class_preds = self.shared_step(x, inversed_temporal_masked_indices)
        loss = self.loss_fn(class_preds=class_preds, class_target=labels, ignore_class=ignore_class, has_label=has_label, mixup_weights=batch.get('mixup_weights', None), **self.loss_fn_args)
        self.log('train/loss', loss, prog_bar=True, on_step=True, on_epoch=True, sync_dist=False)
        return loss

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if self.val_transform is not None:
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    batch = self.val_transform(batch)
        x, timestamps, labels, has_label, ignore_class, video_indexes, halves_indexes, halves_duration = (batch['input'], batch['timestamps'], batch['labels'], batch['has_label'], batch['ignore_class'], batch['video_index'], batch['half_index'], batch['half_duration'])
        # Number of prediction timestamps per half at the evaluation step.
        num_timestamps = (torch.round(halves_duration / self.eval_step_timestamp) - 1).to(dtype=torch.long)
        class_preds = self.shared_step(x, inversed_temporal_masked_indices=None)
        loss = self.loss_fn(class_preds=class_preds, class_target=labels, ignore_class=ignore_class, has_label=has_label, mixup_weights=batch.get('mixup_weights', None), **self.loss_fn_args)
        class_preds = class_preds.sigmoid()
        # Keep only clips belonging to videos owned by this rank's shard.
        kept_tensors = aggregate_and_filter_clips(class_preds, timestamps, num_timestamps, video_indexes, halves_indexes, self.max_video_index, self.min_video_index)
        if kept_tensors is not None:
            class_preds, timestamps, num_timestamps, video_indexes, halves_indexes = kept_tensors
            add_clips_predictions(self.predictions, class_preds, timestamps, num_timestamps, video_indexes, halves_indexes, self.eval_step_timestamp, self.remove_inference_prediction_seconds, self.merge_predictions_type)
        self.log('val/loss', loss, prog_bar=True, on_step=False, on_epoch=True, sync_dist=True)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if self.test_transform is not None:
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    batch = self.test_transform(batch)
        x, timestamps, halves_duration, video_indexes, halves_indexes = (batch['input'], batch['timestamps'], batch['half_duration'], batch['video_index'], batch['half_index'])
        num_timestamps = (torch.round(halves_duration / self.eval_step_timestamp) - 1).to(dtype=torch.long)
        class_preds = self.shared_step(x, inversed_temporal_masked_indices=None)
        class_preds = class_preds.sigmoid()
        kept_tensors = aggregate_and_filter_clips(class_preds, timestamps, num_timestamps, video_indexes, halves_indexes, self.max_video_index, self.min_video_index)
        if kept_tensors is not None:
            class_preds, timestamps, num_timestamps, video_indexes, halves_indexes = kept_tensors
            add_clips_predictions(self.predictions, class_preds, timestamps, num_timestamps, video_indexes, halves_indexes, self.eval_step_timestamp, self.remove_inference_prediction_seconds, self.merge_predictions_type)
        # No loss at test time; a dummy value keeps the Lightning contract.
        return 0

    def _make_evaluation(self, predictions_path: (str | Path), task: SoccerNetTask = SoccerNetTask.ACTION, logger: bool = False) -> None:
        """Run the SoccerNet mAP evaluation on saved predictions and log/print results.

        Evaluation runs on rank 0 only; other ranks log placeholder zeros so the
        logging calls stay symmetric across processes.
        """
        if self.evaluation_args.get('split', None) not in ['valid', 'test']:
            return
        # Task-dependent evaluation constants.
        if task == SoccerNetTask.ACTION:
            dataset = 'SoccerNet'
            eval_task = 'spotting'
            num_classes = 17
            label_files = 'Labels-v2.json'
            framerate = 2
            REVERSE_LABELS = REVERSE_ACTION_SPOTTING_LABELS
            default_metrics = ['tight', 'at1', 'at2', 'at3', 'at4', 'at5', 'loose']
        elif task == SoccerNetTask.BALL:
            dataset = 'Ball'
            eval_task = 'spotting'
            num_classes = 2
            label_files = 'Labels-ball.json'
            framerate = 25
            REVERSE_LABELS = REVERSE_BALL_SPOTTING_LABELS
            default_metrics = ['tight', 'at1', 'at2', 'at3', 'at4', 'at5']
        # Temporarily removed from evaluation_args so it is not forwarded to evaluate().
        metrics = self.evaluation_args.pop('metrics', default_metrics)
        for metric in metrics:
            log_result = (self.global_rank == 0)
            if log_result:
                results = evaluate(Predictions_path=str(predictions_path), dataset=dataset, task=eval_task, label_files=label_files, num_classes=num_classes, framerate=framerate, metric=metric, **self.evaluation_args)
            if logger:
                self.log(f'{metric}_Average_mAP/mAP', (results['a_mAP'] if log_result else 0), prog_bar=True, on_step=False, on_epoch=True, sync_dist=False)
            rank_zero_info(f"{metric}_Average_mAP/mAP: {(results['a_mAP'] if log_result else 0):.04f}")
            for c in range(num_classes):
                map_class = (results['a_mAP_per_class'][c] if log_result else 0)
                if logger:
                    self.log(f'{metric}_Average_mAP/mAP_{REVERSE_LABELS[c]}', map_class, on_step=False, on_epoch=True, sync_dist=False)
                rank_zero_info(f'{metric}_Average_mAP/mAP_{REVERSE_LABELS[c]}: {map_class:.04f}')
            if logger:
                self.log(f'{metric}_Average_mAP_visible/mAP', (results['a_mAP_visible'] if log_result else 0), on_step=False, on_epoch=True, sync_dist=False)
            rank_zero_info(f"{metric}_Average_mAP_visible/mAP: {(results['a_mAP_visible'] if log_result else 0):.04f}")
            for c in range(num_classes):
                map_class = (results['a_mAP_per_class_visible'][c] if log_result else 0)
                if logger:
                    self.log(f'{metric}_Average_mAP_visible/mAP_{REVERSE_LABELS[c]}', map_class, on_step=False, on_epoch=True, sync_dist=False)
                rank_zero_info(f'{metric}_Average_mAP_visible/mAP_{REVERSE_LABELS[c]}: {map_class:.04f}')
            if logger:
                self.log(f'{metric}_Average_mAP_unshown/mAP', (results['a_mAP_unshown'] if log_result else 0), on_step=False, on_epoch=True, sync_dist=False)
            rank_zero_info(f"{metric}_Average_mAP_unshown/mAP: {(results['a_mAP_unshown'] if log_result else 0):.04f}")
            for c in range(num_classes):
                map_class = (results['a_mAP_per_class_unshown'][c] if log_result else 0)
                if logger:
                    self.log(f'{metric}_Average_mAP_unshown/mAP_{REVERSE_LABELS[c]}', map_class, on_step=False, on_epoch=True, sync_dist=False)
                rank_zero_info(f'{metric}_Average_mAP_unshown/mAP_{REVERSE_LABELS[c]}: {map_class:.04f}')
        # Restore the metrics for subsequent evaluation calls.
        self.evaluation_args['metrics'] = metrics

    def on_validation_epoch_end(self) -> None:
        """Dump per-rank predictions, archive them on rank 0, then evaluate."""
        preds_path = self.save_val_preds_path / str(self.trainer.current_epoch)
        raw_preds_path = self.save_val_preds_path / f'{self.trainer.current_epoch}_raw'
        if self.predictions is not None:
            save_raw_spotting_predictions(self.predictions, raw_preds_path, make_zip=False)
            save_spotting_predictions(self.predictions, preds_path, self.trainer.datamodule.val_dataset, self.eval_step_timestamp, self.NMS_args, make_zip=False)
        if dist.is_available() and dist.is_initialized():
            # Wait until every rank has written its shard before archiving.
            dist.barrier()
        if self.global_rank == 0:
            shutil.make_archive(str(preds_path), 'zip', preds_path)
            shutil.make_archive(str(raw_preds_path), 'zip', raw_preds_path)
            shutil.rmtree(preds_path)
            shutil.rmtree(raw_preds_path)
        if dist.is_available() and dist.is_initialized():
            dist.barrier()
        if not self.trainer.sanity_checking:
            self._make_evaluation(self.save_val_preds_path / f'{self.trainer.current_epoch}.zip', task=self.trainer.datamodule.val_dataset.task, logger=True)
        return super().on_validation_epoch_end()

    def on_test_end(self) -> None:
        """Dump test predictions, archive them on rank 0, then evaluate."""
        raw_preds_path = self.save_test_preds_path.parent / f'{self.save_test_preds_path.name}_raw'
        save_raw_spotting_predictions(self.predictions, raw_preds_path, make_zip=False)
        save_spotting_predictions(self.predictions, self.save_test_preds_path, self.trainer.datamodule.test_dataset, self.eval_step_timestamp, self.NMS_args, make_zip=False)
        if dist.is_available() and dist.is_initialized():
            dist.barrier()
        if self.global_rank == 0:
            shutil.make_archive(str(self.save_test_preds_path), 'zip', self.save_test_preds_path)
            shutil.make_archive(str(raw_preds_path), 'zip', raw_preds_path)
            shutil.rmtree(self.save_test_preds_path)
            shutil.rmtree(raw_preds_path)
        if dist.is_available() and dist.is_initialized():
            dist.barrier()
        self._make_evaluation(self.save_test_preds_path.parent / f'{self.save_test_preds_path.name}.zip', task=self.trainer.datamodule.test_dataset.task, logger=False)
        return super().on_test_end()
class SpottingModel(EztorchBaseModule):
    """Model to perform spotting.

    Args:
        trunk: Config to build a trunk.
        head_class: Config to build a head for classification.
        optimizer: Config to build an optimizer for trunk.
        pretrained_trunk_path: Path to the pretrained trunk file.
        pretrained_path: Path to the pretrained model.
        prediction_args: Arguments to configure predictions.
        loss_fn_args: Arguments for the loss function.
        trunk_pattern: Pattern to retrieve the trunk model in checkpoint state_dict and delete the key.
        freeze_trunk: Whether to freeze the trunk.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        save_val_preds_path: Path to store the validation predictions.
        save_test_preds_path: Path to store the test predictions.
        NMS_args: Arguments to configure the NMS.
        evaluation_args: Arguments to configure the evaluation.

    Example::

        trunk = {...}  # config to build a trunk
        head_class = {...}  # config to build a head for classification
        optimizer = {...}  # config to build an optimizer
        pretrained_trunk_path = ...  # path where the trunk has been saved

        model = SpottingModel(trunk, head_class, optimizer, pretrained_trunk_path)
    """

    def __init__(self, trunk: DictConfig, head_class: DictConfig, optimizer: DictConfig, pretrained_path: (str | None)=None, pretrained_trunk_path: (str | None)=None, loss_fn_args: DictConfig=DictConfig({}), prediction_args: DictConfig=DictConfig({}), trunk_pattern: str='^(trunk\\.)', freeze_trunk: bool=False, train_transform: (DictConfig | None)=None, val_transform: (DictConfig | None)=None, test_transform: (DictConfig | None)=None, save_val_preds_path: (str | Path)='val_preds/', save_test_preds_path: (str | Path)='test_preds/', NMS_args: DictConfig=DictConfig({'window': 10, 'threshold': 0.001}), evaluation_args: DictConfig=DictConfig({}), do_compile: bool=False) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.optimizer_cfg = optimizer
        self.freeze_trunk = freeze_trunk
        self.trunk: nn.Module = hydra.utils.instantiate(trunk)
        if (pretrained_trunk_path is not None):
            # Extract the trunk weights from the Lightning checkpoint and strip
            # the 'trunk.' prefix so keys match the instantiated module.
            trunk_state_dict = get_sub_state_dict_from_pl_ckpt(checkpoint_path=pretrained_trunk_path, pattern=trunk_pattern)
            trunk_state_dict = remove_pattern_in_keys_from_dict(d=trunk_state_dict, pattern=trunk_pattern)
            (missing_keys, unexpected_keys) = self.trunk.load_state_dict(trunk_state_dict, strict=False)
            rank_zero_info(f'''Loaded {__class__.__name__} from pretrained trunk weights model.
missing_keys:{missing_keys} unexpected_keys:{unexpected_keys}''')
        self.head_class: nn.Module = hydra.utils.instantiate(head_class)
        self.loss_fn = compute_spot_loss_fn
        if do_compile:
            self.trunk = torch.compile(self.trunk)
            self.head_class = torch.compile(self.head_class)
            self.loss_fn = torch.compile(self.loss_fn)
        self.train_transform = (hydra.utils.instantiate(train_transform) if (train_transform is not None) else None)
        self.val_transform = (hydra.utils.instantiate(val_transform) if (val_transform is not None) else None)
        self.test_transform = (hydra.utils.instantiate(test_transform) if (test_transform is not None) else None)
        self.loss_fn_args = dict(loss_fn_args)
        # Predictions within this many frames of a clip border are dropped at
        # inference; -1.0 disables the filtering.
        self.remove_frames_predictions = prediction_args.get('remove_frames_predictions', (- 1.0))
        self.merge_predictions_type = prediction_args.get('merge_predictions_type', 'max')
        self.save_val_preds_path = Path(save_val_preds_path)
        self.save_test_preds_path = Path(save_test_preds_path)
        self.NMS_args = NMS_args
        self.evaluation_args = evaluation_args
        if (pretrained_path is not None):
            # NOTE(review): assumes a Lightning checkpoint with a 'state_dict'
            # key; a raw torch state_dict would raise KeyError here — confirm.
            state_dict = torch.load(pretrained_path, map_location='cpu')['state_dict']
            (missing_keys, unexpected_keys) = self.load_state_dict(state_dict, strict=False)
            rank_zero_info(f'''Loaded {__class__.__name__} from pretrained torch model.
missing_keys:{missing_keys} unexpected_keys:{unexpected_keys}''')
            rank_zero_info(f'{__class__.__name__} loaded trunk weights from {pretrained_trunk_path}.')
        if self.freeze_trunk:
            for param in self.trunk.parameters():
                param.requires_grad = False

    @property
    def learnable_params(self) -> List[Parameter]:
        'Learnable parameters of the model.'
        params = []
        if (not self.freeze_trunk):
            params.extend(self.trunk.parameters())
        params.extend(self.head_class.parameters())
        return params

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        'Total training steps inferred from datamodule and devices.'
        if ((self.trainer.datamodule is not None) and (self.trainer.datamodule.train_num_samples > 0)):
            return (self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size)
        else:
            return None

    def on_validation_epoch_start(self) -> None:
        # Allocate per-rank prediction buffers for this rank's shard of videos.
        # NOTE(review): unlike SoccerNetSpottingModel, an empty shard is not
        # guarded here; min()/max() would raise on an empty sequence — confirm.
        local_shard_indices = get_shard_indices(self.trainer.datamodule.val_dataset.num_videos, shuffle_shards=False)
        self.min_video_index = min(local_shard_indices)
        self.max_video_index = max(local_shard_indices)
        self.predictions = initialize_predictions(self.trainer.datamodule.val_dataset, self.max_video_index, self.min_video_index, self.device)
        return

    def on_test_start(self) -> None:
        # Same shard-buffer setup as validation, for the test dataset.
        local_shard_indices = get_shard_indices(self.trainer.datamodule.test_dataset.num_videos, shuffle_shards=False)
        self.min_video_index = min(local_shard_indices)
        self.max_video_index = max(local_shard_indices)
        self.predictions = initialize_predictions(self.trainer.datamodule.test_dataset, self.max_video_index, self.min_video_index, self.device)
        return

    def on_train_epoch_start(self) -> None:
        # Keep a frozen trunk in eval mode (e.g. frozen batch-norm statistics).
        if self.freeze_trunk:
            self.trunk.eval()

    def forward(self, x: Tensor) -> Dict[(str, Any)]:
        """Encode the input and classify; returns features 'h' and logits 'class_preds'."""
        h = self.trunk(x)
        class_preds = self.head_class(h)
        return {'class_preds': class_preds, 'h': h}

    @property
    def num_layers(self) -> int:
        'Number of layers of the model.'
        return self.num_layers_class

    @property
    def num_layers_class(self) -> int:
        # Trunk layers plus classification-head layers.
        return (self.trunk.num_layers + self.head_class.num_layers)

    def get_param_layer_id(self, name: str) -> int:
        'Get the layer id of the named parameter.\n\n        Args:\n            name: The name of the parameter.\n        '
        if name.startswith('trunk.'):
            return self.trunk.get_param_layer_id(name[len('trunk.'):])
        elif name.startswith('head_class.'):
            # Head layers are numbered after all trunk layers.
            return (self.trunk.num_layers + self.head_class.get_param_layer_id(name[len('head_class.'):]))
        else:
            raise NotImplementedError(f'{name} should not have been used.')

    def configure_optimizers(self) -> Dict[(Any, Any)]:
        # Instantiate optimizer (and optional scheduler) from the hydra config.
        (optimizer, scheduler) = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch), model=self)
        if (scheduler is None):
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def shared_step(self, x: Tensor, inversed_temporal_masked_indices: (Tensor | None)):
        """Run the trunk (without grad if frozen) then the classification head."""
        if self.freeze_trunk:
            with torch.no_grad():
                if (inversed_temporal_masked_indices is not None):
                    h: Tensor = self.trunk(x, inversed_temporal_masked_indices=inversed_temporal_masked_indices)
                else:
                    h = self.trunk(x)
        elif (inversed_temporal_masked_indices is not None):
            h = self.trunk(x, inversed_temporal_masked_indices=inversed_temporal_masked_indices)
        else:
            h = self.trunk(x)
        if (h.ndim == 2):
            # Add a temporal dimension of size 1 for heads expecting (B, T, C).
            h = h.reshape(h.shape[0], 1, h.shape[1])
        class_preds: Tensor = self.head_class(h)
        return class_preds

    def training_step(self, batch: Dict[(Any, Any)], batch_idx: int) -> Tensor:
        if (self.train_transform is not None):
            # GPU augmentations without grad, in fp32 (autocast disabled).
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    batch = self.train_transform(batch)
        (x, labels, has_label, ignore_class) = (batch['input'], batch['labels'], batch['has_label'], batch['ignore_class'])
        inversed_temporal_masked_indices: (Tensor | None) = batch.get('inversed_temporal_masked_indices', None)
        class_preds = self.shared_step(x, inversed_temporal_masked_indices)
        loss = self.loss_fn(class_preds=class_preds, class_target=labels, ignore_class=ignore_class, has_label=has_label, mixup_weights=batch.get('mixup_weights', None), **self.loss_fn_args)
        self.log('train/loss', loss, prog_bar=True, on_step=True, on_epoch=True, sync_dist=False)
        return loss

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if (self.val_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    batch = self.val_transform(batch)
        (x, frames, labels, has_label, ignore_class, video_indexes, num_frames) = (batch['input'], batch['frame_indices'], batch['labels'], batch['has_label'], batch['ignore_class'], batch['video_index'], batch['num_frames'])
        class_preds = self.shared_step(x, inversed_temporal_masked_indices=None)
        loss = self.loss_fn(class_preds=class_preds, class_target=labels, ignore_class=ignore_class, has_label=has_label, mixup_weights=batch.get('mixup_weights', None), **self.loss_fn_args)
        class_preds = class_preds.sigmoid()
        # Keep only clips belonging to videos owned by this rank's shard.
        kept_tensors = aggregate_and_filter_clips(class_preds, frames, num_frames, video_indexes, self.max_video_index, self.min_video_index)
        if (kept_tensors is not None):
            (class_preds, frames, num_frames, video_indexes) = kept_tensors
            add_clips_predictions(self.predictions, class_preds, frames, num_frames, video_indexes, self.remove_frames_predictions, self.merge_predictions_type)
        self.log('val/loss', loss, prog_bar=True, on_step=False, on_epoch=True, sync_dist=True)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        if (self.test_transform is not None):
            with torch.no_grad():
                with torch.cuda.amp.autocast(enabled=False):
                    batch = self.test_transform(batch)
        (x, frames, num_frames, video_indexes) = (batch['input'], batch['frame_indices'], batch['num_frames'], batch['video_index'])
        class_preds = self.shared_step(x, inversed_temporal_masked_indices=None)
        class_preds = class_preds.sigmoid()
        kept_tensors = aggregate_and_filter_clips(class_preds, frames, num_frames, video_indexes, self.max_video_index, self.min_video_index)
        if (kept_tensors is not None):
            (class_preds, frames, num_frames, video_indexes) = kept_tensors
            add_clips_predictions(self.predictions, class_preds, frames, num_frames, video_indexes, self.remove_frames_predictions, self.merge_predictions_type)
        # No loss at test time; a dummy value keeps the Lightning contract.
        return 0

    def _make_evaluation(self, predictions_path: (str | Path), logger: bool=False) -> None:
        """Compute mAP at the configured tolerances from saved predictions and log/print them."""
        pred = load_json(predictions_path)
        truth = load_json(self.evaluation_args.get('truth_path'))
        (maps, tolerances, header, rows) = compute_mAPs(truth, pred, self.evaluation_args['tolerances'], self.evaluation_args.get('plot_pr', False))
        rank_zero_info(tabulate(rows, headers=header, floatfmt='0.2f'))
        rank_zero_info(f'Avg mAP (across tolerances): {(np.mean(maps) * 100):0.2f}')
        if logger:
            self.log('eval/Avg-mAP', (np.mean(maps) * 100), on_step=False, on_epoch=True, sync_dist=False)
            for (map, tolerance) in zip(maps, tolerances):
                self.log(f'eval/mAP@{tolerance}', (map * 100), on_step=False, on_epoch=True, sync_dist=False)

    def on_validation_epoch_end(self) -> None:
        """Dump raw predictions per rank, merge + NMS on rank 0, then evaluate."""
        if self.trainer.sanity_checking:
            return
        preds_path = (self.save_val_preds_path / str(self.trainer.current_epoch))
        raw_preds_path = (self.save_val_preds_path / f'{self.trainer.current_epoch}_raw')
        save_raw_spotting_predictions(self.predictions, raw_preds_path, make_zip=False)
        if (dist.is_available() and dist.is_initialized()):
            # Wait until every rank has written its shard before archiving.
            dist.barrier()
        if (self.global_rank == 0):
            shutil.make_archive(str(raw_preds_path), 'zip', raw_preds_path)
            shutil.rmtree(raw_preds_path)
            # Reload all ranks' raw predictions and apply NMS once, on rank 0.
            predictions = load_raw_spotting_predictions((str(raw_preds_path) + '.zip'), list(range(self.trainer.datamodule.val_dataset.num_videos)), self.device)
            save_spotting_predictions(predictions, preds_path, self.trainer.datamodule.val_dataset, self.NMS_args)
        if (dist.is_available() and dist.is_initialized()):
            dist.barrier()
        self._make_evaluation((preds_path / f'predictions.json'), logger=True)
        return super().on_validation_epoch_end()

    def on_test_end(self) -> None:
        """Dump raw test predictions per rank, merge + NMS on rank 0, then evaluate."""
        raw_preds_path = (self.save_test_preds_path.parent / f'{self.save_test_preds_path.name}_raw')
        save_raw_spotting_predictions(self.predictions, raw_preds_path, make_zip=False)
        if (dist.is_available() and dist.is_initialized()):
            dist.barrier()
        if (self.global_rank == 0):
            shutil.make_archive(str(raw_preds_path), 'zip', raw_preds_path)
            shutil.rmtree(raw_preds_path)
            predictions = load_raw_spotting_predictions((str(raw_preds_path) + '.zip'), list(range(self.trainer.datamodule.test_dataset.num_videos)), self.device)
            save_spotting_predictions(predictions, self.save_test_preds_path, self.trainer.datamodule.test_dataset, self.NMS_args)
        if (dist.is_available() and dist.is_initialized()):
            dist.barrier()
        self._make_evaluation((self.save_test_preds_path / f'predictions.json'), logger=False)
        return super().on_test_end()
class SupervisedModel(EztorchBaseModule):
    """Supervised classification model.

    The backbone, the optimizer/scheduler and the optional on-device transforms
    are all built lazily from Hydra configs.

    Args:
        model: Config to build a model.
        optimizer: Config to build optimizers and schedulers.
        train_transform: Config to perform transformation on train input.
        val_transform: Config to perform transformation on val input.
        test_transform: Config to perform transformation on test input.
        val_time_augmentation: Ensembling method for test time augmentation used at validation.
        test_time_augmentation: Ensembling method for test time augmentation used at test.
    """

    def __init__(self, model: DictConfig, optimizer: DictConfig, train_transform: Optional[DictConfig]=None, val_transform: Optional[DictConfig]=None, test_transform: Optional[DictConfig]=None, val_time_augmentation: Optional[DictConfig]=None, test_time_augmentation: Optional[DictConfig]=None) -> None:
        super().__init__()
        self.save_hyperparameters()
        self.model = hydra.utils.instantiate(model)
        # Kept as a config: instantiated in configure_optimizers(), once the
        # datamodule exists and steps-per-epoch can be inferred.
        self.optimizer_cfg = optimizer
        self.train_transform = (hydra.utils.instantiate(train_transform) if (train_transform is not None) else None)
        self.val_transform = (hydra.utils.instantiate(val_transform) if (val_transform is not None) else None)
        self.test_transform = (hydra.utils.instantiate(test_transform) if (test_transform is not None) else None)
        self.val_time_augmentation = (get_test_time_augmentation_fn(**val_time_augmentation) if val_time_augmentation else None)
        self.test_time_augmentation = (get_test_time_augmentation_fn(**test_time_augmentation) if test_time_augmentation else None)

    @property
    def learnable_params(self) -> List[Parameter]:
        'Learnable parameters of the model.'
        return list(self.model.parameters())

    @property
    def training_steps_per_epoch(self) -> Optional[int]:
        'Total training steps inferred from datamodule and devices.'
        if ((self.trainer.datamodule is not None) and (self.trainer.datamodule.train_num_samples > 0)):
            return (self.trainer.datamodule.train_num_samples // self.trainer.datamodule.train_global_batch_size)
        else:
            return None

    def on_fit_start(self) -> None:
        """Create train/val accuracy metrics once num_classes is known."""
        num_classes = self.trainer.datamodule.num_classes
        task = ('binary' if (num_classes <= 2) else 'multiclass')
        # NOTE(review): with task='binary', top_k=5 exceeds the class count —
        # confirm torchmetrics tolerates this configuration.
        self.train_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.train_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)
        self.val_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.val_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def on_test_start(self) -> None:
        """Create test accuracy metrics once num_classes is known."""
        num_classes = self.trainer.datamodule.num_classes
        task = ('binary' if (num_classes <= 2) else 'multiclass')
        self.test_acc_1 = Accuracy(task=task, num_classes=num_classes, top_k=1).to(self.device)
        self.test_acc_5 = Accuracy(task=task, num_classes=num_classes, top_k=5).to(self.device)

    def configure_optimizers(self) -> Dict[Any, Any]:
        """Build optimizer (and optional scheduler) from the stored config."""
        (optimizer, scheduler) = hydra.utils.instantiate(self.optimizer_cfg, num_steps_per_epoch=self.optimizer_cfg.get('num_steps_per_epoch', self.training_steps_per_epoch), model=self)
        if (scheduler is None):
            return optimizer
        return {'optimizer': optimizer, 'lr_scheduler': scheduler}

    def forward(self, x: Tensor) -> Tensor:
        """Run the backbone on ``x``."""
        x = self.model(x)
        return x

    def training_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        """One training step: on-device transform, forward, CE loss + accuracies."""
        (x, targets) = (batch['input'], batch['label'])
        if (self.train_transform is not None):
            with torch.no_grad():
                # Transforms are forced to fp32 even when AMP is active.
                with torch.cuda.amp.autocast(enabled=False):
                    x = self.train_transform(x)
        preds = self(x)
        loss = nn.functional.cross_entropy(preds, targets)
        acc_1 = self.train_acc_1(preds, targets)
        acc_5 = self.train_acc_5(preds, targets)
        self.log('train/loss', loss, on_epoch=True)
        self.log('train/acc_1', acc_1, on_epoch=True, prog_bar=True)
        self.log('train/acc_5', acc_5, on_epoch=True)
        return loss

    @property
    def num_layers(self) -> int:
        'Number of layers of the model.'
        return self.model.num_layers

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.

        Raises:
            NotImplementedError: If ``name`` does not belong to ``self.model``.
        """
        if name.startswith('model.'):
            # Strip the 'model.' prefix and delegate to the backbone.
            return self.model.get_param_layer_id(name[len('model.'):])
        else:
            raise NotImplementedError(f'{name} should not have been used.')

    def validation_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        """Validation step, optionally ensembling clips via test-time augmentation."""
        if (self.val_time_augmentation is not None):
            (x, targets, idx) = (batch['input'], batch['label'], batch['idx'])
            if (self.val_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.val_transform(x)
            preds = self(x)
            # NOTE(review): predictions are softmaxed before aggregation, so the
            # CE loss below is computed on probabilities, not logits — its value
            # is only indicative under TTA.
            preds = preds.softmax((- 1))
            (preds, targets, idx) = self.val_time_augmentation(preds, targets, idx)
        else:
            (x, targets) = (batch['input'], batch['label'])
            if (self.val_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.val_transform(x)
            preds = self(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.val_acc_1(preds, targets)
        self.val_acc_5(preds, targets)
        self.log('val/loss', loss)
        self.log('val/acc_1', self.val_acc_1, prog_bar=True)
        self.log('val/acc_5', self.val_acc_5)
        return loss

    def test_step(self, batch: Tensor, batch_idx: int) -> Tensor:
        """Test step; mirrors validation_step with the test transform/TTA."""
        if (self.test_time_augmentation is not None):
            (x, targets, idx) = (batch['input'], batch['label'], batch['idx'])
            if (self.test_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.test_transform(x)
            preds = self(x)
            # See validation_step: CE on softmaxed, TTA-aggregated predictions.
            preds = preds.softmax((- 1))
            (preds, targets, idx) = self.test_time_augmentation(preds, targets, idx)
        else:
            (x, targets) = (batch['input'], batch['label'])
            if (self.test_transform is not None):
                with torch.no_grad():
                    with torch.cuda.amp.autocast(enabled=False):
                        x = self.test_transform(x)
            preds = self(x)
        loss = nn.functional.cross_entropy(preds, targets)
        self.test_acc_1(preds, targets)
        self.test_acc_5(preds, targets)
        self.log('test/loss', loss)
        self.log('test/acc_1', self.test_acc_1, prog_bar=True)
        self.log('test/acc_5', self.test_acc_5)
        return loss
def create_r2plus1d(*, input_channel: int=3, model_depth: int=50, model_num_class: int=400, dropout_rate: float=0.0, norm: Callable=BatchNorm3d, norm_eps: float=1e-05, norm_momentum: float=0.1, activation: Callable=ReLU, stem_dim_out: int=64, stem_conv_kernel_size: Tuple[int]=(1, 7, 7), stem_conv_stride: Tuple[int]=(1, 2, 2), stage_conv_a_kernel_size: Tuple[Tuple[int]]=((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1)), stage_conv_b_kernel_size: Tuple[Tuple[int]]=((3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3)), stage_conv_b_num_groups: Tuple[int]=(1, 1, 1, 1), stage_conv_b_dilation: Tuple[Tuple[int]]=((1, 1, 1), (1, 1, 1), (1, 1, 1), (1, 1, 1)), stage_spatial_stride: Tuple[int]=(2, 2, 2, 2), stage_temporal_stride: Tuple[int]=(1, 1, 2, 2), stage_bottleneck: Tuple[Callable]=(create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block, create_2plus1d_bottleneck_block), head: Callable=create_res_basic_head, head_pool: Callable=AvgPool3d, head_pool_kernel_size: Tuple[int]=(4, 7, 7), head_output_size: Tuple[int]=(1, 1, 1), head_activation: Callable=Softmax, head_output_with_global_average: bool=True) -> Module:
    """Build the R(2+1)D network from "A Closer Look at Spatiotemporal
    Convolutions for Action Recognition" (Tran et al., CVPR 2018).

    The network follows the ResNet layout and is assembled as::

        Input -> Stem -> Stage 1 -> ... -> Stage N -> Head

    Args:
        input_channel: Number of channels for the input video clip.
        model_depth: Depth of the resnet; one of :math:`50, 101, 152`.
        model_num_class: Number of classes for the video dataset.
        dropout_rate: Dropout rate of the head.
        norm: Callable constructing the normalization layers.
        norm_eps: Normalization epsilon.
        norm_momentum: Normalization momentum.
        activation: Callable constructing the activation layers.
        stem_dim_out: Output channel size for the stem.
        stem_conv_kernel_size: Convolutional kernel size(s) of the stem.
        stem_conv_stride: Convolutional stride size(s) of the stem.
        stage_conv_a_kernel_size: Per-stage kernel size(s) for conv_a.
        stage_conv_b_kernel_size: Per-stage kernel size(s) for conv_b.
        stage_conv_b_num_groups: Per-stage group count for conv_b
            (1 for ResNet, >1 for ResNeXt).
        stage_conv_b_dilation: Per-stage dilation for conv_b.
        stage_spatial_stride: Per-stage spatial stride.
        stage_temporal_stride: Per-stage temporal stride.
        stage_bottleneck: Per-stage bottleneck block constructor, e.g.
            :func:`create_bottleneck_block` or :func:`create_2plus1d_bottleneck_block`.
        head: Callable constructing the resnet-style head (or ``None`` for no head).
        head_pool: Callable constructing the head pooling layer.
        head_pool_kernel_size: Pooling kernel size of the head.
        head_output_size: Output tensor size of the head.
        head_activation: Callable constructing the head activation layer.
        head_output_with_global_average: If ``True``, perform global averaging
            on the head output.

    Returns:
        The assembled R(2+1)D network.
    """
    torch._C._log_api_usage_once('PYTORCHVIDEO.model.create_r2plus1d')
    # Number of residual blocks per stage for each supported depth.
    _MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3), 152: (3, 8, 36, 3)}
    assert (model_depth in _MODEL_STAGE_DEPTH.keys()), f'{model_depth} is not in {_MODEL_STAGE_DEPTH.keys()}'
    stage_depths = _MODEL_STAGE_DEPTH[model_depth]
    blocks = []
    # NOTE(review): norm_eps / norm_momentum are accepted but not forwarded to
    # the stem or stage builders below — the builders' own defaults apply.
    stem = create_res_basic_stem(in_channels=input_channel, out_channels=stem_dim_out, conv_kernel_size=stem_conv_kernel_size, conv_stride=stem_conv_stride, conv_padding=[(size // 2) for size in stem_conv_kernel_size], pool=None, norm=norm, activation=activation)
    blocks.append(stem)
    stage_dim_in = stem_dim_out
    stage_dim_out = (stage_dim_in * 4)
    for idx in range(len(stage_depths)):
        # Bottleneck inner width is a quarter of the stage output width.
        stage_dim_inner = (stage_dim_out // 4)
        depth = stage_depths[idx]
        # conv_b carries both the temporal and the spatial stride of the stage.
        stage_conv_b_stride = (stage_temporal_stride[idx], stage_spatial_stride[idx], stage_spatial_stride[idx])
        stage = create_res_stage(depth=depth, dim_in=stage_dim_in, dim_inner=stage_dim_inner, dim_out=stage_dim_out, bottleneck=stage_bottleneck[idx], conv_a_kernel_size=stage_conv_a_kernel_size[idx], conv_a_stride=[1, 1, 1], conv_a_padding=[(size // 2) for size in stage_conv_a_kernel_size[idx]], conv_b_kernel_size=stage_conv_b_kernel_size[idx], conv_b_stride=stage_conv_b_stride, conv_b_padding=[(size // 2) for size in stage_conv_b_kernel_size[idx]], conv_b_num_groups=stage_conv_b_num_groups[idx], conv_b_dilation=stage_conv_b_dilation[idx], norm=norm, activation=activation)
        blocks.append(stage)
        # Double the width for the next stage.
        stage_dim_in = stage_dim_out
        stage_dim_out = (stage_dim_out * 2)
    if (head is not None):
        head = head(in_features=stage_dim_in, out_features=model_num_class, pool=head_pool, output_size=head_output_size, pool_kernel_size=head_pool_kernel_size, dropout_rate=dropout_rate, activation=head_activation, output_with_global_average=head_output_with_global_average)
        blocks.append(head)
    return Net(blocks=ModuleList(blocks))
class LargeR2Plus1dStem(Sequential):
    """R(2+1)D stem using a factored 3D convolution.

    Unlike the default stem, the spatiotemporal convolution is split into a
    spatial 1x7x7 convolution (3 -> 83 channels, spatial stride 2) followed by
    a temporal 3x1x1 convolution (83 -> 64 channels), each followed by
    batch-norm and ReLU.
    """

    def __init__(self) -> None:
        spatial_conv = Conv3d(3, 83, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
        temporal_conv = Conv3d(83, 64, kernel_size=(3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0), bias=False)
        super().__init__(
            spatial_conv,
            BatchNorm3d(83),
            ReLU(inplace=True),
            temporal_conv,
            BatchNorm3d(64),
            ReLU(inplace=True),
        )
def create_r2plus1d_18(downsample: bool=True, num_classes: int=101, layers: Optional[List[int]]=None, progress: bool=True, pretrained: bool=False, stem: Union[(str, Module)]=LargeR2Plus1dStem, **kwargs) -> Module:
    """Build R2+1D_18 from torchvision for video.

    Args:
        downsample: If ``True``, build the custom :class:`R2Plus1DDownSample`
            backbone instead of torchvision's ``r2plus1d_18``.
        num_classes: If not :math:`0`, replace the last fully connected layer with
            ``num_classes`` output, if :math:`0` replace by identity.
        layers: Number of layers per block. Defaults to ``[1, 1, 1, 1]``.
        progress: If ``True``, displays a progress bar of the download to stderr.
        pretrained: If ``True``, returns a model pre-trained on ImageNet.
        stem: Stem to use for input, either a module class or its key in ``_STEMS``.
        **kwargs: arguments specific to torchvision constructors for ResNet.

    Returns:
        Basic resnet.
    """
    # Fix: the previous default `layers=[1, 1, 1, 1]` was a shared mutable
    # default argument; use a None sentinel instead (same effective default).
    if layers is None:
        layers = [1, 1, 1, 1]
    # isinstance instead of `type(...) is str` also accepts str subclasses.
    if isinstance(stem, str):
        stem = _STEMS[stem]
    if downsample:
        model = R2Plus1DDownSample(stem(), layers)
        # Feature width before the classifier is fixed at 512 for this backbone.
        model.num_features = 512
    else:
        model = _video_resnet('r2plus1d_18', pretrained, progress, block=BasicBlock, conv_makers=([Conv2Plus1D] * 4), layers=layers, stem=stem, **kwargs)
        model.num_features = model.inplanes
    # num_classes == 0 exposes the pooled features directly.
    if (num_classes == 0):
        model.fc = Identity()
    else:
        model.fc = Linear(model.fc.in_features, num_classes)
    return model
class SpatioTemporalConv(nn.Module):
    """Factored (2+1)D convolution.

    Replaces a full 3D convolution by a spatial (1 x kH x kW) convolution into
    an intermediate channel space, followed by batch-norm, ReLU and a temporal
    (kT x 1 x 1) convolution producing the final output.

    Args:
        in_chans: Number of channels in the input tensor.
        out_channels: Number of channels produced by the convolution.
        kernel_size: Size of the convolving kernel (int or 3-tuple).
        stride: Stride of the convolution.
        padding: Zero-padding added to the sides of the input during their
            respective convolutions.
        bias: If ``True``, adds a learnable bias to both convolutions.
        first_conv: Unused; kept for interface compatibility.
    """

    def __init__(self, in_chans, out_channels, kernel_size, stride=1, padding=0, bias=False, first_conv=False):
        super().__init__()
        kt, kh, kw = _triple(kernel_size)
        st, sh, sw = _triple(stride)
        pt, ph, pw = _triple(padding)
        # Intermediate width chosen so the factored pair has roughly the same
        # parameter count as the full 3D convolution it replaces.
        intermed_channels = int(math.floor((kt * kh * kw * in_chans * out_channels) / ((kh * kw * in_chans) + (kt * out_channels))))
        self.spatial_conv = nn.Conv3d(in_chans, intermed_channels, (1, kh, kw), stride=(1, sh, sw), padding=(0, ph, pw), bias=bias)
        self.bn = nn.BatchNorm3d(intermed_channels)
        self.relu = nn.ReLU()
        self.temporal_conv = nn.Conv3d(intermed_channels, out_channels, (kt, 1, 1), stride=(st, 1, 1), padding=(pt, 0, 0), bias=bias)

    def forward(self, x):
        out = self.spatial_conv(x)
        out = self.relu(self.bn(out))
        return self.temporal_conv(out)
class SpatioTemporalResBlock(nn.Module):
    """Residual block built from :class:`SpatioTemporalConv` layers.

    Standard ResNet layout: conv -> bn -> ReLU -> conv -> bn -> sum -> ReLU.
    When ``downsample`` is set, the first conv strides by 2 and the identity
    path is replaced by a strided 1x1x1 projection with batch-norm.

    Args:
        in_chans: Number of channels in the input tensor.
        out_channels: Number of channels in the output produced by the block.
        kernel_size: Size of the convolving kernels.
        downsample: If ``True``, the output size is to be smaller than the input.
    """

    def __init__(self, in_chans, out_channels, kernel_size, downsample=False):
        super().__init__()
        self.downsample = downsample
        pad = kernel_size // 2
        if downsample:
            # Strided 1x1x1 projection matches the shortcut to the residual shape.
            self.downsampleconv = SpatioTemporalConv(in_chans, out_channels, 1, stride=2)
            self.downsamplebn = nn.BatchNorm3d(out_channels)
            self.conv1 = SpatioTemporalConv(in_chans, out_channels, kernel_size, padding=pad, stride=2)
        else:
            self.conv1 = SpatioTemporalConv(in_chans, out_channels, kernel_size, padding=pad)
        self.bn1 = nn.BatchNorm3d(out_channels)
        self.relu1 = nn.ReLU()
        self.conv2 = SpatioTemporalConv(out_channels, out_channels, kernel_size, padding=pad)
        self.bn2 = nn.BatchNorm3d(out_channels)
        self.outrelu = nn.ReLU()

    def forward(self, x):
        residual = self.bn2(self.conv2(self.relu1(self.bn1(self.conv1(x)))))
        shortcut = self.downsamplebn(self.downsampleconv(x)) if self.downsample else x
        return self.outrelu(shortcut + residual)
class SpatioTemporalResLayer(nn.Module):
    """A stack of residual blocks forming one ResNet layer.

    Args:
        in_chans: Number of channels in the input tensor.
        out_channels: Number of channels in the output produced by the layer.
        kernel_size: Size of the convolving kernels.
        layer_size: Number of blocks to be stacked to form the layer.
        block_type: Type of block that is to be used to form the layer.
        downsample: If ``True``, the first block in the layer will downsample.
    """

    def __init__(self, in_chans, out_channels, kernel_size, layer_size, block_type=SpatioTemporalResBlock, downsample=False):
        super().__init__()
        # Only the first block may change the channel count / resolution.
        self.block1 = block_type(in_chans, out_channels, kernel_size, downsample)
        # Remaining blocks keep width and resolution unchanged.
        self.blocks = nn.ModuleList([block_type(out_channels, out_channels, kernel_size) for _ in range(layer_size - 1)])

    def forward(self, x):
        out = self.block1(x)
        for block in self.blocks:
            out = block(out)
        return out
class R2Plus1DDownSample(nn.Module):
    """R(2+1)D feature extractor: stem plus four residual layers, global average
    pooling and a linear classifier producing ``num_classes`` logits (features
    are 512-dimensional before the classifier).

    Args:
        stem: Stem used for the input.
        layer_sizes: Number of blocks in each of the four layers.
            Defaults to ``[1, 1, 1, 1]``.
        block_type: Block class used to form the layers.
        num_classes: Number of classes for classification.
    """

    def __init__(self, stem: Module, layer_sizes: Optional[List[int]]=None, block_type: Callable=SpatioTemporalResBlock, num_classes: int=101):
        super().__init__()
        # Fix: `layer_sizes=[1, 1, 1, 1]` was a shared mutable default argument.
        if layer_sizes is None:
            layer_sizes = [1, 1, 1, 1]
        self.stem = stem
        self.layer1 = SpatioTemporalResLayer(64, 64, 3, layer_sizes[0], block_type=block_type)
        self.layer2 = SpatioTemporalResLayer(64, 128, 3, layer_sizes[1], block_type=block_type, downsample=True)
        self.layer3 = SpatioTemporalResLayer(128, 256, 3, layer_sizes[2], block_type=block_type, downsample=True)
        self.layer4 = SpatioTemporalResLayer(256, 512, 3, layer_sizes[3], block_type=block_type, downsample=True)
        self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        self.fc = nn.Linear(512, num_classes)
        # BUG FIX: was `self._initialize_weights` (attribute access, no call),
        # so the custom initialization never ran.
        self._initialize_weights()

    def _initialize_weights(self) -> None:
        """Kaiming init for convs, constant init for batch-norms, normal for linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Pooled features are (N, 512, 1, 1, 1); flatten for the classifier.
        x = x.view((- 1), 512)
        x = self.fc(x)
        return x
def create_resnet(name: str, num_classes: int=1000, progress: bool=True, pretrained: bool=False, small_input: bool=False, **kwargs) -> Module:
    """Build a torchvision image ResNet.

    Args:
        name: Name of the resnet model (such as resnet18); must be a key of
            ``_ResNets``.
        num_classes: If not :math:`0`, replace the last fully connected layer
            with ``num_classes`` outputs; if :math:`0`, replace it by identity.
        progress: If ``True``, displays a progress bar of the download to stderr.
        pretrained: If ``True``, returns a model pre-trained on ImageNet.
        small_input: If ``True``, replace the first conv2d for small images and
            replace the first maxpool by identity.
        **kwargs: arguments specific to torchvision constructors for ResNet.

    Returns:
        Basic resnet.
    """
    assert (name in _ResNets), f'ResNet {name} is not supported please add the corresponding entry in _ResNets directory or provide the right name.'
    builder = _ResNets[name]
    model = builder(pretrained=pretrained, progress=progress, **kwargs)
    # num_classes == 0 exposes pooled features directly instead of logits.
    model.fc = Identity() if num_classes == 0 else Linear(model.fc.in_features, num_classes)
    if small_input:
        # 3x3 stride-1 first conv and no initial maxpool suit e.g. CIFAR-sized inputs.
        model.conv1 = Conv2d(3, model.conv1.out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        model.maxpool = Identity()
    model.num_features = model.inplanes
    return model
def create_basic_block(*, dim_in: int, dim_out: int, conv_a_kernel_size: Tuple[int]=(3, 3, 3), conv_a_stride: Tuple[int]=(2, 2, 2), conv_a_padding: Tuple[int]=(1, 1, 1), conv_a: Callable=Conv3d, conv_b_kernel_size: Tuple[int]=(3, 3, 3), conv_b_stride: Tuple[int]=(2, 2, 2), conv_b_padding: Tuple[int]=(1, 1, 1), conv_b: Callable=Conv3d, norm: Callable=BatchNorm3d, norm_eps: float=1e-05, norm_momentum: float=0.1, activation: Callable=ReLU) -> Module:
    """Build the main branch of a basic residual block.

    The layers are assembled in the order::

        Conv3d (conv_a) -> Normalization -> Activation -> Conv3d (conv_b) -> Normalization

    Normalization examples include: ``BatchNorm3d`` and ``None`` (no normalization).
    Activation examples include: ``ReLU``, ``Softmax``, ``Sigmoid``, and ``None``.

    Args:
        dim_in: Input channel size to the basic block.
        dim_out: Output channel size of the basic block (both convolutions
            produce ``dim_out`` channels).
        conv_a_kernel_size: Convolutional kernel size(s) for ``conv_a``.
        conv_a_stride: Convolutional stride size(s) for ``conv_a``.
        conv_a_padding: Convolutional padding(s) for ``conv_a``.
        conv_a: Callable constructing the ``conv_a`` layer (e.g. ``Conv3d``).
        conv_b_kernel_size: Convolutional kernel size(s) for ``conv_b``.
        conv_b_stride: Convolutional stride size(s) for ``conv_b``.
        conv_b_padding: Convolutional padding(s) for ``conv_b``.
        conv_b: Callable constructing the ``conv_b`` layer (e.g. ``Conv3d``).
        norm: Callable constructing the normalization layers, or ``None``.
        norm_eps: Normalization epsilon.
        norm_momentum: Normalization momentum.
        activation: Callable constructing the activation layer, or ``None``.

    Returns:
        Resnet basic block.
    """
    def _build_norm():
        # Both normalizations operate on dim_out channels.
        return None if norm is None else norm(num_features=dim_out, eps=norm_eps, momentum=norm_momentum)

    layer_a = conv_a(in_channels=dim_in, out_channels=dim_out, kernel_size=conv_a_kernel_size, stride=conv_a_stride, padding=conv_a_padding, bias=False)
    layer_b = conv_b(in_channels=dim_out, out_channels=dim_out, kernel_size=conv_b_kernel_size, stride=conv_b_stride, padding=conv_b_padding, bias=False)
    act_a = None if activation is None else activation()
    return BasicBlock(conv_a=layer_a, norm_a=_build_norm(), act_a=act_a, conv_b=layer_b, norm_b=_build_norm())
def create_res_basic_block(*, dim_in: int, dim_out: int, basicblock: Callable, use_shortcut: bool=False, branch_fusion: Callable=_trivial_sum, conv_a_kernel_size: Tuple[int]=(3, 3, 3), conv_a_stride: Tuple[int]=(2, 2, 2), conv_a_padding: Tuple[int]=(1, 1, 1), conv_a: Callable=Conv3d, conv_b_kernel_size: Tuple[int]=(3, 3, 3), conv_b_stride: Tuple[int]=(2, 2, 2), conv_b_padding: Tuple[int]=(1, 1, 1), conv_b: Callable=Conv3d, conv_skip: Callable=Conv3d, norm: Callable=BatchNorm3d, norm_eps: float=1e-05, norm_momentum: float=0.1, activation_basicblock: Callable=ReLU, activation_block: Callable=ReLU) -> Module:
    """Build a residual block with a basic-block main branch.

    Performs a fusion (by default a sum) between an identity shortcut (branch1)
    and the basic-block branch (branch2)::

        Input
          |-------+
          ↓       |
        Block     |
          ↓       |
        Fusion ←--+
          ↓
        Activation

    When the input and output dimensions differ, or when the combined stride is
    not 1, the shortcut becomes a strided 1x1x1 convolution optionally followed
    by a normalization.

    Args:
        dim_in: Input channel size to the block.
        dim_out: Output channel size of the block.
        basicblock: Callable constructing the main-branch block, e.g.
            :func:`create_basic_block`.
        use_shortcut: If ``True``, force conv and norm layers in the skip
            connection even when shapes already match.
        branch_fusion: Callable that fuses the two branches
            (e.g. ``lambda x, y: x + y``).
        conv_a_kernel_size: Convolutional kernel size(s) for ``conv_a``.
        conv_a_stride: Convolutional stride size(s) for ``conv_a``.
        conv_a_padding: Convolutional padding(s) for ``conv_a``.
        conv_a: Callable constructing the ``conv_a`` layer.
        conv_b_kernel_size: Convolutional kernel size(s) for ``conv_b``.
        conv_b_stride: Convolutional stride size(s) for ``conv_b``.
        conv_b_padding: Convolutional padding(s) for ``conv_b``.
        conv_b: Callable constructing the ``conv_b`` layer.
        conv_skip: Callable constructing the shortcut conv layer.
        norm: Callable constructing the normalization layers, or ``None``.
        norm_eps: Normalization epsilon.
        norm_momentum: Normalization momentum.
        activation_basicblock: Activation constructor used inside the basic block.
        activation_block: Activation constructor applied after the fusion.

    Returns:
        Resnet basic block layer.
    """
    # Effective shortcut stride is the elementwise product of both conv strides.
    branch1_conv_stride = tuple(map(np.prod, zip(conv_a_stride, conv_b_stride)))
    norm_model = None
    # NOTE(review): with use_shortcut=True and norm=None this calls None(...)
    # and raises — confirm whether that combination is ever used.
    if (use_shortcut or ((norm is not None) and ((dim_in != dim_out) or (np.prod(branch1_conv_stride) != 1)))):
        norm_model = norm(num_features=dim_out, eps=norm_eps, momentum=norm_momentum)
    # The shortcut conv exists whenever shapes change or use_shortcut is forced;
    # otherwise branch1 is a pure identity.
    return ResBlock(branch1_conv=(conv_skip(dim_in, dim_out, kernel_size=(1, 1, 1), stride=branch1_conv_stride, bias=False) if (((dim_in != dim_out) or (np.prod(branch1_conv_stride) != 1)) or use_shortcut) else None), branch1_norm=norm_model, branch2=basicblock(dim_in=dim_in, dim_out=dim_out, conv_a_kernel_size=conv_a_kernel_size, conv_a_stride=conv_a_stride, conv_a_padding=conv_a_padding, conv_a=conv_a, conv_b_kernel_size=conv_b_kernel_size, conv_b_stride=conv_b_stride, conv_b_padding=conv_b_padding, conv_b=conv_b, norm=norm, norm_eps=norm_eps, norm_momentum=norm_momentum, activation=activation_basicblock), activation=(None if (activation_block is None) else activation_block()), branch_fusion=branch_fusion)
def create_res_basic_stage(*, depth: int, dim_in: int, dim_out: int, basicblock: Callable, conv_a_kernel_size: Union[(Tuple[int], List[Tuple[int]])]=(3, 3, 3), conv_a_stride: Tuple[int]=(2, 2, 2), conv_a_padding: Union[(Tuple[int], List[Tuple[int]])]=(1, 1, 1), conv_a: Callable=Conv3d, conv_b_kernel_size: Tuple[int]=(3, 3, 3), conv_b_stride: Tuple[int]=(2, 2, 2), conv_b_padding: Tuple[int]=(1, 1, 1), conv_b: Callable=Conv3d, norm: Callable=BatchNorm3d, norm_eps: float=1e-05, norm_momentum: float=0.1, activation: Callable=ReLU) -> Module:
    """Create a residual stage: ``depth`` sequential residual basic blocks.

    ::

        Input -> ResBlock -> ... -> ResBlock

    Only the first block of the stage changes the channel count (``dim_in`` ->
    ``dim_out``) and applies the given strides; the remaining blocks use stride
    ``(1, 1, 1)``.

    Args:
        depth: Number of blocks to create.
        dim_in: Input channel size to the stage.
        dim_out: Output channel size of the stage.
        basicblock: Callable that constructs the basic block main branch,
            e.g. :func:`create_basic_block`.
        conv_a_kernel_size: Kernel size(s) for ``conv_a``. A single tuple is
            used for every block; a list of tuples is cycled until it covers
            ``depth`` blocks (e.g. ``[(3, 1, 1), (1, 1, 1)]`` with depth 5 gives
            ``(3,1,1), (1,1,1), (3,1,1), (1,1,1), (3,1,1)``).
        conv_a_stride: Convolutional stride size(s) for ``conv_a`` (first block only).
        conv_a_padding: Padding(s) for ``conv_a``; single tuple or cycled list,
            like ``conv_a_kernel_size``.
        conv_a: Callable constructing the ``conv_a`` layer.
        conv_b_kernel_size: Convolutional kernel size(s) for ``conv_b``.
        conv_b_stride: Convolutional stride size(s) for ``conv_b`` (first block only).
        conv_b_padding: Convolutional padding(s) for ``conv_b``.
        conv_b: Callable constructing the ``conv_b`` layer.
        norm: Callable constructing the normalization layers, or ``None``.
        norm_eps: Normalization epsilon.
        norm_momentum: Normalization momentum.
        activation: Callable constructing the activation layers, or ``None``.

    Returns:
        Resnet basic stage layer.
    """
    res_blocks = []
    # Normalize single tuples to one-element lists, then cycle to length `depth`.
    if isinstance(conv_a_kernel_size[0], int):
        conv_a_kernel_size = [conv_a_kernel_size]
    if isinstance(conv_a_padding[0], int):
        conv_a_padding = [conv_a_padding]
    conv_a_kernel_size = (conv_a_kernel_size * depth)[:depth]
    conv_a_padding = (conv_a_padding * depth)[:depth]
    for ind in range(depth):
        # Only block 0 adapts channels and applies the configured strides.
        block = create_res_basic_block(dim_in=(dim_in if (ind == 0) else dim_out), dim_out=dim_out, basicblock=basicblock, conv_a_kernel_size=conv_a_kernel_size[ind], conv_a_stride=(conv_a_stride if (ind == 0) else (1, 1, 1)), conv_a_padding=conv_a_padding[ind], conv_a=conv_a, conv_b_kernel_size=conv_b_kernel_size, conv_b_stride=(conv_b_stride if (ind == 0) else (1, 1, 1)), conv_b_padding=conv_b_padding, conv_b=conv_b, norm=norm, norm_eps=norm_eps, norm_momentum=norm_momentum, activation_basicblock=activation, activation_block=activation)
        res_blocks.append(block)
    return ResStage(res_blocks=ModuleList(res_blocks))
def create_resnet3d_basic(
    *,
    input_channel: int=3,
    model_depth: int=50,
    model_num_class: int=400,
    dropout_rate: float=0.5,
    norm: Callable=BatchNorm3d,
    activation: Callable=ReLU,
    stem_activation: Optional[Callable]=ReLU,
    stem_dim_out: int=64,
    stem_conv_kernel_size: Tuple[int]=(1, 7, 7),
    stem_conv_stride: Tuple[int]=(1, 2, 2),
    stem_pool: Optional[Callable]=MaxPool3d,
    stem_pool_kernel_size: Tuple[int]=(1, 3, 3),
    stem_pool_stride: Tuple[int]=(1, 2, 2),
    stem: Optional[Callable]=create_res_basic_stem,
    stage1_pool: Callable=None,
    stage1_pool_kernel_size: Tuple[int]=(2, 1, 1),
    stage_conv_a_kernel_size: Union[(Tuple[int], Tuple[Tuple[int]])]=((1, 3, 3), (1, 3, 3), (3, 3, 3), (3, 3, 3)),
    stage_conv_b_kernel_size: Union[(Tuple[int], Tuple[Tuple[int]])]=((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),
    stage_spatial_h_stride: Tuple[int]=(1, 2, 2, 2),
    stage_spatial_w_stride: Tuple[int]=(1, 2, 2, 2),
    stage_temporal_stride: Tuple[int]=(1, 1, 1, 1),
    basicblock: Union[(Tuple[Callable], Callable)]=create_basic_block,
    head: Callable=create_res_basic_head,
    head_pool: Callable=AvgPool3d,
    head_pool_kernel_size: Tuple[int]=(4, 7, 7),
    head_output_size: Tuple[int]=(1, 1, 1),
    head_activation: Callable=None,
    head_output_with_global_average: bool=True,
) -> Module:
    """
    Build ResNet style models for video recognition. ResNet has three parts:
    Stem, Stages and Head. Stem is the first Convolution layer (Conv1) with an
    optional pooling layer. Stages are grouped residual blocks. There are usually
    multiple stages and each stage may include multiple residual blocks. Head
    may include pooling, dropout, a fully-connected layer and global spatial
    temporal averaging. The three parts are assembled in the following order:

    ::

        Input
           ↓
        Stem
           ↓
        Stage 1
           ↓
           .
           .
           .
           ↓
        Stage N
           ↓
        Head

    Args:

        input_channel: Number of channels for the input video clip.

        model_depth: The depth of the resnet.
            Options include: :math:`18, 50, 101, 152`.
        model_num_class: The number of classes for the video dataset.
        dropout_rate: Dropout rate.


        norm: A callable that constructs normalization layer.

        activation: A callable that constructs activation layer.

        stem_activation: A callable that constructs activation layer of stem.
        stem_dim_out: Output channel size to stem.
        stem_conv_kernel_size: Convolutional kernel size(s) of stem.
        stem_conv_stride: Convolutional stride size(s) of stem.
        stem_pool: A callable that constructs resnet head pooling layer.
        stem_pool_kernel_size: Pooling kernel size(s).
        stem_pool_stride: Pooling stride size(s).
        stem: A callable that constructs stem layer.
            Examples include: :func:`create_res_video_stem`.

        stage1_pool: Optional pooling layer inserted right after stage 1.
        stage1_pool_kernel_size: Kernel size for ``stage1_pool`` (also used as
            its stride).
        stage_conv_a_kernel_size: Convolutional kernel size(s) for ``conv_a``.
        stage_conv_b_kernel_size: Convolutional kernel size(s) for ``conv_b``.
        stage_spatial_h_stride: The spatial height stride for each stage.
        stage_spatial_w_stride: The spatial width stride for each stage.
        stage_temporal_stride: The temporal stride for each stage.
        basicblock: A callable that constructs basicblock block layer.
            Examples include: :func:`create_basicblock_block`.

        head: A callable that constructs the resnet-style head.
            Ex: create_res_basic_head
        head_pool: A callable that constructs resnet head pooling layer.
        head_pool_kernel_size: The pooling kernel size.
        head_output_size: The size of output tensor for head.
        head_activation: A callable that constructs activation layer.
        head_output_with_global_average: if ``True``, perform global averaging on
            the head output.

    Returns:
        Basic resnet.
    """
    torch._C._log_api_usage_once('PYTORCHVIDEO.model.create_resnet3d_basic')
    assert (model_depth in _MODEL_STAGE_DEPTH.keys()), f'{model_depth} is not in {_MODEL_STAGE_DEPTH.keys()}'
    stage_depths = _MODEL_STAGE_DEPTH[model_depth]
    # Broadcast single-tuple kernel sizes / a single block constructor to one
    # entry per stage.
    if isinstance(stage_conv_a_kernel_size[0], int):
        stage_conv_a_kernel_size = ((stage_conv_a_kernel_size,) * len(stage_depths))
    if isinstance(stage_conv_b_kernel_size[0], int):
        stage_conv_b_kernel_size = ((stage_conv_b_kernel_size,) * len(stage_depths))
    if isinstance(basicblock, Callable):
        basicblock = ([basicblock] * len(stage_depths))
    blocks = []
    # Stem: first convolution (+ optional pooling); padding is derived as
    # "same"-style half-kernel padding.
    stem = stem(
        in_channels=input_channel,
        out_channels=stem_dim_out,
        conv_kernel_size=stem_conv_kernel_size,
        conv_stride=stem_conv_stride,
        conv_padding=[(size // 2) for size in stem_conv_kernel_size],
        pool=stem_pool,
        pool_kernel_size=stem_pool_kernel_size,
        pool_stride=stem_pool_stride,
        pool_padding=[(size // 2) for size in stem_pool_kernel_size],
        norm=norm,
        activation=stem_activation,
    )
    blocks.append(stem)
    stage_dim_in = stem_dim_out
    stage_dim_out = stage_dim_in
    for idx in range(len(stage_depths)):
        depth = stage_depths[idx]
        stage_conv_a_kernel = stage_conv_a_kernel_size[idx]
        stage_conv_a_stride = (stage_temporal_stride[idx], stage_spatial_h_stride[idx], stage_spatial_w_stride[idx])
        # NOTE(review): this padding assumes the default conv_a kernels
        # ((1,3,3) for stages 0-1, (3,3,3) for stages 2-3); it is not derived
        # from stage_conv_a_kernel, so custom kernel sizes may get a
        # mismatched padding — confirm if non-default kernels are intended.
        stage_conv_a_padding = ([1, 1, 1] if (idx > 1) else [0, 1, 1])
        stage_conv_b_kernel = stage_conv_b_kernel_size[idx]
        stage_conv_b_stride = (1, 1, 1)
        stage_conv_b_padding = [0, 1, 1]
        stage = create_res_basic_stage(
            depth=depth,
            dim_in=stage_dim_in,
            dim_out=stage_dim_out,
            basicblock=basicblock[idx],
            conv_a_kernel_size=stage_conv_a_kernel,
            conv_a_stride=stage_conv_a_stride,
            conv_a_padding=stage_conv_a_padding,
            conv_b_kernel_size=stage_conv_b_kernel,
            conv_b_stride=stage_conv_b_stride,
            conv_b_padding=stage_conv_b_padding,
            norm=norm,
            activation=activation,
        )
        blocks.append(stage)
        # Channel width doubles after every stage.
        stage_dim_in = stage_dim_out
        stage_dim_out = (stage_dim_out * 2)
        # Optional temporal pooling right after the first stage; note the
        # kernel size is reused as the stride.
        if ((idx == 0) and (stage1_pool is not None)):
            blocks.append(stage1_pool(kernel_size=stage1_pool_kernel_size, stride=stage1_pool_kernel_size, padding=(0, 0, 0)))
    if (head is not None):
        # stage_dim_in holds the output width of the last stage at this point.
        head = head(
            in_features=stage_dim_in,
            out_features=model_num_class,
            pool=head_pool,
            output_size=head_output_size,
            pool_kernel_size=head_pool_kernel_size,
            dropout_rate=dropout_rate,
            activation=head_activation,
            output_with_global_average=head_output_with_global_average,
        )
        blocks.append(head)
    return Net(blocks=ModuleList(blocks))
class ResBlock(Module):
    """
    Residual block. Performs a summation between an identity shortcut in branch1 and a
    main block in branch2. When the input and output dimensions are different, a
    convolution followed by a normalization will be performed.

    ::


        Input
          |-------+
          ↓       |
        Block     |
          ↓       |
        Summation ←-+
          ↓
        Activation

    The builder can be found in `create_res_block`.
    """

    def __init__(self, branch1_conv: Module=None, branch1_norm: Module=None, branch2: Module=None, activation: Module=None, branch_fusion: Callable=None) -> Module:
        """
        Args:
            branch1_conv: Convolutional module in branch1.
            branch1_norm: Normalization module in branch1.
            branch2: Basicblock block module in branch2.
            activation: Activation module.
            branch_fusion: A callable or layer that combines branch1
                and branch2.
        """
        super().__init__()
        set_attributes(self, locals())
        # The main branch is mandatory; everything else is optional.
        assert (self.branch2 is not None)

    def forward(self, x) -> torch.Tensor:
        # Build the shortcut: identity when no projection conv is configured,
        # otherwise conv (+ optional norm) to match the main branch's shape.
        if self.branch1_conv is None:
            shortcut = x
        else:
            shortcut = self.branch1_conv(x)
            if self.branch1_norm is not None:
                shortcut = self.branch1_norm(shortcut)
        out = self.branch_fusion(shortcut, self.branch2(x))
        if self.activation is not None:
            out = self.activation(out)
        return out
class BasicBlock(Module):
    """
    basicblock block: a sequence of spatiotemporal Convolution, Normalization,
    and Activations repeated in the following order:

    ::


        Conv3d (conv_a)
           ↓
        Normalization (norm_a)
           ↓
        Activation (act_a)
           ↓
        Conv3d (conv_b)
           ↓
        Normalization (norm_b)


    The builder can be found in :func:`create_basicblock_block`.
    """

    def __init__(self, *, conv_a: Module=None, norm_a: Module=None, act_a: Module=None, conv_b: Module=None, norm_b: Module=None) -> None:
        """
        Args:
            conv_a: Convolutional module.
            norm_a: Normalization module.
            act_a: Activation module.
            conv_b: Convolutional module.
            norm_b: Normalization module.
        """
        super().__init__()
        set_attributes(self, locals())
        # Both convolutions are mandatory; norm/activation may be None.
        assert all(((op is not None) for op in (self.conv_a, self.conv_b)))
        if self.norm_b is not None:
            # Mark the last normalization of the block — presumably consumed by
            # a weight-init scheme elsewhere; confirm against the initializer.
            self.norm_b.block_final_bn = True

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Run the fixed pipeline, skipping optional layers left as None
        # (conv_a / conv_b are guaranteed non-None by __init__).
        for layer in (self.conv_a, self.norm_a, self.act_a, self.conv_b, self.norm_b):
            if layer is not None:
                x = layer(x)
        return x
class Swin(nn.Module): 'Swin Transformer\n A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -\n https://arxiv.org/pdf/2103.14030\n Args:\n img_size (int | tuple(int)): Input image size. Default 224\n patch_size (int | tuple(int)): Patch size. Default: 4\n in_chans (int): Number of input image channels. Default: 3\n num_classes (int): Number of classes for classification head. Default: 1000\n embed_dim (int): Patch embedding dimension. Default: 96\n depths (tuple(int)): Depth of each Swin Transformer layer.\n num_heads (tuple(int)): Number of attention heads in different layers.\n head_dim (int, tuple(int)):\n window_size (int): Window size. Default: 7\n mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4\n qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n drop_rate (float): Dropout rate. Default: 0\n attn_drop_rate (float): Attention dropout rate. Default: 0\n drop_path_rate (float): Stochastic depth rate. Default: 0.1\n norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n ape (bool): If True, add absolute position embedding to the patch embedding. Default: False\n patch_norm (bool): If True, add normalization after patch embedding. 
Default: True\n ' def __init__(self, img_size: (int | Tuple[(int, int)])=224, patch_size: (int | Tuple[(int, int)])=4, in_chans: int=3, global_pool: str='avg', embed_dim: int=96, depths: Tuple[int]=(2, 2, 6, 2), num_heads: Tuple[int]=(3, 6, 12, 24), window_size: int=7, mlp_ratio: float=4.0, qkv_bias: bool=True, drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.1, norm_layer: (nn.Module | None)=None, ape: bool=False, patch_norm: bool=True, weight_init: str='', conv_type: str='Conv2d', tube_size: int=2, **kwargs): super().__init__() assert (global_pool in ('', 'avg')) self.global_pool = global_pool self.num_swin_layers = len(depths) self.embed_dim = embed_dim self.num_features = int((embed_dim * (2 ** (self.num_swin_layers - 1)))) norm_layer = (norm_layer or partial(nn.LayerNorm, eps=1e-06)) if (type(img_size) is int): img_size = _pair(img_size) if (type(patch_size) is int): patch_size = _pair(patch_size) self.img_size = img_size self.patch_size = patch_size self.depths = depths self.patch_grid = ((img_size[0] // patch_size[0]), (img_size[1] // patch_size[1])) patch_embed = get_patch_embed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, norm_layer=(norm_layer if patch_norm else None), conv_type=conv_type, tube_size=tube_size) self.patch_embed = (patch_embed if (type(patch_embed) is not nn.Identity) else None) if (self.patch_embed is not None): num_patches = self.patch_embed.num_patches self.absolute_pos_embed = (nn.Parameter(torch.zeros(1, num_patches, embed_dim)) if (ape and (self.patch_embed is not None)) else None) self.pos_drop = (nn.Dropout(p=drop_rate) if (self.patch_embed is not None) else None) if (not isinstance(embed_dim, (tuple, list))): embed_dim = [int((embed_dim * (2 ** i))) for i in range(self.num_swin_layers)] embed_out_dim = (embed_dim[1:] + [None]) head_dim = to_ntuple(self.num_swin_layers)(None) window_size = to_ntuple(self.num_swin_layers)(window_size) mlp_ratio = 
to_ntuple(self.num_swin_layers)(mlp_ratio) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] layers = [] for i in range(self.num_swin_layers): layers += [BasicLayer(dim=embed_dim[i], out_dim=embed_out_dim[i], input_resolution=((self.patch_grid[0] // (2 ** i)), (self.patch_grid[1] // (2 ** i))), depth=depths[i], num_heads=num_heads[i], head_dim=head_dim[i], window_size=window_size[i], mlp_ratio=mlp_ratio[i], qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i]):sum(depths[:(i + 1)])], norm_layer=norm_layer, downsample=(PatchMerging if (i < (self.num_swin_layers - 1)) else None))] self.layers = nn.Sequential(*layers) self.norm = norm_layer(self.num_features) if (weight_init != 'skip'): self.init_weights(weight_init) @torch.jit.ignore def init_weights(self, mode=''): assert (mode in ('jax', 'jax_nlhb', 'moco', '')) if (self.absolute_pos_embed is not None): trunc_normal_(self.absolute_pos_embed, std=0.02) named_apply(get_init_weights_vit(mode, head_bias=0), self) @property def num_layers(self) -> int: 'Number of layers of the model.' 
sum_depths = sum(self.depths) return (sum_depths + (1 if (self.patch_embed is not None) else 0)) def get_param_layer_id(self, name: str) -> int: 'Get the layer id of the named parameter.\n\n Args:\n name: The name of the parameter.\n ' if name.startswith('patch_embed.'): return 0 elif (name in 'absolute_pos_embed'): return 0 elif name.startswith('layers.'): add = (1 if (self.patch_embed is not None) else 0) splitted_name = name.split('.') if (splitted_name[3] in ['norm', 'reduction']): return ((add + sum(self.depths[:(int(splitted_name[1]) + 1)])) - 1) (layer_id, block_id) = (int(splitted_name[1]), int(splitted_name[3])) return ((add + sum(self.depths[:layer_id])) + block_id) else: return (self.num_layers - 1) def forward(self, x): if (self.patch_embed is not None): x = self.patch_embed(x) if (self.absolute_pos_embed is not None): x = (x + self.absolute_pos_embed) x = self.pos_drop(x) x = self.layers(x) x = self.norm(x) if (self.global_pool == 'avg'): x = x.mean(dim=1) return x
def create_swin(img_size: (int | Tuple[(int, int)])=224, patch_size: (int | Tuple[(int, int)])=4, in_chans: int=3, global_pool: str='avg', embed_dim: int=96, depths: Tuple[int]=(2, 2, 6, 2), num_heads: Tuple[int]=(3, 6, 12, 24), window_size: int=7, mlp_ratio: float=4.0, qkv_bias: bool=True, drop_rate: float=0.0, attn_drop_rate: float=0.0, drop_path_rate: float=0.1, norm_layer: nn.Module=None, ape: bool=False, patch_norm: bool=True, weight_init: str='', conv_type: str='Conv2d', tube_size: int=2, pretrain_pth: (str | None)=None, **kwargs) -> Swin:
    """Instantiate a :class:`Swin` transformer.

    Args:
        img_size: Input image size.
        patch_size: Patch size.
        in_chans: Number of input channels.
        global_pool: Type of global pooling for the final sequence ('' or 'avg').
        embed_dim: Patch embedding dimension.
        depths: Depth of each Swin layer.
        num_heads: Number of attention heads per layer.
        window_size: Attention window size.
        mlp_ratio: Ratio of mlp hidden dim to embedding dim.
        qkv_bias: Enable bias for qkv if ``True``.
        drop_rate: Dropout rate.
        attn_drop_rate: Attention dropout rate.
        drop_path_rate: Stochastic depth rate.
        norm_layer: Normalization layer constructor.
        ape: If ``True``, add an absolute position embedding.
        patch_norm: If ``True``, normalize after patch embedding.
        weight_init: Weight init scheme.
        conv_type: Type of convolution used for the patch embedder.
        tube_size: Tube size used in case the patch embedder uses a convolution 3D.
        pretrain_pth: Checkpoint to load pretrained weights from (not implemented yet).

    Returns:
        The Swin model.
    """
    model = Swin(img_size=img_size, patch_size=patch_size, in_chans=in_chans, global_pool=global_pool, embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer, ape=ape, patch_norm=patch_norm, weight_init=weight_init, conv_type=conv_type, tube_size=tube_size, **kwargs)
    # Pretrained-checkpoint loading has not been implemented yet.
    if (pretrain_pth is not None):
        raise NotImplementedError('TODO')
    return model
def create_swin_tiny(*args, **kwargs) -> Swin:
    """Build a Swin-Tiny model (patch 4, window 7, embed dim 96, depths 2-2-6-2)."""
    tiny_cfg = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24))
    return create_swin(*args, **tiny_cfg, **kwargs)
def create_swin_small(*args, **kwargs) -> Swin:
    """Build a Swin-Small model (patch 4, window 7, embed dim 96, depths 2-2-18-2)."""
    small_cfg = dict(patch_size=4, window_size=7, embed_dim=96, depths=(2, 2, 18, 2), num_heads=(3, 6, 12, 24))
    return create_swin(*args, **small_cfg, **kwargs)
def create_swin_base(*args, **kwargs) -> Swin:
    """Build a Swin-Base model (patch 4, window 7, embed dim 128, depths 2-2-18-2)."""
    base_cfg = dict(patch_size=4, window_size=7, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32))
    return create_swin(*args, **base_cfg, **kwargs)
@torch.no_grad()
def constant_init_(tensor, constant_value=0):
    """Fill ``tensor`` in place with ``constant_value`` (no autograd tracking)."""
    tensor.fill_(constant_value)
@torch.no_grad()
def kaiming_init_(tensor, a=0, mode='fan_out', nonlinearity='relu', distribution='normal'):
    """Kaiming-initialize ``tensor`` in place.

    Args:
        tensor: Tensor to initialize.
        a: Negative slope of the rectifier used after this layer.
        mode: Fan mode, ``'fan_in'`` or ``'fan_out'``.
        nonlinearity: Name of the non-linear function (for the gain).
        distribution: ``'uniform'`` or ``'normal'``.
    """
    assert (distribution in ['uniform', 'normal'])
    init_fn = nn.init.kaiming_uniform_ if distribution == 'uniform' else nn.init.kaiming_normal_
    init_fn(tensor, a=a, mode=mode, nonlinearity=nonlinearity)
class PatchEmbed(nn.Module): 'Images to Patch Embedding.\n\n Args:\n img_size: Size of input image.\n patch_size: Size of one patch.\n tube_size: Size of temporal field of one 3D patch.\n in_chans: Channel num of input features.\n embed_dim: Dimensions of embedding.\n conv_type: Type for convolution layer.\n ' def __init__(self, img_size: (int | Tuple[(int, int)])=224, patch_size: (int | Tuple[(int, int)])=16, tube_size: int=2, in_chans: int=3, embed_dim: int=768, bias: bool=True, conv_type: str='Conv2d', norm_layer: (nn.Module | None)=None): super().__init__() if (type(img_size) is int): img_size = _pair(img_size) if (type(patch_size) is int): patch_size = _pair(patch_size) self.img_size = img_size self.patch_size = patch_size num_patches = ((self.img_size[1] // self.patch_size[1]) * (self.img_size[0] // self.patch_size[0])) assert ((((num_patches * self.patch_size[0]) * self.patch_size[1]) == (self.img_size[0] * self.img_size[1])), 'The image size H*W must be divisible by patch size') self.num_patches = num_patches if (conv_type == 'Conv2d'): self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) elif (conv_type == 'Conv3d'): self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=(tube_size, patch_size[0], patch_size[1]), stride=(tube_size, patch_size[0], patch_size[1]), bias=bias) else: raise TypeError(f'Unsupported conv layer type {conv_type}') self.norm = (norm_layer(embed_dim) if norm_layer else nn.Identity()) def init_weights(self): if (hasattr(self.proj, 'weight') and (self.proj.weight is not None)): kaiming_init_(self.proj.weight, mode='fan_in', nonlinearity='relu') if (hasattr(self.proj, 'bias') and (self.proj.bias is not None)): constant_init_(self.proj.bias, constant_value=0) def forward(self, x: torch.Tensor) -> torch.Tensor: layer_type = type(self.proj) if (layer_type == nn.Conv3d): x = self.proj(x) x = rearrange(x, 'b c t h w -> (b t) (h w) c') elif (layer_type == nn.Conv2d): if (x.ndim == 5): x = 
rearrange(x, 'b c t h w -> (b t) c h w') x = self.proj(x) x = rearrange(x, 'b c h w -> b (h w) c') else: x = self.proj(x) x = rearrange(x, 'b c h w -> b (h w) c') else: raise TypeError(f'Unsupported conv layer type {layer_type}') x = self.norm(x) return x
def get_patch_embed(**kwargs) -> nn.Module:
    """Return a patch-embedding module built from ``kwargs``.

    ``conv_type == 'identity'`` yields a pass-through ``nn.Identity``; any other
    value is forwarded to :class:`PatchEmbed`.
    """
    conv_type = kwargs['conv_type']
    return nn.Identity() if conv_type == 'identity' else PatchEmbed(**kwargs)
class VideoHeadModel(Module):
    """A general purpose model that handles an encoder and its head.

    Args:
        model: A model that precedes the head and is supposed to have
            initialized weights. Ex: stem + stages.
        head: A network head.
    """

    def __init__(self, model: Module, head: Module):
        super().__init__()
        self.model = model
        self.head = head

    def forward(self, x: Tensor):
        # Encoder first, then the head on its features.
        return self.head(self.model(x))
def create_video_head_model(model: DictConfig, head: DictConfig):
    """Build a video model from Hydra configs.

    Args:
        model: Config for the model (encoder).
        head: Config for the head.

    Returns:
        A :class:`VideoHeadModel` wrapping the instantiated encoder and head.
    """
    encoder = hydra.utils.instantiate(model)
    head_module = hydra.utils.instantiate(head)
    return VideoHeadModel(encoder, head_module)
def create_x3d(
    *,
    input_channel: int=3,
    input_clip_length: int=13,
    input_crop_size: int=160,
    model_num_class: int=400,
    dropout_rate: float=0.5,
    width_factor: float=2.0,
    depth_factor: float=2.2,
    norm: Callable=BatchNorm3d,
    norm_eps: float=1e-05,
    norm_momentum: float=0.1,
    activation: Callable=ReLU,
    stem_dim_in: int=12,
    stem_conv_kernel_size: Tuple[int]=(5, 3, 3),
    stem_conv_stride: Tuple[int]=(1, 2, 2),
    stage_conv_kernel_size: Tuple[Tuple[int]]=((3, 3, 3), (3, 3, 3), (3, 3, 3), (3, 3, 3)),
    stage_spatial_stride: Tuple[int]=(2, 2, 2, 2),
    stage_temporal_stride: Tuple[int]=(1, 1, 1, 1),
    bottleneck: Callable=create_x3d_bottleneck_block,
    bottleneck_factor: float=2.25,
    se_ratio: float=0.0625,
    inner_act: Callable=Swish,
    head: Callable=create_x3d_head,
    head_dim_out: int=2048,
    head_pool_act: Callable=ReLU,
    head_bn_lin5_on: bool=False,
    head_activation: Callable=Softmax,
    head_output_with_global_average: bool=True,
) -> Module:
    """X3D model builder. It builds a X3D network backbone, which is a ResNet.

    Christoph Feichtenhofer.
    "X3D: Expanding Architectures for Efficient Video Recognition."
    https://arxiv.org/abs/2004.04730

    ::

        Input
           ↓
        Stem
           ↓
        Stage 1
           ↓
           .
           .
           .
           ↓
        Stage N
           ↓
        Head

    Args:
        input_channel: Number of channels for the input video clip.
        input_clip_length: Length of the input video clip. Value for
            different models: X3D-XS: 4; X3D-S: 13; X3D-M: 16; X3D-L: 16.
        input_crop_size: Spatial resolution of the input video clip.
            Value for different models: X3D-XS: 160; X3D-S: 160; X3D-M: 224;
            X3D-L: 312.

        model_num_class: The number of classes for the video dataset.
        dropout_rate: Dropout rate.
        width_factor: Width expansion factor.
        depth_factor: Depth expansion factor. Value for different
            models: X3D-XS: 2.2; X3D-S: 2.2; X3D-M: 2.2; X3D-L: 5.0.

        norm: A callable that constructs normalization layer.
        norm_eps: Normalization epsilon.
        norm_momentum: Normalization momentum.

        activation: A callable that constructs activation layer.

        stem_dim_in: Input channel size for stem before expansion.
        stem_conv_kernel_size: Convolutional kernel size(s) of stem.
        stem_conv_stride: Convolutional stride size(s) of stem.

        stage_conv_kernel_size: Convolutional kernel size(s) for ``conv_b``.
        stage_spatial_stride: The spatial stride for each stage.
        stage_temporal_stride: The temporal stride for each stage.
        bottleneck: A callable that constructs the stage bottleneck block.
        bottleneck_factor: Bottleneck expansion factor for the 3x3x3 conv.
        se_ratio: if > 0, apply SE to the 3x3x3 conv, with the SE
            channel dimensionality being se_ratio times the 3x3x3 conv dim.
        inner_act: Whether use Swish activation for ``act_b`` or not.

        head_dim_out: Output channel size of the X3D head.
        head_pool_act: A callable that constructs resnet pool activation
            layer such as ``ReLU``.
        head_bn_lin5_on: If ``True``, perform normalization on the features
            before the classifier.
        head_activation: A callable that constructs activation layer.
        head_output_with_global_average: If ``True``, perform global averaging on
            the head output.

    Returns:
        The X3D network.
    """
    torch._C._log_api_usage_once('PYTORCHVIDEO.model.create_x3d')
    blocks = []
    # Expand the stem width by the global width factor.
    stem_dim_out = round_width(stem_dim_in, width_factor)
    stem = create_x3d_stem(
        in_channels=input_channel,
        out_channels=stem_dim_out,
        conv_kernel_size=stem_conv_kernel_size,
        conv_stride=stem_conv_stride,
        conv_padding=[(size // 2) for size in stem_conv_kernel_size],
        norm=norm,
        norm_eps=norm_eps,
        norm_momentum=norm_momentum,
        activation=activation,
    )
    blocks.append(stem)
    # Base (pre-expansion) depths and widths of the four X3D stages; the width
    # doubles each stage (exp_stage) and is rounded to a multiple of 8.
    stage_depths = [1, 2, 5, 3]
    exp_stage = 2.0
    stage_dim1 = stem_dim_in
    stage_dim2 = round_width(stage_dim1, exp_stage, divisor=8)
    stage_dim3 = round_width(stage_dim2, exp_stage, divisor=8)
    stage_dim4 = round_width(stage_dim3, exp_stage, divisor=8)
    stage_dims = [stage_dim1, stage_dim2, stage_dim3, stage_dim4]
    dim_in = stem_dim_out
    for idx in range(len(stage_depths)):
        # Scale widths and depths by the global expansion factors.
        dim_out = round_width(stage_dims[idx], width_factor)
        dim_inner = int((bottleneck_factor * dim_out))
        depth = round_repeats(stage_depths[idx], depth_factor)
        stage_conv_stride = (stage_temporal_stride[idx], stage_spatial_stride[idx], stage_spatial_stride[idx])
        stage = create_x3d_res_stage(
            depth=depth,
            dim_in=dim_in,
            dim_inner=dim_inner,
            dim_out=dim_out,
            bottleneck=bottleneck,
            conv_kernel_size=stage_conv_kernel_size[idx],
            conv_stride=stage_conv_stride,
            norm=norm,
            norm_eps=norm_eps,
            norm_momentum=norm_momentum,
            se_ratio=se_ratio,
            activation=activation,
            inner_act=inner_act,
        )
        blocks.append(stage)
        dim_in = dim_out
    # Derive the head pooling kernel from the total downsampling so the head
    # pools the entire remaining spatiotemporal extent.
    total_spatial_stride = (stem_conv_stride[1] * np.prod(stage_spatial_stride))
    total_temporal_stride = (stem_conv_stride[0] * np.prod(stage_temporal_stride))
    assert (input_clip_length >= total_temporal_stride), "Clip length doesn't match temporal stride!"
    assert (input_crop_size >= total_spatial_stride), "Crop size doesn't match spatial stride!"
    head_pool_kernel_size = (
        (input_clip_length // total_temporal_stride),
        int(math.ceil((input_crop_size / total_spatial_stride))),
        int(math.ceil((input_crop_size / total_spatial_stride))),
    )
    if (head is not None):
        # dim_out / dim_inner intentionally carry over from the last stage
        # of the loop above.
        head = head(
            dim_in=dim_out,
            dim_inner=dim_inner,
            dim_out=head_dim_out,
            num_classes=model_num_class,
            pool_act=head_pool_act,
            pool_kernel_size=head_pool_kernel_size,
            norm=norm,
            norm_eps=norm_eps,
            norm_momentum=norm_momentum,
            bn_lin5_on=head_bn_lin5_on,
            dropout_rate=dropout_rate,
            activation=head_activation,
            output_with_global_average=head_output_with_global_average,
        )
        blocks.append(head)
    return Net(blocks=ModuleList(blocks))
def extract_features(model: Module, loader: DataLoader) -> Tuple[(Tensor, Tensor)]:
    """Extract features from a model.

    Args:
        model: The model to extract features from.
        loader: The dataloader yielding ``(inputs, labels)`` batches.

    Returns:
        The features and its associated labels, each concatenated along dim 0.
    """
    features = []
    labels = []
    for inputs, targets in loader:
        features.append(model(inputs))
        labels.append(targets)
    return (torch.cat(features), torch.cat(labels))
def group_params_layer_id(model: Module) -> List[Tuple[(int, Tuple[(str, Parameter)])]]:
    """Retrieve from model the groups of parameters in the different layers.

    The model must expose ``get_param_layer_id(name)`` mapping a parameter
    name to its layer id. Group order follows the first parameter encountered
    for each layer id.

    Args:
        model: The model to retrieve the parameters from.

    Returns:
        The list of groups of parameters in the format::

            [
                (id_layer, [(name_param, param), ...]),
                ...
            ]
    """
    # Local import: keeps this fix self-contained without touching the
    # (unseen) top-of-file import block.
    from collections import defaultdict

    group_parameters = defaultdict(list)
    for (name, param) in model.named_parameters():
        group_parameters[model.get_param_layer_id(name)].append((name, param))
    return list(group_parameters.items())
class GatherLayer(torch.autograd.Function):
    """Gather tensor across devices with grad.

    Unlike a plain ``dist.all_gather`` (which does not propagate gradients),
    this autograd Function lets each rank backpropagate through its own slice
    of the gathered output. Falls back to a no-op when the process group is
    not initialized.
    """

    @staticmethod
    def forward(ctx, inp):
        # Keep the input so backward() can size the gradient buffer.
        ctx.save_for_backward(inp)
        if (dist.is_available() and dist.is_initialized()):
            output = [torch.zeros_like(inp) for _ in range(dist.get_world_size())]
            dist.all_gather(output, inp)
        else:
            # Single-process fallback: the "gather" is just this input.
            output = [inp]
        return tuple(output)

    @staticmethod
    def backward(ctx, *grads):
        (inp,) = ctx.saved_tensors
        if (dist.is_available() and dist.is_initialized()):
            # Each rank only receives the gradient of the output slot that
            # corresponds to its own input.
            grad_out = torch.zeros_like(inp)
            grad_out[:] = grads[dist.get_rank()]
        else:
            grad_out = grads[0]
        return grad_out
def concat_all_gather_with_backprop(x: Tensor, dim: int=0) -> Tensor:
    """Gather tensor across devices with grad.

    Args:
        x: Tensor to gather.
        dim: Dimension to concat.

    Returns:
        Gathered tensor.
    """
    gathered = GatherLayer.apply(x)
    return torch.cat(gathered, dim=dim)
@torch.no_grad()
def concat_all_gather_without_backprop(x: Tensor, dim: int=0) -> Tensor:
    """Gather tensor across devices without grad.

    Args:
        x: Tensor to gather.
        dim: Dimension to concat.

    Returns:
        Gathered tensor.
    """
    # Single-process fallback: nothing to gather.
    if not (dist.is_available() and dist.is_initialized()):
        return x
    world_size = torch.distributed.get_world_size()
    buffers = [torch.ones_like(x) for _ in range(world_size)]
    torch.distributed.all_gather(buffers, x, async_op=False)
    return torch.cat(buffers, dim=dim)
@torch.no_grad()
def get_world_size() -> int:
    """Returns the world size.

    Returns:
        The world size (1 when not running distributed).
    """
    distributed = dist.is_available() and dist.is_initialized()
    return torch.distributed.get_world_size() if distributed else 1
class SplitBatchNorm2D(BatchNorm2d):
    """Split batch normalization in several pieces to simulate several devices.

    Args:
        num_features: :math:`C` from an expected input of size :math:`(N, C, H, W)`.
        num_splits: Number of devices to simulate.
    """

    def __init__(self, num_features: int, num_splits: int, **kw):
        super().__init__(num_features, **kw)
        # How many virtual devices the batch is split across.
        self.num_splits = num_splits

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        (N, C, H, W) = input.shape
        if (self.training or (not self.track_running_stats)):
            # Training path: fold the split factor into the channel dimension
            # so each chunk of the batch gets independent statistics.
            # NOTE(review): assumes affine=True and track_running_stats=True —
            # self.weight / self.running_mean would be None otherwise; confirm
            # this layer is never constructed with those disabled.
            running_mean_split = self.running_mean.repeat(self.num_splits)
            running_var_split = self.running_var.repeat(self.num_splits)
            outcome = nn.functional.batch_norm(input.view((- 1), (C * self.num_splits), H, W), running_mean_split, running_var_split, self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits), True, self.momentum, self.eps).view(N, C, H, W)
            # Fold the per-split running statistics back into the shared buffers.
            self.running_mean.data.copy_(running_mean_split.view(self.num_splits, C).mean(dim=0))
            self.running_var.data.copy_(running_var_split.view(self.num_splits, C).mean(dim=0))
            return outcome
        else:
            # Eval path: behave exactly like a standard BatchNorm2d.
            return nn.functional.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, self.momentum, self.eps)

    def __repr__(self):
        return '{}({num_features}, num_splits={num_splits}, eps={eps}, momentum={momentum}, affine={affine}, track_running_stats={track_running_stats})'.format(__class__.__name__, **self.__dict__)
def convert_to_split_batchnorm(module: Module, num_splits: int) -> Module:
    """Convert BatchNorm layers to SplitBatchNorm layers in module.

    Args:
        module: Module to convert.
        num_splits: Number of splits for the :class:`SplitBatchNorm2D` layers.

    Returns:
        The converted module.
    """
    converted = module
    if isinstance(module, BatchNorm2d):
        converted = SplitBatchNorm2D(
            num_features=module.num_features,
            num_splits=num_splits,
            eps=module.eps,
            momentum=module.momentum,
            affine=module.affine,
            track_running_stats=module.track_running_stats,
        )
        if module.affine:
            # Copy learnable scale/shift without recording autograd history.
            with torch.no_grad():
                converted.weight = module.weight
                converted.bias = module.bias
        converted.running_mean = module.running_mean
        converted.running_var = module.running_var
        converted.num_batches_tracked = module.num_batches_tracked
        # Preserve quantization config when present.
        if hasattr(module, 'qconfig'):
            converted.qconfig = module.qconfig
    # Recurse into children, re-attaching the converted versions.
    for (child_name, child) in module.named_children():
        converted.add_module(child_name, convert_to_split_batchnorm(child, num_splits))
    del module
    return converted
class LARS(torch.optim.Optimizer):
    """LARS optimizer, no rate scaling or weight decay for parameters <= 1D.

    References LARS:
        - https://arxiv.org/pdf/1708.03888.pdf

    Args:
        params: Parameters to optimize.
        lr: Learning rate of the optimizer.
        weight_decay: Weight decay to apply.
        momentum: Momentum for optimization.
        trust_coefficient: LARS trust coefficient.
    """

    def __init__(self, params: Iterable[Parameter], lr: float=0, weight_decay: float=0, momentum: float=0.9, trust_coefficient: float=0.001) -> None:
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure: Optional[Callable]=None):
        """Perform one optimization step.

        Args:
            closure: Optional callable that re-evaluates the model and
                returns the loss (re-run under grad mode, per the
                ``torch.optim.Optimizer.step`` contract).
        """
        loss = None
        if (closure is not None):
            with torch.enable_grad():
                loss = closure()
        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad
                if (dp is None):
                    continue
                # Weight decay and LARS trust-ratio scaling apply only to
                # multi-dimensional params (weights); 1-D params (biases,
                # norm gains) fall through to plain momentum SGD.
                if (p.ndim > 1):
                    dp = dp.add(p, alpha=g['weight_decay'])
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    # Local learning rate q = trust * ||p|| / ||dp|| when both
                    # norms are positive; otherwise 1 (no scaling).
                    q = torch.where((param_norm > 0.0), torch.where((update_norm > 0), ((g['trust_coefficient'] * param_norm) / update_norm), one), one)
                    dp = dp.mul(q)
                # Momentum buffer, lazily created per parameter.
                param_state = self.state[p]
                if ('mu' not in param_state):
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=(- g['lr']))
        return loss
def optimizer_factory(
    name: str,
    initial_lr: float,
    model: Module,
    batch_size: Optional[int] = None,
    num_steps_per_epoch: Optional[int] = None,
    layer_decay_lr: (float | None) = None,
    keys_without_decay: Optional[List[str]] = None,
    exclude_wd_norm: bool = False,
    exclude_wd_bias: bool = False,
    scaler: Optional[str] = None,
    params: DictConfig = {},
    divide_wd_by_lr: bool = False,
    scheduler: Optional[DictConfig] = None,
    multiply_lr: float = 1.0,
    multiply_parameters: Optional[List[Parameter]] = None,
) -> Tuple[(Optimizer, Optional[_LRScheduler])]:
    """Optimizer factory to build optimizers and optionally an attached scheduler.

    Args:
        name: Name of the optimizer, looked up in the ``_OPTIMIZERS`` dict.
        initial_lr: Initial learning rate, before any scaling rule is applied.
        model: Model to optimize.
        batch_size: Batch size for the input of the model (used by some scalers).
        num_steps_per_epoch: Number of steps per epoch. Useful for some schedulers.
        layer_decay_lr: If set, apply layer-wise learning-rate decay; the model must
            expose ``num_layers`` and be supported by ``group_params_layer_id``.
        keys_without_decay: Parameter names to exclude from weight decay
            (``None`` means no extra keys; backward-compatible with the old ``[]`` default).
        exclude_wd_norm: If ``True``, exclude normalization layers from weight decay.
        exclude_wd_bias: If ``True``, exclude bias parameters from weight decay.
        scaler: Scaler rule for the initial learning rate.
        params: Parameters for the optimizer constructor.
        divide_wd_by_lr: If ``True``, divide the weight decay by the scaled learning rate.
        scheduler: Scheduler config, instantiated through hydra when given.
        multiply_lr: Multiply the learning rate by this factor. Applied for scheduler as well.
        multiply_parameters: Parameters whose learning rate is additionally
            multiplied by ``multiply_lr``.

    Returns:
        The optimizer with its optional scheduler.
    """
    # Copy into fresh locals: the previous implementation used mutable default
    # arguments ([]) and mutated `keys_without_decay` in place (`.append('bias')`),
    # leaking state across calls.
    keys_without_decay = list(keys_without_decay) if (keys_without_decay is not None) else []
    multiply_parameters = multiply_parameters if (multiply_parameters is not None) else []

    optimizer_class = _OPTIMIZERS[name]
    lr = scale_learning_rate(initial_lr, scaler, batch_size, multiply_lr)
    if ('weight_decay' in params) and divide_wd_by_lr:
        # NOTE(review): mutates the caller's config in place — calling twice with
        # the same config divides the weight decay twice; confirm callers pass a
        # fresh config each time.
        params['weight_decay'] /= lr
        rank_zero_info(f"weight_decay has been scaled to {params['weight_decay']}")

    modules_without_decay = []
    if exclude_wd_norm:
        modules_without_decay.extend(_NORM_LAYERS)
    if exclude_wd_bias:
        keys_without_decay.append('bias')

    no_wd_parameters, wd_parameters = retrieve_model_params(model, modules_without_decay, keys_without_decay)
    wd_parameters = filter_learnable_params(wd_parameters, model)
    no_wd_parameters = filter_learnable_params(no_wd_parameters, model)

    def _contains(param: Parameter, param_list: List[Parameter]) -> bool:
        # Identity-based membership: Tensor `==` is elementwise, so `in` on
        # parameter lists is unreliable (and can raise for mismatched shapes).
        return any(param is candidate for candidate in param_list)

    # Recover parameter names for logging / layer grouping.
    named_wd_parameters = [param_name for param_name, param in model.named_parameters() if _contains(param, wd_parameters)]
    named_no_wd_parameters = [param_name for param_name, param in model.named_parameters() if _contains(param, no_wd_parameters)]

    if layer_decay_lr is not None:
        # NOTE(review): this guard only raises when *both* attributes are missing;
        # `model.num_layers` below still fails if only one is defined — confirm intent.
        if (not hasattr(model, 'num_layers')) and (not hasattr(model, 'group_params_layer_id')):
            raise NotImplementedError('Model should have `num_layers` and `params_layer_id` defined.')
        num_layers: int = model.num_layers
        params_layer_id = group_params_layer_id(model)
        # Deeper layers (larger i) decay less: decay_i = layer_decay_lr ** (num_layers - 1 - i).
        layer_lr_decay_values = [layer_decay_lr ** ((num_layers - 1) - i) for i in range(num_layers)]
        group_wd_parameters = [
            (
                i,
                layer_lr_decay_values[i],
                [param for param_name, param in layer_params if param_name in named_wd_parameters],
                [param_name for param_name, param in layer_params if param_name in named_wd_parameters],
            )
            for i, layer_params in params_layer_id
        ]
        group_no_wd_parameters = [
            (
                i,
                layer_lr_decay_values[i],
                [param for param_name, param in layer_params if param_name in named_no_wd_parameters],
                [param_name for param_name, param in layer_params if param_name in named_no_wd_parameters],
            )
            for i, layer_params in params_layer_id
        ]
        rank_zero_info(f'''{model._get_name()} optimizer's:
Layers with weight decay:''')
        for i, layer_lr_decay_value, layer_params, name_parameters in group_wd_parameters:
            rank_zero_info(f'Layer {i}: num parameters={len(layer_params)}, decay={layer_lr_decay_value}, name parameters={name_parameters}')
        rank_zero_info('Layers without weight decay:')
        for i, layer_lr_decay_value, layer_params, name_parameters in group_no_wd_parameters:
            rank_zero_info(f'Layer {i}: num parameters={len(layer_params)}, decay={layer_lr_decay_value}, name parameters={name_parameters}')
        group_parameters = [
            {'params': group_params, 'layer_lr_decay': layer_lr_decay, 'layer_id': layer_id}
            for layer_id, layer_lr_decay, group_params, _ in group_wd_parameters
        ]
        group_parameters += [
            {'params': group_params, 'weight_decay': 0.0, 'layer_lr_decay': layer_lr_decay, 'layer_id': layer_id}
            for layer_id, layer_lr_decay, group_params, _ in group_no_wd_parameters
            if group_params != []
        ]
    else:
        group_parameters = [{'params': [param for param in wd_parameters if not _contains(param, multiply_parameters)]}]
        wd_multiply_parameters = [param for param in multiply_parameters if _contains(param, wd_parameters)]
        if len(wd_multiply_parameters) > 0:
            # BUG FIX: previously appended all of `wd_parameters` here, duplicating
            # every decayed parameter into a second group with multiplied lr.
            group_parameters.append({'params': wd_multiply_parameters, 'lr': (lr * multiply_lr)})
        if no_wd_parameters != []:
            group_parameters.append(
                {'params': [param for param in no_wd_parameters if not _contains(param, multiply_parameters)], 'weight_decay': 0.0}
            )
            no_wd_multiply_parameters = [param for param in multiply_parameters if _contains(param, no_wd_parameters)]
            if len(no_wd_multiply_parameters) > 0:
                # BUG FIX: previously also re-appended `wd_parameters` with a
                # multiplied lr in this branch; only the no-decay multiply group belongs here.
                group_parameters.append(
                    {'params': no_wd_multiply_parameters, 'lr': (lr * multiply_lr), 'weight_decay': 0.0}
                )

    rank_zero_info(f'''{model._get_name()} optimizer's:
With weight decay: num parameters={len(wd_parameters)}, name parameters: {named_wd_parameters}
Without weight decay: num parameters={len(no_wd_parameters)}, name parameters:{named_no_wd_parameters}''')
    optimizer = optimizer_class(group_parameters, lr=lr, **params)
    if scheduler is not None:
        scheduler = hydra.utils.instantiate(
            scheduler,
            num_steps_per_epoch=num_steps_per_epoch,
            optimizer=optimizer,
            scaler=scaler,
            batch_size=batch_size,
            multiply_lr=multiply_lr,
        )
    return (optimizer, scheduler)
def optimizer_factory_two_groups(name: str, initial_lr1: float, initial_lr2: float, model1: Module, model2: Module, batch_size: Optional[int]=None, num_steps_per_epoch: Optional[int]=None, exclude_wd_norm: bool=False, exclude_wd_bias: bool=False, scaler: Optional[str]=None, params: DictConfig={}, scheduler: Optional[DictConfig]=None) -> Tuple[(Optimizer, Optional[_LRScheduler])]:
    """Build one optimizer driving two models, each with its own learning rate.

    Each model contributes a weight-decayed parameter group (with its scaled lr)
    and, when non-empty, a decay-free group.

    Args:
        name: Name of the optimizer, looked up in the ``_OPTIMIZERS`` dict.
        initial_lr1: Initial learning rate for model 1.
        initial_lr2: Initial learning rate for model 2.
        model1: Model 1 to optimize.
        model2: Model 2 to optimize.
        batch_size: Batch size for the input of the model (used by some scalers).
        num_steps_per_epoch: Number of steps per epoch. Useful for some schedulers.
        exclude_wd_norm: If ``True``, exclude normalization layers from weight decay.
        exclude_wd_bias: If ``True``, exclude bias parameters from weight decay.
        scaler: Scaler rule for the initial learning rate.
        params: Parameters for the optimizer constructor.
        scheduler: Scheduler config, instantiated through hydra when given.

    Returns:
        The optimizer with its optional scheduler.
    """
    optimizer_class = _OPTIMIZERS[name]
    # Scale each model's learning rate with the same scaler rule.
    lr1 = scale_learning_rate(initial_lr1, scaler, batch_size)
    lr2 = scale_learning_rate(initial_lr2, scaler, batch_size)
    modules_without_decay = []
    keys_without_decay = []
    if exclude_wd_norm:
        modules_without_decay.extend(_NORM_LAYERS)
    if exclude_wd_bias:
        keys_without_decay.append('bias')
    # Split each model's parameters into without-decay / with-decay sets
    # (retrieve_model_params returns (filtered, other)).
    (no_wd_parameters1, wd_parameters1) = retrieve_model_params(model1, modules_without_decay, keys_without_decay)
    (no_wd_parameters2, wd_parameters2) = retrieve_model_params(model2, modules_without_decay, keys_without_decay)
    # Keep only parameters the models declare as learnable (and requiring grad).
    wd_parameters1 = filter_learnable_params(wd_parameters1, model1)
    no_wd_parameters1 = filter_learnable_params(no_wd_parameters1, model1)
    wd_parameters2 = filter_learnable_params(wd_parameters2, model2)
    no_wd_parameters2 = filter_learnable_params(no_wd_parameters2, model2)
    # Recover parameter names for logging; identity comparison (`is`) is used
    # because Tensor `==` is elementwise.
    named_wd_parameters1 = [name for (name, param) in model1.named_parameters() if any([(param is wd_param) for wd_param in wd_parameters1])]
    named_no_wd_parameters1 = [name for (name, param) in model1.named_parameters() if any([(param is no_wd_param) for no_wd_param in no_wd_parameters1])]
    named_wd_parameters2 = [name for (name, param) in model2.named_parameters() if any([(param is wd_param) for wd_param in wd_parameters2])]
    named_no_wd_parameters2 = [name for (name, param) in model2.named_parameters() if any([(param is no_wd_param) for no_wd_param in no_wd_parameters2])]
    # One decayed group per model with its explicit lr.
    list_optim = [{'params': wd_parameters1, 'lr': lr1}, {'params': wd_parameters2, 'lr': lr2}]
    # NOTE(review): the decay-free groups carry no explicit 'lr' and fall back to
    # the optimizer's default — confirm `params` supplies one when these are non-empty.
    if (no_wd_parameters1 != []):
        list_optim.append({'params': no_wd_parameters1, 'weight_decay': 0.0})
    if (no_wd_parameters2 != []):
        list_optim.append({'params': no_wd_parameters2, 'weight_decay': 0.0})
    optimizer = optimizer_class(list_optim, **params)
    rank_zero_info(f'''{model1._get_name()} optimizer's:
With weight decay: num parameters={len(wd_parameters1)}, name parameters: {named_wd_parameters1}
Without weight decay: num parameters={len(no_wd_parameters1)}, name parameters:{named_no_wd_parameters1}''')
    rank_zero_info(f'''{model2._get_name()} optimizer's:
With weight decay: num parameters={len(wd_parameters2)}, name parameters: {named_wd_parameters2}
Without weight decay: num parameters={len(no_wd_parameters2)}, name parameters:{named_no_wd_parameters2}''')
    if (scheduler is not None):
        scheduler = hydra.utils.instantiate(scheduler, num_steps_per_epoch=num_steps_per_epoch, optimizer=optimizer, scaler=scaler, batch_size=batch_size)
    return (optimizer, scheduler)
def retrieve_model_params(model: Module, modules_to_filter: Iterable[type] = (), keys_to_filter: Iterable[str] = ()) -> Tuple[(List[Parameter], List[Parameter])]:
    """Retrieve sets of filtered and not filtered parameters from a model.

    A parameter is *filtered* when its direct parent module's type is in
    ``modules_to_filter``, or when its (unqualified) name is in ``keys_to_filter``.

    Args:
        model: Model to retrieve the params from.
        modules_to_filter: Module classes whose direct parameters are filtered
            (compared with ``type(module) in modules_to_filter``).
        keys_to_filter: Exact parameter names (e.g. ``'bias'``) to filter.

    Returns:
        Filtered parameters, other parameters.
    """
    # Defaults are immutable tuples: the previous `[]` defaults were shared
    # mutable objects (classic mutable-default pitfall).
    other_parameters = []
    filtered_parameters = []
    for module in model.modules():
        if type(module) in modules_to_filter:
            # All direct parameters of a filtered module type are filtered.
            filtered_parameters.extend(module.parameters(recurse=False))
        else:
            for param_name, param in module.named_parameters(recurse=False):
                if param_name in keys_to_filter:
                    filtered_parameters.append(param)
                else:
                    other_parameters.append(param)
    return (filtered_parameters, other_parameters)
def filter_learnable_params(parameters: Iterable[Parameter], model: Module) -> List[Parameter]:
    """Keep only the parameters that the model declares as learnable.

    If the model exposes a ``learnable_params`` attribute, a parameter is kept
    when it requires grad *and* is (by identity) one of the learnable params.
    Otherwise all passed parameters requiring grad are returned.

    Args:
        parameters: Parameters to filter.
        model: Model to retrieve learnable parameters from.

    Returns:
        Learnable parameters.
    """
    if not hasattr(model, 'learnable_params'):
        rank_zero_info(f'Model of type {type(model)} has no learnable parameters defined, all passed parameters returned.')
        return [param for param in parameters if param.requires_grad]
    learnable = model.learnable_params
    # Identity comparison (`is`): Tensor `==` is elementwise and unsuitable here.
    return [
        param
        for param in parameters
        if param.requires_grad and any(param is candidate for candidate in learnable)
    ]
def scale_learning_rate(initial_lr: float, scaler: Optional[str] = None, batch_size: Optional[int] = None, multiply_lr: float = 1.0) -> float:
    """Scale the initial learning rate.

    Args:
        initial_lr: Initial learning rate.
        scaler: Scaler rule: ``None``/``'none'`` (no scaling), ``'linear'``
            (``lr * batch_size / 256``) or ``'sqrt'`` (``lr * sqrt(batch_size)``).
        batch_size: Batch size to scale the learning rate; required for the
            ``'linear'`` and ``'sqrt'`` rules.
        multiply_lr: Multiply the learning rate by this factor.

    Returns:
        Scaled initial learning rate.

    Raises:
        ValueError: If ``scaler`` is not a known rule (previously this path
            crashed with ``UnboundLocalError``).
    """
    # Annotations fixed: learning rates are floats, not ints.
    if (scaler is None) or (scaler == 'none'):
        return initial_lr * multiply_lr
    if scaler == 'linear':
        lr = (initial_lr * batch_size) / 256
    elif scaler == 'sqrt':
        lr = initial_lr * np.sqrt(batch_size)
    else:
        raise ValueError(f'Unknown learning-rate scaler rule: {scaler!r}')
    return lr * multiply_lr
class LinearWarmupCosineAnnealingLR(_LRScheduler):
    """Linear warmup from ``warmup_start_lr`` to ``base_lr``, then cosine annealing to ``eta_min``.

    Supports per-group layer-wise lr decay: when the optimizer's param groups
    carry a ``'layer_lr_decay'`` entry, each group's schedule is scaled by it.

    Args:
        optimizer: Wrapped optimizer.
        warmup_epochs: Maximum number of iterations for linear warmup.
        max_epochs: Maximum number of iterations.
        warmup_start_lr: Learning rate to start the linear warmup.
        eta_min: Minimum learning rate.
        last_epoch: The index of last epoch.

    .. warning::
        It is recommended to call :func:`.step()` for :class:`LinearWarmupCosineAnnealingLR`
        after each iteration as calling it after each epoch will keep the starting lr at
        ``warmup_start_lr`` for the first epoch which is 0 in most cases.

    .. warning::
        passing epoch to :func:`.step()` is being deprecated and comes with an EPOCH_DEPRECATION_WARNING.
        It calls the :func:`_get_closed_form_lr()` method for this scheduler instead of
        :func:`get_lr()`. Though this does not change the behavior of the scheduler, when passing
        epoch param to :func:`.step()`, the user should call the :func:`.step()` function before calling
        train and validation methods.

    Example::
        >>> layer = nn.Linear(10, 1)
        >>> optimizer = Adam(layer.parameters(), lr=0.02)
        >>> scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=40)
        >>> # the default case
        >>> for epoch in range(40):
        ...     # train(...); validate(...)
        ...     scheduler.step()
        >>> # passing epoch param case
        >>> for epoch in range(40):
        ...     scheduler.step(epoch)
        ...     # train(...); validate(...)
    """

    def __init__(self, optimizer: Optimizer, warmup_epochs: int, max_epochs: int, warmup_start_lr: float=0.0, eta_min: float=0.0, last_epoch: int=(-1)) -> None:
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.warmup_start_lr = warmup_start_lr
        self.eta_min = eta_min
        # Per-group multipliers for layer-wise lr decay; 1.0 when the optimizer
        # was not built with 'layer_lr_decay' entries.
        has_layer_lr_decay = (optimizer.param_groups[0].get('layer_lr_decay', None) is not None)
        if has_layer_lr_decay:
            self.layer_lr_decay_values = [group['layer_lr_decay'] for group in optimizer.param_groups]
        else:
            self.layer_lr_decay_values = [1.0 for _ in optimizer.param_groups]
        # Base-class __init__ records base_lrs and triggers the first step.
        super().__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        'Compute learning rate using chainable form of the scheduler.'
        if (not self._get_lr_called_within_step):
            warnings.warn('To get the last learning rate computed by the scheduler, please use `get_last_lr()`.', UserWarning)
        # First step with a warmup phase: start at the warmup lr.
        if ((self.last_epoch == 0) and (self.warmup_epochs != 0)):
            # NOTE(review): this zip lacks strict=True unlike the others below.
            return [(self.warmup_start_lr * decay_value) for decay_value in self.layer_lr_decay_values]
        elif (self.last_epoch == 0):
            # No warmup: start directly at base lr (scaled per group).
            return [(base_lr * layer_lr_decay) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values)]
        elif (self.last_epoch < self.warmup_epochs):
            # Linear warmup increment added to the previous lr.
            # NOTE(review): divides by (warmup_epochs - 1) — warmup_epochs == 1
            # would divide by zero; confirm callers use warmup_epochs != 1.
            return [(group['lr'] + (((base_lr - self.warmup_start_lr) / (self.warmup_epochs - 1)) * layer_lr_decay)) for (base_lr, group, layer_lr_decay) in zip(self.base_lrs, self.optimizer.param_groups, self.layer_lr_decay_values, strict=True)]
        elif (self.last_epoch == self.warmup_epochs):
            # Warmup finished: exactly base lr.
            return [(base_lr * layer_lr_decay) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values, strict=True)]
        elif ((((self.last_epoch - 1) - self.max_epochs) % (2 * (self.max_epochs - self.warmup_epochs))) == 0):
            # Cosine-cycle boundary correction step.
            return [(group['lr'] + ((((base_lr - self.eta_min) * (1 - math.cos((math.pi / (self.max_epochs - self.warmup_epochs))))) / 2) * layer_lr_decay)) for (base_lr, group, layer_lr_decay) in zip(self.base_lrs, self.optimizer.param_groups, self.layer_lr_decay_values, strict=True)]
        # Chainable cosine annealing: ratio of successive cosine factors applied
        # to the previous lr (offset by the per-group scaled eta_min).
        return [((((1 + math.cos(((math.pi * (self.last_epoch - self.warmup_epochs)) / (self.max_epochs - self.warmup_epochs)))) / (1 + math.cos(((math.pi * ((self.last_epoch - self.warmup_epochs) - 1)) / (self.max_epochs - self.warmup_epochs))))) * (group['lr'] - (self.eta_min * layer_lr_decay))) + (self.eta_min * layer_lr_decay)) for (group, layer_lr_decay) in zip(self.optimizer.param_groups, self.layer_lr_decay_values, strict=True)]

    def _get_closed_form_lr(self) -> List[float]:
        'Called when epoch is passed as a param to the `step` function of the scheduler.'
        if (self.last_epoch < self.warmup_epochs):
            # Closed-form linear warmup (same (warmup_epochs - 1) caveat as above).
            return [(layer_lr_decay * (self.warmup_start_lr + ((self.last_epoch * (base_lr - self.warmup_start_lr)) / (self.warmup_epochs - 1)))) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values, strict=True)]
        # Closed-form cosine annealing between base_lr and eta_min.
        return [(layer_lr_decay * (self.eta_min + ((0.5 * (base_lr - self.eta_min)) * (1 + math.cos(((math.pi * (self.last_epoch - self.warmup_epochs)) / (self.max_epochs - self.warmup_epochs))))))) for (base_lr, layer_lr_decay) in zip(self.base_lrs, self.layer_lr_decay_values, strict=True)]
def scheduler_factory(optimizer: Optimizer, name: str, params: DictConfig={}, interval: str='epoch', num_steps_per_epoch: Optional[int]=None, scaler: Optional[str]=None, batch_size: Optional[int]=None, multiply_lr: float=1.0) -> Dict[(str, Any)]:
    """Scheduler factory.

    Args:
        optimizer: Optimizer to wrap around.
        name: Name of the scheduler to retrieve the scheduler constructor from the ``_SCHEDULERS`` dict.
        params: Scheduler parameters for the scheduler constructor.
        interval: Interval to call step, if ``'epoch'`` call :func:`.step()` at each epoch.
        num_steps_per_epoch: Number of steps per epoch. Used to convert epoch counts
            to step counts when ``interval == 'step'``.
        scaler: Scaler rule for the initial learning rate.
        batch_size: Batch size for the input of the model.
        multiply_lr: Multiply the learning rate by factor. Applied for warmup and minimum learning rate as well.

    Returns:
        Scheduler configuration for pytorch lightning (``{'scheduler': ..., 'interval': ...}``).
    """
    if (interval == 'step'):
        if (name == 'linear_warmup_cosine_annealing_lr'):
            # Convert epoch counts to step counts for per-step scheduling.
            # NOTE(review): mutates `params` in place — calling this twice with
            # the same config multiplies the epoch counts twice; confirm callers
            # pass a fresh config each time.
            params.max_epochs = (num_steps_per_epoch * params.max_epochs)
            params.warmup_epochs = (num_steps_per_epoch * params.warmup_epochs)
    # Scale the lr-valued scheduler params with the same rule as the optimizer lr.
    # NOTE(review): truthiness check — an explicit 0.0 is skipped (scaling 0 is a no-op anyway).
    if params.get('eta_min'):
        params.eta_min = scale_learning_rate(params.eta_min, scaler, batch_size, multiply_lr)
    if params.get('warmup_start_lr'):
        params.warmup_start_lr = scale_learning_rate(params.warmup_start_lr, scaler, batch_size, multiply_lr)
    scheduler = _SCHEDULERS[name](optimizer=optimizer, **params)
    return {'scheduler': scheduler, 'interval': interval}
class ApplyTransformToKey(Module):
    """Apply a transform to one entry of a dictionary input (in place).

    Args:
        key: The dictionary key the transform is applied to.
        transform: The transform that is applied.
    """

    def __init__(self, key: str, transform: Module):
        super().__init__()
        self._key = key
        self._transform = transform

    def forward(self, x: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        # Mutates and returns the same dictionary.
        value = x[self._key]
        x[self._key] = self._transform(value)
        return x
class ApplyTransformToKeyOnList(Module):
    """Apply a transform to each element of a list stored under a dictionary key.

    Args:
        key: the dictionary key the transform is applied to.
        transform: the transform that is applied.

    Example::
        >>> transforms.ApplyTransformToKeyOnList(
        >>>     key='input',
        >>>     transform=UniformTemporalSubsample(num_video_samples),
        >>> )
    """

    def __init__(self, key: str, transform: Module) -> None:
        super().__init__()
        self._key = key
        self._transform = transform

    def forward(self, x: Dict[(str, List[Tensor])]) -> Dict[(str, List[Tensor])]:
        transformed = []
        for item in x[self._key]:
            transformed.append(self._transform(item))
        x[self._key] = transformed
        return x

    def __repr__(self):
        return f'{self.__class__.__name__}(key={self._key}, transform={self._transform})'
class ApplySameTransformToKeyOnList(Module):
    """Apply one transform jointly to a list stored under a dictionary key.

    The list elements are concatenated along ``dim``, transformed once, and
    split back into equally sized chunks — so any randomness in the transform
    is shared across all elements.

    Args:
        key: the dictionary key the transform is applied to.
        transform: the transform that is applied.
        dim: The dimension to retrieve the various elements of the list.
    """

    def __init__(self, key: str, transform: Module, dim: int = 1) -> None:
        super().__init__()
        self._key = key
        self._transform = transform
        self._dim = dim

    def forward(self, x: Dict[(str, List[Tensor])]) -> Dict[(str, List[Tensor])]:
        items = x[self._key]
        count = len(items)
        merged = torch.cat(items, dim=self._dim)
        merged = self._transform(merged)
        chunk = merged.shape[self._dim] // count
        x[self._key] = list(merged.split(chunk, dim=self._dim))
        return x

    def __repr__(self):
        return f'{self.__class__.__name__}(key={self._key}, transform={self._transform}, dim={self._dim})'
class ApplyTransformInputKeyOnList(ApplyTransformToKeyOnList):
    """Apply a transform to each element of the list stored under the ``'input'`` key.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Module):
        super().__init__(key='input', transform=transform)

    def __repr__(self):
        return f'{type(self).__name__}(transform={self._transform})'
class ApplySameTransformInputKeyOnList(ApplySameTransformToKeyOnList):
    """Apply the same transform jointly to the list stored under the ``'input'`` key.

    Args:
        transform: The transform to apply.
        dim: The dimension to retrieve the various elements of the list.
    """

    def __init__(self, transform: Module, dim: int = 1):
        super().__init__(key='input', transform=transform, dim=dim)

    def __repr__(self):
        return f'{type(self).__name__}(transform={self._transform}, dim={self._dim})'
class ApplyTransformAudioKeyOnList(ApplyTransformToKeyOnList):
    """Apply a transform to each element of the list stored under the ``'audio'`` key.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Module):
        super().__init__(key='audio', transform=transform)

    def __repr__(self):
        return f'{type(self).__name__}(transform={self._transform})'
class ApplyTransformInputKey(ApplyTransformToKey):
    """Apply a transform to the value stored under the ``'input'`` key.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Module):
        super().__init__(key='input', transform=transform)

    def __repr__(self):
        return f'{type(self).__name__}(transform={self._transform})'
class ApplyTransformAudioKey(ApplyTransformToKey):
    """Apply a transform to the value stored under the ``'audio'`` key.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Module):
        super().__init__('audio', transform=transform)

    def __repr__(self):
        # Added for consistency: every sibling Apply*Key wrapper defines __repr__,
        # this one previously fell back to the default Module repr.
        return f'{self.__class__.__name__}(transform={self._transform})'
class ApplyTransformOnDict(Module):
    """Apply a transform to the whole dictionary input.

    (Docstring fixed: it previously claimed this applies to the audio key,
    a copy-paste from the ApplyTransformAudioKey sibling.)

    Args:
        transform: The transform to apply to the dictionary.
    """

    def __init__(self, transform: Module):
        super().__init__()
        self._transform = transform

    def forward(self, x: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        # The transform receives (and returns) the full dictionary.
        x = self._transform(x)
        return x

    def __repr__(self):
        return f'{self.__class__.__name__}(transform={self._transform})'
class ApplyTransformOnList(Module):
    """Apply one transform to every element of a fixed-length list input.

    Args:
        transform: A transform for the list of input.
        list_len: len of the input.
    """

    def __init__(self, transform: Module, list_len: int = 2) -> None:
        super().__init__()
        self.list_len = list_len
        self.transform = transform

    def forward(self, X: Iterable[Tensor]) -> List[Tensor]:
        assert self.list_len == len(X)
        return [self.transform(item) for item in X]

    def __repr__(self) -> str:
        return self.__class__.__name__ + f'(transform={self.transform})'
class ApplyTransformsOnList(Module):
    """Apply the i-th transform to the i-th element of a list input.

    (Docstring fixed: it previously documented ``transform``/``list_len``
    arguments that this class does not take.)

    Args:
        transforms: One transform per element of the input list; the expected
            list length is ``len(transforms)``.
    """

    def __init__(self, transforms: List[Module]) -> None:
        super().__init__()
        self.list_len = len(transforms)
        self.transforms = ModuleList(transforms)

    def forward(self, X: Iterable[Tensor]) -> List[Tensor]:
        assert self.list_len == len(X)
        # Pair each element with its dedicated transform.
        return [transform(item) for transform, item in zip(self.transforms, X)]

    def __repr__(self) -> str:
        return self.__class__.__name__ + f'(transforms={self.transforms})'
class ApplySameTransformOnList(Module):
    """Apply the same transform to a list of inputs by concatenating them first.

    The inputs are concatenated along ``dim``, transformed in one call, then
    split back — so any randomness in the transform is shared across elements.

    Args:
        transform: A transform for the list of input.
        list_len: len of the input.
        dim: The dimension to retrieve the various elements of the list.
    """

    def __init__(self, transform: Any, list_len: int = 2, dim: int = 1) -> None:
        super().__init__()
        self.list_len = list_len
        self.transform = transform
        self.dim = dim

    def forward(self, X: Iterable[Tensor]) -> List[Tensor]:
        assert self.list_len == len(X)
        merged = torch.cat(X, dim=self.dim)
        merged = self.transform(merged)
        chunk = merged.shape[self.dim] // self.list_len
        return list(merged.split(chunk, dim=self.dim))

    def __repr__(self) -> str:
        return self.__class__.__name__ + f'(transform={self.transform})'
class DictKeepKeys():
    """Keep only the specified keys of a dict input.

    References:
        - https://github.com/kalyanvasudev/pytorchInput-1/blob/export-D33431232/pytorchInput_trainer/pytorchInput_trainer/datamodule/transforms.py

    Args:
        keys: The list of keys to keep.
    """

    def __init__(self, keys: List[str]):
        self.keys = keys

    # Return annotation fixed: this returns a dict, not a list.
    def __call__(self, x: Dict[(str, Tensor)]) -> Dict[(str, Tensor)]:
        # Drop every entry whose key is not whitelisted.
        return {key: value for (key, value) in x.items() if key in self.keys}

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
class DictKeepInputLabel(DictKeepKeys):
    """Keep only the ``'input'`` and ``'label'`` entries of a dict input."""

    def __init__(self):
        super().__init__(keys=['input', 'label'])
class DictKeepInputLabelIdx(DictKeepKeys):
    """Keep only the ``'input'``, ``'label'``, ``'idx'`` and ``'aug_index'`` entries of a dict input.

    (Docstring fixed: it previously said ``'index'`` while the code keeps ``'idx'``.)
    """

    def __init__(self):
        super().__init__(['input', 'label', 'idx', 'aug_index'])
class DictToListFromKeys():
    """Convert a dict input to the list of values whose keys are in ``keys``.

    Note: the resulting order follows the dict's iteration order, not the
    order of ``keys``.

    References:
        - https://github.com/kalyanvasudev/pytorchInput-1/blob/export-D33431232/pytorchInput_trainer/pytorchInput_trainer/datamodule/transforms.py

    Args:
        keys: The list of keys to keep.
    """

    def __init__(self, keys: List[str]):
        self.keys = keys

    def __call__(self, x: Dict[(str, Tensor)]) -> List[Tensor]:
        selected = []
        for key, tensor in x.items():
            if key in self.keys:
                selected.append(tensor)
        return selected

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
class DictToListInputLabel(DictToListFromKeys):
    """Convert a dict input to the list ``[x['input'], x['label']]``.

    Unlike the parent class, the order is guaranteed (input first, label second)
    and a missing key raises ``KeyError``.

    (Docstring fixed: it previously documented a nonexistent ``transform`` argument.)
    """

    def __init__(self):
        super().__init__(['input', 'label'])

    def __call__(self, x: Dict[(str, Tensor)]) -> List[Tensor]:
        # Index in `self.keys` order to guarantee [input, label] ordering.
        return [x[key] for key in self.keys]
def div_255(input: Tensor, inplace: bool = True, dtype: dtype = torch.get_default_dtype()) -> Tensor:
    """Divide the given tensor by 255 (e.g. to map uint8 pixel values into [0, 1]).

    Args:
        input: The input tensor.
        inplace: Whether to perform the operation inplace. Performed after dtype
            conversion (so the caller's tensor is only mutated when no copy was
            needed for the conversion).
        dtype: dtype to convert the tensor before applying division.

    Returns:
        Scaled tensor by dividing 255.
    """
    converted = input.to(dtype=dtype)
    if not inplace:
        return converted / 255.0
    converted /= 255.0
    return converted
class Div255Input(Module):
    """Perform division by 255 on a tensor or on each tensor in a list.

    Args:
        inplace: Whether the division is performed inplace (after dtype conversion).
        dtype: dtype the tensor(s) are converted to before the division.
    """

    def __init__(self, inplace: bool = True, dtype: dtype = torch.get_default_dtype()) -> None:
        super().__init__()
        self.inplace = inplace
        self.dtype = dtype

    # Return annotation fixed: the list branch returns a list of tensors.
    def forward(self, x: (Tensor | List[Tensor])) -> (Tensor | List[Tensor]):
        # isinstance (not `type(x) is Tensor`) so Tensor subclasses such as
        # nn.Parameter take the single-tensor path instead of being iterated.
        if isinstance(x, Tensor):
            return div_255(x, inplace=self.inplace, dtype=self.dtype)
        return [div_255(el, inplace=self.inplace, dtype=self.dtype) for el in x]

    def __repr__(self):
        # Fixed: previously interpolated the bare `__class__` cell instead of
        # `self.__class__` (same output, but accidental).
        return f'{self.__class__.__name__}(inplace={self.inplace}, dtype={self.dtype})'