code stringlengths 17 6.64M |
|---|
class MultiCropTransform():
    """Define multi crop transform that applies several sets of transforms to the inputs.

    Args:
        set_transforms: List of dictionaries of sets of transforms specifying
            transforms and number of views per set.

    Example::

        set_transforms = [
            {'transform': [...], 'num_views': ...},
            {'transform': [...], 'num_views': ...},
            ...
        ]

        transform = MultiCropTransform(
            set_transforms
        )
    """

    def __init__(self, set_transforms: List[Any]) -> None:
        super().__init__()
        # Store normalized copies so the caller's dictionaries are not mutated
        # and 'num_views' is always present (defaults to 1).
        self.set_transforms = [
            {**set_transform, 'num_views': set_transform.get('num_views', 1)}
            for set_transform in set_transforms
        ]
        transforms = []
        for set_transform in self.set_transforms:
            # Repeat each set's transform once per requested view.
            transforms.extend([set_transform['transform']] * set_transform['num_views'])
        self.transforms = transforms

    def __call__(self, img: ((Image | Tensor) | Iterable[(Image | Tensor)])) -> Tensor:
        # isinstance (instead of an exact `type(...) in [...]` check) also
        # accepts subclasses, e.g. concrete PIL image classes such as
        # JpegImageFile, which the original comparison rejected.
        if isinstance(img, (Image, Tensor)):
            return [transform(img) for transform in self.transforms]
        # One input per transform: lengths must match exactly.
        return [
            transform(image)
            for (transform, image) in zip(self.transforms, img, strict=True)
        ]

    def __repr__(self) -> str:
        format_string = self.__class__.__name__
        for set_transform in self.set_transforms:
            format_string += '(\n'
            format_string += '    num views={}\n'.format(set_transform['num_views'])
            format_string += '    transforms={}'.format(set_transform['transform'])
            format_string += '\n)'
        return format_string
|
class OnlyInputListTransform(Compose):
    """Apply a transform to only the key ``'input'`` in a list of sample dictionaries.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Callable) -> None:
        super().__init__(
            transforms=[
                ApplyTransformInputKeyOnList(transform),
                DictKeepInputLabelIdx(),
            ]
        )
|
class OnlyInputTransform(Compose):
    """Apply a transform to only the key ``'input'`` in a sample dictionary.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Callable) -> None:
        super().__init__(
            transforms=[
                ApplyTransformInputKey(transform),
                DictKeepInputLabelIdx(),
            ]
        )
|
class OnlyInputListSameTransform(Compose):
    """Apply the same transform to only the key ``'input'`` in a list of sample dictionaries.

    Args:
        transform: The transform to apply.
    """

    def __init__(self, transform: Callable) -> None:
        super().__init__(
            transforms=[
                ApplySameTransformInputKeyOnList(transform),
                DictKeepInputLabelIdx(),
            ]
        )
|
class OnlyInputTransformWithDictTransform(Compose):
    """Apply a transform to only the key ``'input'`` in a sample dictionary with a
    transformation on the dictionary as well.

    Args:
        transform: The transform to apply to the input.
        dict_transform: The transform to apply to the dictionary.
        first_dict: If ``True``, first apply the transformation on the dict, else,
            first apply the transformation on the input.
    """

    def __init__(self, transform: Callable, dict_transform: Callable, first_dict: bool=False) -> None:
        if first_dict:
            # Bug fix: the original had identical branches, so `first_dict`
            # had no effect; the dict transform must run before the input one.
            transforms = [ApplyTransformOnDict(dict_transform), ApplyTransformInputKey(transform), DictKeepInputLabelIdx()]
        else:
            transforms = [ApplyTransformInputKey(transform), ApplyTransformOnDict(dict_transform), DictKeepInputLabelIdx()]
        super().__init__(transforms=transforms)
|
class OnlyInputListTransformWithDictTransform():
    """Apply a transform to only the key ``'input'`` in a list of sample dictionaries
    with a transformation on each dictionary as well.

    Args:
        transform: The transform to apply to the input.
        dict_transform: The transform to apply to the dictionary.
        first_dict: If ``True``, first apply the transformation on the dict, else,
            first apply the transformation on the input.
    """

    def __init__(self, transform: Callable, dict_transform: Callable, first_dict: bool=False) -> None:
        # Delegate the per-sample work to the single-dict variant.
        self.transform = OnlyInputTransformWithDictTransform(transform, dict_transform, first_dict)

    def __call__(self, x: List[Dict[(str, Any)]]) -> List[Dict[(str, Any)]]:
        return [self.transform(sample) for sample in x]

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(\n    {self.transform}\n)'
|
class RandomResizedCrop(transforms.RandomResizedCrop):
    """``torchvision`` RandomResizedCrop that also accepts the interpolation mode as a string.

    Args:
        size: Expected output size of the crop.
        scale: Scale range of the cropped area.
        ratio: Aspect-ratio range of the cropped area.
        interpolation: Interpolation mode, either an ``InterpolationMode`` or its
            string name (e.g. ``'bilinear'``), resolved through ``_INTERPOLATION``.
        antialias: Whether to apply antialiasing.
    """

    # Tuples instead of mutable list defaults; isinstance instead of an exact
    # type check so str subclasses are handled too.
    def __init__(self, size: Union[(int, Iterable[int])], scale: Iterable[float]=(0.08, 1.0), ratio: Iterable[float]=((3 / 4), (4 / 3)), interpolation: Union[(str, InterpolationMode)]='bilinear', antialias: bool=True, **kwargs) -> None:
        if isinstance(interpolation, str):
            interpolation = _INTERPOLATION[interpolation]
        super().__init__(size, scale=scale, ratio=ratio, interpolation=interpolation, antialias=antialias, **kwargs)
|
class RemoveKey(Module):
    """Remove the given key from an input dict.

    Useful for removing modalities from a video clip that aren't needed.

    Args:
        key: The dictionary key to remove.
    """

    def __init__(self, key: str):
        super().__init__()
        self._key = key

    def __call__(self, x: Dict[(str, Tensor)]) -> Dict[(str, Tensor)]:
        # pop with a default is a no-op when the key is absent.
        x.pop(self._key, None)
        return x
|
class RemoveInputKey(RemoveKey):
    """Remove the ``'input'`` (video) key from a sample dictionary."""

    def __init__(self):
        super().__init__(key='input')
|
class RemoveAudioKey(RemoveKey):
    """Remove the ``'audio'`` key from a sample dictionary."""

    def __init__(self):
        super().__init__(key='audio')
|
class RemoveTimeDim(nn.Module):
    """Fold the time dimension of a ``[C, T, H, W]`` tensor into the channel dimension."""

    def __init__(self) -> None:
        super().__init__()

    def forward(self, tensor: Tensor):
        channels, time, height, width = tensor.shape
        # [C, T, H, W] -> [C * T, H, W]: merge channel and time axes.
        return tensor.view(channels * time, height, width)

    def __repr__(self):
        return f'{self.__class__.__name__}()'
|
def mix_spotting(x: Tensor, mix_value: Tensor, permutation: Tensor, labels: Tensor, has_label: Tensor, ignore_class: Tensor):
    """Make mixup of the batch for action spotting.

    Args:
        x: The batch values to mix.
        mix_value: Value coefficients for mixing.
        permutation: Permutation to perform mix.
        labels: Labels of the timestamps in the batch.
        has_label: Whether timestamps have label.
        ignore_class: Whether class in the batch should be ignored.

    Returns:
        Tuple containing:
            - The mixed input.
            - The mixed class labels.
            - The concatenated ``has_label`` of the mixed elements.
            - The ``ignore_class`` of the mixed elements.
            - The concatenated ``mix_value`` of the mixed elements.
    """
    # Broadcastable views of the mixing coefficients for inputs and labels.
    lam_x = mix_value.view(-1, *([1] * (x.ndim - 1)))
    lam_label = mix_value.view(-1, *([1] * (labels.ndim - 1)))

    # Convex combination of the batch with its permuted counterpart.
    x_mixed = lam_x * x + (1 - lam_x) * x[permutation]

    # Stack original and permuted metadata so both mixed components keep theirs.
    labels_cat = torch.cat((labels, labels[permutation]))
    has_label_cat = torch.cat((has_label, has_label[permutation]))
    ignore_class_cat = torch.cat((ignore_class, ignore_class[permutation]))
    mixed_weights = torch.cat((lam_label, 1 - lam_label))

    return (x_mixed, labels_cat, has_label_cat, ignore_class_cat, mixed_weights)
|
class SpottingMixup(Module):
    """Make mixup for spotting for labels.

    Args:
        alpha: Alpha value for the beta distribution of mixup.
    """

    def __init__(self, alpha: float=0.5) -> None:
        super().__init__()
        self.alpha = alpha
        self.mix_sampler = torch.distributions.Beta(alpha, alpha)

    def forward(self, batch: Dict[(str, Any)]):
        x = batch['input']
        labels = batch['labels']
        has_label = batch['has_label']
        ignore_class = batch['ignore_class']
        device, dtype = x.device, x.dtype
        batch_size = x.shape[0]

        # Draw one mixing coefficient per element without tracking gradients.
        with torch.inference_mode():
            mix_value = self.mix_sampler.sample((batch_size,)).to(device=device, dtype=dtype)
        # clone() escapes inference-mode storage so downstream ops may autograd.
        mix_value = mix_value.clone()
        permutation = torch.randperm(batch_size, device=device)

        x_mixed, labels_after_mix, has_label_after_mix, ignore_class_after_mix, mixed_weights = mix_spotting(
            x=x,
            mix_value=mix_value,
            permutation=permutation,
            labels=labels,
            has_label=has_label,
            ignore_class=ignore_class,
        )

        return {
            'input': x_mixed,
            'labels': labels_after_mix,
            'ignore_class': ignore_class_after_mix,
            'has_label': has_label_after_mix,
            'mixup_weights': mixed_weights,
        }

    def __repr__(self):
        return f'{__class__.__name__}(alpha={self.alpha})'
|
def get_matching_files_in_dir(dir: str, file_pattern: str) -> List[Path]:
    """Retrieve files in directory matching a pattern.

    Args:
        dir: Directory path.
        file_pattern: Pattern for the files.

    Raises:
        NotADirectoryError: If `dir` does not exist or is not a directory.

    Returns:
        List of files matching the pattern.
    """
    dir = Path(dir)
    # Guard clause: fail fast on a missing or non-directory path.
    if not (dir.exists() and dir.is_dir()):
        raise NotADirectoryError(f'Directory "{dir}" does not exist or is not a directory')
    return list(dir.glob(file_pattern))
|
def get_ckpts_in_dir(dir: str, ckpt_pattern: str='*.ckpt') -> List[Path]:
    """Get all checkpoints in a directory.

    Args:
        dir: Directory path containing the checkpoints.
        ckpt_pattern: Checkpoint glob pattern.

    Returns:
        List of checkpoints paths in directory.
    """
    try:
        return get_matching_files_in_dir(dir, ckpt_pattern)
    except NotADirectoryError:
        # A missing directory is not fatal here: warn and report no checkpoints.
        warnings.warn(f'No checkpoint found in: {dir}', category=RuntimeWarning)
        return []
|
def get_last_ckpt_in_dir(dir: str, ckpt_pattern: str='*.ckpt', key_sort: Callable=(lambda x: x.stat().st_mtime)) -> Optional[Path]:
    """Get last ckpt in directory following a sorting function.

    Args:
        dir: Directory path containing the checkpoints.
        ckpt_pattern: Checkpoint glob pattern.
        key_sort: Function to sort the checkpoints.

    Returns:
        Last checkpoint in `dir`, if it exists, according to `key_sort`.
    """
    ckpts = get_ckpts_in_dir(dir, ckpt_pattern)
    if not ckpts:
        return None
    # Ascending sort: the last element is the greatest per `key_sort`
    # (by default the most recently modified file).
    ckpts.sort(key=key_sort)
    return ckpts[-1]
|
def get_last_ckpt_in_path_or_dir(checkpoint_file: Optional[str]=None, checkpoint_dir: Optional[str]=None, ckpt_pattern: str='*.ckpt', key_sort: Callable=(lambda x: x.stat().st_mtime)) -> Optional[Path]:
    """Get checkpoint from file or from last checkpoint in directory following a sorting function.

    Args:
        checkpoint_file: Checkpoint file path containing the checkpoint.
        checkpoint_dir: Directory path containing the checkpoints.
        ckpt_pattern: Checkpoint glob pattern.
        key_sort: Function to sort the checkpoints.

    Returns:
        Checkpoint file if it exists or last checkpoint in `dir` according to `key_sort`.
    """
    if checkpoint_file is not None:
        candidate = Path(checkpoint_file)
        # is_file() already implies existence.
        if candidate.is_file():
            return candidate
        warnings.warn(f'{checkpoint_file} is not a file or do not exist.', category=RuntimeWarning)
    if checkpoint_dir is None:
        return None
    return get_last_ckpt_in_dir(checkpoint_dir, ckpt_pattern=ckpt_pattern, key_sort=key_sort)
|
def get_ckpt_by_callback_mode(checkpoint_path: str, checkpoint_mode: str) -> List[Path]:
    """Get checkpoint from ModelCheckpoint callback based on the mode: ``'best'``, ``'last'``, or ``'both'``.

    Args:
        checkpoint_path: Checkpoint file path containing the callback checkpoint.
        checkpoint_mode: Mode to read the callback checkpoint. Can be either ``'best'``, ``'last'`` or ``'both'``.

    Raises:
        NotImplementedError: If ``checkpoint_mode`` is not supported.

    Returns:
        Checkpoint paths based on the mode.
    """
    # Validate the mode up front instead of in a trailing else.
    if checkpoint_mode not in ('best', 'last', 'both'):
        raise NotImplementedError(f"Checkpoint mode '{checkpoint_mode}' not supported.")
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model_checkpoint_str = str(checkpoint['callbacks'])
    paths: List[Path] = []
    # Bug fix: the original used `elif`, so mode 'both' only ever extracted the
    # best path; the two patterns must be checked independently.
    if checkpoint_mode in ('best', 'both'):
        regex = "'best_model_path':\\s'([a-zA-Z/0-9=_\\-\\.]+\\.ckpt)'"
        paths.append(Path(re.search(regex, model_checkpoint_str).group(1)))
    if checkpoint_mode in ('last', 'both'):
        regex = "'last_model_path':\\s'([a-zA-Z/0-9=_\\-\\.]+\\.ckpt)'"
        paths.append(Path(re.search(regex, model_checkpoint_str).group(1)))
    new_paths = []
    checkpoint_dir = Path(checkpoint_path).parent
    for path in paths:
        if path.exists():
            new_paths.append(path)
        else:
            # Fall back to the checkpoint's own directory (e.g. a moved run).
            new_path = checkpoint_dir / path.name
            assert new_path.exists(), f'The checkpoint {path} is not available and not found at {new_path}.'
            new_paths.append(new_path)
    return new_paths
|
def get_sub_state_dict_from_pl_ckpt(checkpoint_path: str, pattern: str='^(trunk\\.)') -> Dict[(Any, Any)]:
    """Retrieve sub state dict from a pytorch lightning checkpoint.

    Args:
        checkpoint_path: Pytorch lightning checkpoint path.
        pattern: Pattern to filter the keys for the sub state dictionary.
            If value is ``""`` keep all keys.

    Returns:
        Sub state dict from the checkpoint following the pattern.
    """
    # Load on CPU so key extraction works without the device the checkpoint
    # was saved on (consistent with `get_ckpt_by_callback_mode`).
    model = torch.load(checkpoint_path, map_location='cpu')
    # Plain state dicts (not wrapped by lightning) are supported too.
    state_dict = model['state_dict'] if 'state_dict' in model else model
    return {k: v for k, v in state_dict.items() if pattern == '' or re.match(pattern, k)}
|
def remove_pattern_in_keys_from_dict(d: Dict[(Any, Any)], pattern: str) -> Dict[(Any, Any)]:
    """Remove the pattern from keys in a dictionary.

    Args:
        d: The dictionary.
        pattern: Pattern to remove from the keys.
            If value is ``""`` keep all keys.

    Returns:
        Input dictionary with updated keys.
    """
    if pattern == '':
        return d
    # Compile once, then strip the pattern from every key.
    compiled = re.compile(pattern)
    return {compiled.sub('', key): value for key, value in d.items()}
|
def mask_tube_in_sequence(mask_ratio: float, tube_size: int, len_sequence: int, device: (str | torch.device)='cpu'):
'Generate indices to mask tubes from a sequence.\n\n Args:\n mask_ratio: Ratio for the masking.\n tube_size: Tube size for the masking.\n len_sequence (int): Length of the sequence to mask.\n device: Device for the mask.\n\n Returns:\n Tuple:\n - The indices to mask.\n - The indices to keep.\n - The reversed order for temporal masking.\n - The number of tokens masked.\n '
num_masked = floor((len_sequence * mask_ratio))
indices_permuted = ((torch.randperm((len_sequence // tube_size), device=device) * tube_size).repeat_interleave(tube_size) + torch.arange(tube_size, device=device).repeat((len_sequence // tube_size)))
indices_not_kept: torch.Tensor = indices_permuted[:num_masked].sort()[0]
indices_kept: torch.Tensor = indices_permuted[num_masked:].sort()[0]
indices = torch.cat((indices_not_kept, indices_kept))
inversed_temporal_masked_indices = torch.argsort(indices)
return (indices_not_kept, indices_kept, inversed_temporal_masked_indices, num_masked)
|
def batch_mask_tube_in_sequence(mask_ratio: float, tube_size: int, len_sequence: int, batch_size: int, device: (str | torch.device)='cpu'):
    """Generate indices to mask tubes from a batch of sequences.

    Args:
        mask_ratio: Ratio for the masking.
        tube_size: Tube size for the masking.
        len_sequence: Length of the sequence to mask.
        batch_size: The size of the batch.
        device: Device for the mask.

    Returns:
        Tuple:
            - The indices to mask.
            - The indices to keep.
            - The reversed order for temporal masking.
            - The total number of tokens masked across the batch.
    """
    # Removed dead code: the original first built three throwaway lists that
    # were immediately overwritten by these preallocated tensors.
    expected_num_masked = floor(mask_ratio * len_sequence)
    tot_indices_not_kept = torch.empty((batch_size, expected_num_masked), device=device, dtype=torch.long)
    tot_indices_kept = torch.empty((batch_size, len_sequence - expected_num_masked), device=device, dtype=torch.long)
    tot_inversed_temporal_masked_indices = torch.empty((batch_size, len_sequence), device=device, dtype=torch.long)
    tot_num_masked = 0
    for i in range(batch_size):
        # One independent tube mask per sequence in the batch.
        indices_not_kept, indices_kept, inversed_temporal_masked_indices, num_masked = mask_tube_in_sequence(mask_ratio, tube_size, len_sequence, device)
        tot_indices_not_kept[i] = indices_not_kept
        tot_indices_kept[i] = indices_kept
        tot_inversed_temporal_masked_indices[i] = inversed_temporal_masked_indices
        tot_num_masked += num_masked
    return (tot_indices_not_kept, tot_indices_kept, tot_inversed_temporal_masked_indices, tot_num_masked)
|
def get_global_batch_size_in_trainer(local_batch_size: int, trainer: Trainer) -> int:
    """Get global batch size used by a trainer based on the local batch size.

    Args:
        local_batch_size: The local batch size used by the trainer.
        trainer: The trainer used.

    Raises:
        AttributeError: The strategy is not supported.

    Returns:
        The global batch size.
    """
    strategy = get_trainer_strategy(trainer)
    devices = trainer.num_devices
    num_nodes = trainer.num_nodes

    def _matches(strategy_kinds):
        # True when the strategy is an instance of any of the listed kinds.
        return any(isinstance(strategy, kind) for kind in strategy_kinds)

    if not _matches(supported_strategies):
        raise AttributeError(f'Strategy {strategy} not supported.')
    if _matches(tpu_strategies):
        return local_batch_size * devices
    if _matches(process_independent_strategies):
        # Each process holds a local batch: scale by devices and nodes.
        return local_batch_size * devices * num_nodes
    if _matches(fully_dependent_strategies):
        return local_batch_size
    raise AttributeError(f'Strategy {strategy} not supported.')
|
def get_local_batch_size_in_trainer(global_batch_size: int, trainer: Trainer) -> int:
    """Get local batch size used by a trainer based on the global batch size.

    Args:
        global_batch_size: The global batch size used by the trainer.
        trainer: The trainer used.

    Raises:
        AttributeError: The strategy is not supported.

    Returns:
        The local batch size.
    """
    strategy = get_trainer_strategy(trainer)
    devices = trainer.num_devices
    num_nodes = trainer.num_nodes

    def _matches(strategy_kinds):
        # True when the strategy is an instance of any of the listed kinds.
        return any(isinstance(strategy, kind) for kind in strategy_kinds)

    if not _matches(supported_strategies):
        raise AttributeError(f'Strategy {strategy} not supported.')
    if _matches(tpu_strategies):
        return global_batch_size // devices
    if _matches(process_independent_strategies):
        # Inverse of the global computation: divide by devices then nodes.
        return (global_batch_size // devices) // num_nodes
    if _matches(fully_dependent_strategies):
        return global_batch_size
    raise AttributeError(f'Strategy {strategy} not supported.')
|
def get_num_devices_in_trainer(trainer: Trainer) -> int:
    """Get the number of devices used by the trainer.

    Args:
        trainer: The trainer.

    Raises:
        AttributeError: The strategy used by trainer is not supported.

    Returns:
        The number of devices used by trainer.
    """
    strategy = get_trainer_strategy(trainer)

    def _matches(strategy_kinds):
        # True when the strategy is an instance of any of the listed kinds.
        return any(isinstance(strategy, kind) for kind in strategy_kinds)

    if not _matches(supported_strategies):
        raise AttributeError(f'Strategy {strategy} not supported.')
    if _matches(tpu_strategies):
        return trainer.num_devices
    if _matches(process_independent_strategies):
        return trainer.num_devices * trainer.num_nodes
    if _matches(fully_dependent_strategies):
        # e.g. a single process drives all computation.
        return 1
    raise AttributeError(f'Strategy {strategy} not supported.')
|
def get_trainer_strategy(trainer: Trainer) -> Any:
    """Retrieve the strategy from a trainer.

    Args:
        trainer: The trainer.

    Returns:
        The strategy.
    """
    # Bug fix: compare parsed versions instead of strings — lexicographically
    # '1.10.0' < '1.6.0' is True, which wrongly selected the legacy attribute
    # on recent lightning releases.
    if version.parse(pl.__version__) < version.parse('1.6.0'):
        return trainer.training_type_plugin
    return trainer.strategy
|
def is_strategy_ddp(strategy: Any) -> bool:
    """Test if strategy is ddp.

    Args:
        strategy: The strategy.

    Returns:
        ``True`` if strategy is ddp.
    """
    # Generator instead of a materialized list: short-circuits on first match.
    return any(
        isinstance(strategy, process_strategy)
        for process_strategy in process_independent_strategies
    )
|
def is_strategy_tpu(strategy: Any) -> bool:
    """Test if strategy is tpu.

    Args:
        strategy: The strategy.

    Returns:
        ``True`` if strategy is tpu.
    """
    # Generator instead of a materialized list: short-circuits on first match.
    return any(isinstance(strategy, tpu_strategy) for tpu_strategy in tpu_strategies)
|
def compile_model(model: LightningModule, do_compile: bool=False, fullgraph: bool=False, dynamic: bool=False, backend: Union[(str, Callable)]='inductor', mode: Union[(str, None)]=None, options: Optional[Dict[(str, Union[(str, int, bool)])]]=None, disable: bool=False):
    """If torch version is at least ``2.0.0`` and users ask for it, compile the model.

    Args:
        model: Model to compile.
        do_compile: Whether to compile the model.
        fullgraph: Defined by `torch.compile`.
        dynamic: Defined by `torch.compile`.
        backend: Defined by `torch.compile`.
        mode: Defined by `torch.compile`.
        options: Defined by `torch.compile`.
        disable: Defined by `torch.compile`.

    Returns:
        The compiled model if available else the model.
    """
    # `torch.compile` exists from torch 2.0; comparing against '2.0.0.dev'
    # also accepts nightly/pre-release builds of 2.0.
    if ((version.parse(torch.__version__) >= version.parse('2.0.0.dev')) and do_compile):
        rank_zero_info(f'Compiling model {model.__class__.__name__}.')
        return torch.compile(model=model, fullgraph=fullgraph, dynamic=dynamic, backend=backend, mode=mode, options=options, disable=disable)
    else:
        rank_zero_info(f'Not compiling model {model.__class__.__name__}.')
        return model
|
def get_default_seed(default_seed: int=0) -> int:
    """Get the default seed if pytorch lightning did not initialize one.

    Args:
        default_seed: The default seed.

    Returns:
        Pytorch lightning seed or the default one.
    """
    # Lightning's `seed_everything` exports the seed through this variable.
    seed = os.getenv('PL_GLOBAL_SEED', default_seed)
    return int(seed)
|
def get_global_rank() -> int:
    """Get global rank of the process."""
    # Prefer the initialized process group when available.
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    # torchrun / torch.distributed.launch export RANK and WORLD_SIZE.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        return int(os.environ['RANK'])
    # Fall back to SLURM when it launched several processes.
    if int(os.environ.get('SLURM_NPROCS', 1)) > 1:
        return int(os.environ['SLURM_PROCID'])
    return 0
|
def get_local_rank() -> int:
    """Get local rank of the process."""
    # torchrun / torch.distributed.launch export RANK, WORLD_SIZE, LOCAL_RANK.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        return int(os.environ['LOCAL_RANK'])
    # Fall back to SLURM when it launched several processes.
    if int(os.environ.get('SLURM_NPROCS', 1)) > 1:
        return int(os.environ['SLURM_LOCALID'])
    return 0
|
def get_world_size() -> int:
    """Get world size or number of the processes."""
    # Prefer the initialized process group when available.
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    # torchrun / torch.distributed.launch export RANK and WORLD_SIZE.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        return int(os.environ['WORLD_SIZE'])
    # Fall back to SLURM when it launched several processes.
    if int(os.environ.get('SLURM_NPROCS', 1)) > 1:
        return int(os.environ['SLURM_NPROCS'])
    return 1
|
def get_local_world_size() -> int:
    """Get local world size or number of processes on the node."""
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    # NOTE(review): reports the number of visible CUDA devices, which assumes
    # one process per local GPU — confirm against launch configuration.
    return torch.cuda.device_count()
|
def is_only_one_condition_true(*conditions: bool) -> bool:
    """Test if exactly one of the conditions is True.

    Bug fix: the previous XOR/AND implementation was wrong for four or more
    conditions (e.g. three True out of four XORs to True while the AND is
    False), and bitwise ``~`` on a bool yields a non-zero int rather than a
    bool. Counting truthy values is exact and returns a proper bool.
    """
    return sum(bool(condition) for condition in conditions) == 1
|
def all_false(*conditions: bool) -> bool:
    """Test that all conditions are False.

    Bug fix: the previous implementation used bitwise ``~`` on bools, and both
    ``~True`` (-2) and ``~False`` (-1) are truthy ints, so it always returned
    True. ``not any(...)`` is the correct boolean negation.
    """
    return not any(conditions)
|
def warmup_value(initial_value: float, final_value: float, step: int=0, max_step: int=0) -> float:
    """Apply warmup to a value.

    Args:
        initial_value: Initial value.
        final_value: Final value.
        step: Current step.
        max_step: Max step for warming up.

    Returns:
        The value at current warmup step.
    """
    # Once warmup is over the value is pinned to its final target.
    if step >= max_step:
        return final_value
    # Linear interpolation from initial to final over `max_step` steps.
    return initial_value + (step * (final_value - initial_value)) / max_step
|
def scheduler_value(scheduler: Optional[str], initial_value: float, final_value: float, step: int=0, max_step: int=0) -> float:
    """Apply scheduler to a value.

    Args:
        scheduler: The type of the scheduler (``None``, ``'linear'`` or ``'cosine'``).
        initial_value: The initial value.
        final_value: The final value.
        step: Current step.
        max_step: Maximum step for scheduler.

    Raises:
        NotImplementedError: If the scheduler type is unknown.

    Returns:
        The value at current step.
    """
    if scheduler is None:
        return initial_value
    if scheduler == 'linear':
        # Bug fix: a single interpolation formula handles both directions; the
        # previous increasing branch interpolated from final to initial.
        return initial_value + (step * (final_value - initial_value)) / max_step
    if scheduler == 'cosine':
        # Half-cosine ramp: 0 at step 0, 1 at max_step.
        ramp = 0.5 * (1.0 + math.cos(math.pi + (math.pi * step) / max_step))
        if final_value > initial_value:
            return initial_value + ramp * (final_value - initial_value)
        return initial_value - ramp * (initial_value - final_value)
    # Previously an unknown scheduler silently returned None.
    raise NotImplementedError(f"Scheduler '{scheduler}' not supported.")
|
def apply_several_transforms(images: Iterable[Tensor], transforms: Iterable[Module]) -> List[List[Tensor]]:
    """Apply several transformations to a list of images.

    Args:
        images: The images.
        transforms: The transformations to apply to the images.

    Returns:
        List of list of transformed images, one inner list per transform.
    """
    results = []
    for transform in transforms:
        # All images go through the same transform before moving to the next.
        results.append([transform(image) for image in images])
    return results
|
def apply_several_video_transforms(videos: Iterable[Dict[(str, Any)]], transforms: Iterable[Module]) -> List[List[Tensor]]:
    """Apply several transformations to a list of videos.

    Each video dict is deep-copied before every transform so transforms cannot
    interfere with one another through shared state.

    Args:
        videos: The videos.
        transforms: The transformations to apply to the videos.

    Returns:
        List of list of transformed videos, one inner list per transform.
    """
    results = []
    for transform in transforms:
        results.append([transform(copy.deepcopy(video)) for video in videos])
    return results
|
def make_grid_from_several_transforms(sets_images: Iterable[Iterable[Tensor]], n_images_per_row: int=8) -> Tensor:
    """Make a grid of images by aligning images from several transformations vertically.

    Args:
        sets_images: Sets of transformed images aligned, base_image(sets_images[0][?]) == ... == base_images(sets_images[-1][?]).
        n_images_per_row: Number of images displayed per row; ``-1`` puts each set on one row.

    Returns:
        Grid of images.
    """
    n_images = len(sets_images[0])
    # All sets must contain the same number of (aligned) images.
    for set_image in sets_images:
        assert len(set_image) == n_images
    if n_images_per_row == -1:
        nrow = n_images
        # Bug fix: `torch.cat(*sets_images, dim=0)` unpacked the second set
        # into the `dim` argument; make_grid expects a flat list of images.
        all_images = [image for set_image in sets_images for image in set_image]
    else:
        nrow = n_images_per_row
        all_images = []
        # Bug fix: `ceil(a // b)` was a no-op floor that dropped the final
        # partial row; true ceiling division keeps every image.
        number_of_rows = math.ceil(n_images / n_images_per_row)
        for row_idx in range(number_of_rows):
            for set_image in sets_images:
                start = row_idx * n_images_per_row
                stop = min(n_images, (row_idx + 1) * n_images_per_row)
                all_images.extend(set_image[i] for i in range(start, stop))
    grid = torchvision.utils.make_grid(all_images, nrow=nrow)
    return grid
|
def make_several_transforms_from_config(cfg_transforms: Mapping[(Any, Any)]) -> List[Module]:
    """Make several transformations from a configuration dictionary.

    Args:
        cfg_transforms: Configuration of transformations in the form
            ``{'transform1': {...}, 'transform2': {...}}``.

    Returns:
        List of transformations.
    """
    # Only the config values matter; the names are discarded.
    return [
        hydra.utils.instantiate(conf_transform)
        for conf_transform in cfg_transforms.values()
    ]
|
def show_images(imgs: Union[(Iterable[Tensor], Tensor)], figsize: Iterable[float]=(6.4, 4.8)):
    """Show images from a tensor or a list of tensor.

    Args:
        imgs: Images to display.
        figsize: Figure size for the images.
    """
    if not isinstance(imgs, list):
        imgs = [imgs]
    # One column per image, single row.
    _, axs = plt.subplots(ncols=len(imgs), squeeze=False, figsize=figsize)
    for i, img in enumerate(imgs):
        # Detach from autograd and convert to a displayable array once
        # (the original converted to ndarray twice).
        array = np.asarray(F.to_pil_image(img.detach()))
        axs[0, i].imshow(array)
        # Hide all axis decorations.
        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
    plt.show()
|
def show_video(video: Tensor) -> animation.ArtistAnimation:
    """Show a video thanks to animation from matplotlib.

    Args:
        video: The raw video to display.

    Returns:
        The animation to show.
    """
    # Move the leading dimension last (channels-last frames) and cast to
    # integers so imshow can render each frame.
    frames_array = np.asarray(video.long().permute(1, 2, 3, 0))
    fig, ax = plt.subplots()
    fig.patch.set_visible(False)
    # One artist list per frame for ArtistAnimation.
    artists = [[ax.imshow(frame, animated=True)] for frame in frames_array]
    anim = animation.ArtistAnimation(fig, artists)
    plt.axis('off')
    plt.show()
    return anim
|
def main():
    # Script entry point: parse CLI arguments via the module-level `parser`
    # and build a SoccerNet dataset with the requested decoder/label options.
    # `parser`, `soccernet_dataset` and `SoccerNetTask` are defined elsewhere
    # in the file.
    args = parser.parse_args()
    soccernet_dataset(data_path=args.data_path, transform=None, video_path_prefix=args.path_prefix, decoder_args={'fps': args.fps}, features_args=None, label_args={'radius_label': args.radius_label, 'cache_dir': args.cache_dir}, task=SoccerNetTask(args.task))
|
def get_video_duration(video_file):
    """Return the duration of a video in seconds, probed through ffmpeg.

    Returns -1 when ffmpeg fails or no duration is reported, and 1 when the
    ffmpeg output cannot be decoded/parsed (the caller then treats the video
    as valid and carries on).
    """
    cmd = ['ffmpeg', '-i', str(video_file), '-f', 'null', '-']
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        print(video_file, err.output)
        return (- 1)
    try:
        # `ffmpeg_duration_template` (module-level regex) extracts the
        # HH:MM:SS.fff timestamps from ffmpeg's stderr output.
        matches = ffmpeg_duration_template.findall(output.decode())
    except Exception as err:
        print(video_file, err, 'chose to carry on decoding video')
        return 1
    if not matches:
        return (- 1)
    # Use the last reported timestamp.
    hours, minutes, seconds, fraction = matches[(- 1)]
    duration = (float(hours) * 3600 + float(minutes) * 60 + float(seconds)
                + float(fraction) * (10 ** (- len(fraction))))
    return duration
|
def has_video_stream(video_file):
    """Return whether `video_file` contains at least one video stream (via ffprobe).

    Args:
        video_file: Path of the video file to probe.

    Returns:
        ``True`` when ffprobe reports a video stream, ``False`` on probe
        failure or when no stream information is printed.
    """
    cmd = ['ffprobe', '-i', str(video_file), '-show_streams', '-select_streams', 'v', '-loglevel', 'error']
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        print(video_file, err.output)
        return False
    # Bug fix: `check_output` returns bytes, so the original comparison with
    # the str '' was always True even when ffprobe printed nothing.
    return output != b''
|
def is_video_empty(video_file):
    """A video is empty when it has no positive duration or no video stream."""
    if get_video_duration(video_file) <= 0:
        return True
    return not has_video_stream(video_file)
|
def process(row, folder_path, output_path, args):
    """Transcode (or extract frames from) one dataset video with ffmpeg.

    Args:
        row: (classname, videoname, videostem) triple describing the video.
        folder_path: Root folder containing class subfolders of input videos.
        output_path: Root folder receiving the processed outputs.
        args: Namespace with `downscale`, `downscale_size`, `frames`,
            `video_quality` and `fps` options.

    Returns:
        Tuple of (success flag, message).
    """
    classname = row[0]
    videoname = row[1]
    videostem = row[2]
    inname = ((folder_path / classname) / videoname)
    if is_video_empty(inname):
        print(f'{inname} is empty.')
        return (False, f'{inname} is empty.')
    output_folder = (output_path / classname)
    if (os.path.isdir(output_folder) is False):
        try:
            os.mkdir(output_folder)
        # Narrowed from a bare except: only filesystem errors are expected
        # (e.g. a race with another worker creating the same folder).
        except OSError:
            print(f"{output_folder} can't be created.")
    if args.downscale:
        downscaled_cmd = f"""-c:v libx264 -filter:v "scale='if(gt(ih,iw),{args.downscale_size},trunc(oh*a/2)*2):if(gt(ih,iw),trunc(ow/a/2)*2,{args.downscale_size})'" -c:a copy"""
    else:
        downscaled_cmd = ''
    if args.frames:
        outfile = '%08d.jpg'
        outfolder = (output_folder / videostem)
        outfolder.mkdir(exist_ok=False, parents=False)
        outname = (outfolder / outfile)
        frames_cmd = f'-q:v {args.video_quality}'
    else:
        outname = (output_folder / videoname)
        frames_cmd = ''
    if (args.fps > 0):
        fps_cmd = f'-r {args.fps}'
    else:
        fps_cmd = ''
    status = False
    # Bug fix: quote the paths only inside the shell command string. The
    # original rebound `outname` to a quoted string and then ran
    # `os.path.exists('"..."')`, which never exists, so success was always
    # reported as False.
    command = f'ffmpeg -loglevel panic -i "{inname}" {downscaled_cmd} {frames_cmd} {fps_cmd} "{outname}"'
    try:
        subprocess.run(command, shell=True, check=True, stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as err:
        print(inname, outname, status, err.output)
        if args.frames:
            # Drop the (empty) frames folder created for this video.
            os.rmdir(outfolder)
        return (status, err.output)
    status = os.path.exists(outname)
    return (status, 'Process')
|
@hydra.main(config_path='../eztorch/configs/run/evaluation/feature_extractor/resnet3d50', config_name='resnet3d50_ucf101')
def main(config: DictConfig) -> None:
    """Hydra entry point for feature extraction.

    Prepares the run directory, saves the resolved config, optionally seeds,
    instantiates the datamodule, model (defaulting to
    ``eztorch.evaluation.FeatureExtractor``), callbacks and a single-device
    trainer, then runs fit / validate / test depending on the configured
    datamodule splits.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    # Work from the run directory so relative outputs land next to the config.
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')
    hydradir = (rundir / 'config/')
    hydradir.mkdir(parents=True, exist_ok=True)
    config_file = (hydradir / 'extract_features.yaml')
    # Persist the fully-resolved configuration for reproducibility.
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    with config_file.open(mode='w') as f:
        f.write(resolved_config)
    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    # Default to the generic feature extractor when no model target is given.
    if (not config.model.get('_target_')):
        with open_dict(config):
            config.model._target_ = 'eztorch.evaluation.FeatureExtractor'
            config.model._recursive_ = False
    model: Module = hydra.utils.instantiate(config.model)
    callbacks = []
    if config.get('callbacks'):
        for (_, callback_cfg) in config.callbacks.items():
            callback: Callback = hydra.utils.instantiate(callback_cfg)
            callbacks.append(callback)
    # Feature extraction is forced onto a single device with the default strategy.
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, devices=1, strategy='auto')
    rank_zero_info(resolved_config)
    rank_zero_info(model)
    # Extract features on train or val (mutually exclusive), then optionally test.
    if config.datamodule.get('train'):
        trainer.fit(model, datamodule=datamodule)
    elif config.datamodule.get('val'):
        trainer.validate(model, datamodule=datamodule)
    if config.datamodule.get('test'):
        trainer.test(model, datamodule=datamodule)
|
@hydra.main(config_path='../eztorch/configs/run/evaluation/linear_classifier/sce/resnet50', config_name='resnet50_imagenet_mocov3')
def main(config: DictConfig) -> None:
    """Train (and optionally test) a linear classifier, driven by a Hydra config.

    The resolved configuration is written to
    ``<run dir>/config/linear_classifier_evaluation.yaml`` before training.
    Training resumes from the last checkpoint when one is found.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')

    # Persist the fully resolved configuration for reproducibility.
    hydradir = rundir / 'config'
    hydradir.mkdir(parents=True, exist_ok=True)
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    (hydradir / 'linear_classifier_evaluation.yaml').write_text(resolved_config)

    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')

    callbacks = (
        [hydra.utils.instantiate(cb_cfg) for cb_cfg in config.callbacks.values()]
        if config.get('callbacks')
        else []
    )
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks)
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    model: LightningModule = hydra.utils.instantiate(config.model)

    # Resume from the most recent checkpoint if one exists.
    model_ckpt_dirpath = (config.callbacks.model_checkpoint.dirpath
                          if config.callbacks.get('model_checkpoint') else None)
    ckpt_path = get_last_ckpt_in_path_or_dir(config.ckpt_path, model_ckpt_dirpath)
    if ckpt_path is not None:
        warnings.warn(f'A checkpoint has been found and loaded from this file: {ckpt_path}', category=RuntimeWarning)

    rank_zero_info(resolved_config)
    rank_zero_info(model)

    model = compile_model(model, **config.get('compile', {}))
    trainer.fit(model, datamodule=datamodule, ckpt_path=ckpt_path)

    if config.get('test'):
        # Evaluate either one explicit checkpoint or every checkpoint selected
        # by the configured callback mode.
        if config.test.get('ckpt_by_callback_mode'):
            ckpt_paths = get_ckpt_by_callback_mode(config.test.ckpt_path, config.test.ckpt_by_callback_mode)
        else:
            ckpt_paths = [config.test.ckpt_path]
        for ckpt_path in ckpt_paths:
            trainer.test(model, ckpt_path=ckpt_path, datamodule=datamodule)
|
@hydra.main(config_path='../eztorch/configs/run/pretrain/sce/resnet50', config_name='resnet50_imagenet')
def main(config: DictConfig) -> None:
    """Pretrain a model, driven by a Hydra config.

    The resolved configuration is written to ``<run dir>/config/pretrain.yaml``
    before training. Training resumes from the last checkpoint when one is
    found.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')

    # Persist the fully resolved configuration for reproducibility.
    hydradir = rundir / 'config/'
    hydradir.mkdir(parents=True, exist_ok=True)
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    (hydradir / 'pretrain.yaml').write_text(resolved_config)

    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')

    callbacks = (
        [hydra.utils.instantiate(cb_cfg) for cb_cfg in config.callbacks.values()]
        if config.get('callbacks')
        else []
    )
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks)
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    model: LightningModule = hydra.utils.instantiate(config.model)

    # Resume from the most recent checkpoint if one exists.
    model_ckpt_dirpath = (config.callbacks.model_checkpoint.dirpath
                          if config.callbacks.get('model_checkpoint') else None)
    ckpt_path = get_last_ckpt_in_path_or_dir(config.ckpt_path, model_ckpt_dirpath)
    if ckpt_path is not None:
        warnings.warn(f'A checkpoint has been found and loaded from this file: {ckpt_path}', category=RuntimeWarning)

    rank_zero_info(resolved_config)
    rank_zero_info(model)

    model = compile_model(model, **config.get('compile', {}))
    trainer.fit(model, datamodule=datamodule, ckpt_path=ckpt_path)
|
@hydra.main(config_path='../eztorch/configs/run/pretrain/moco', config_name='resnet18_cifar10')
def main(config: DictConfig) -> None:
    """Benchmark the dataloading pipeline by fitting a dummy model.

    The resolved configuration is written to
    ``<run dir>/config/dataloader.yaml``. The datamodule is the real one from
    the config; the model is a :class:`DummyModel` that only consumes batches
    (optionally applying the configured transform), so timing reflects data
    loading rather than compute.

    Raises:
        AssertionError: If ``config.model.input_shape`` is missing.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')
    hydradir = (rundir / 'config/')
    hydradir.mkdir(parents=True, exist_ok=True)
    config_file = (hydradir / 'dataloader.yaml')
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    with config_file.open(mode='w') as f:
        f.write(resolved_config)
    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=[])
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    if (config.model.get('input_shape') is None):
        raise AssertionError('input_shape should be specified in model config.')
    # BUG FIX: the original checked ``config.model.get('transform')`` but then
    # read ``config.model.train_transform`` — with a ``transform`` key it could
    # crash on the missing attribute, and with a ``train_transform`` key it
    # silently used None. Accept either key, preferring ``train_transform``.
    transform = config.model.get('train_transform', config.model.get('transform'))
    model: LightningModule = DummyModel(config.model.input_shape, transform=transform)
    rank_zero_info(config.datamodule)
    rank_zero_info(model)
    trainer.fit(model, datamodule=datamodule)
|
@hydra.main(config_path='../eztorch/configs/run/evaluation/retrieval_from_bank', config_name='default')
def main(config: DictConfig) -> None:
    """Compute retrieval recall@k of query features against a feature bank.

    Loads precomputed query/bank features and labels from the paths in
    ``config``, optionally centers and l2-normalizes each side, computes the
    dot-product similarity matrix, and logs ``R @ k`` for every rank in
    ``config.ranks`` (a query is correct at rank ``k`` when any of its ``k``
    most similar bank entries shares its label).
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')
    hydradir = (rundir / 'config/')
    hydradir.mkdir(parents=True, exist_ok=True)
    config_file = (hydradir / 'retrieval_train_from_test.yaml')
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    with config_file.open(mode='w') as f:
        f.write(resolved_config)
    rank_zero_info(resolved_config)
    ranks = config.ranks
    rank_zero_info('\nLoading query features and labels...')
    query_features = torch.load(config.query.features_path)
    query_labels = torch.load(config.query.labels_path)
    rank_zero_info(f'''Loaded query features and labels.
    shape of features is: {query_features.shape}.
    shape of labels is: {query_labels.shape}.''')
    rank_zero_info('\nLoading bank features and labels...')
    bank_features = torch.load(config.bank.features_path)
    bank_labels = torch.load(config.bank.labels_path)
    rank_zero_info(f'''Loaded bank features and labels.
    shape of features is: {bank_features.shape}.
    shape of labels is: {bank_labels.shape}.''')
    if torch.cuda.is_available():
        rank_zero_info('\nCuda available, tensors put on GPU...')
        # BUG FIX: the original assigned each tensor to itself, so nothing was
        # ever moved to the GPU despite the log message.
        query_features = query_features.cuda()
        query_labels = query_labels.cuda()
        bank_features = bank_features.cuda()
        bank_labels = bank_labels.cuda()
    if config.query.center:
        rank_zero_info('\nQuery centering...')
        query_features = (query_features - query_features.mean(dim=0, keepdim=True))
        rank_zero_info('Query centered...')
    if config.bank.center:
        rank_zero_info('\nBank centering...')
        bank_features = (bank_features - bank_features.mean(dim=0, keepdim=True))
        rank_zero_info('Bank centered...')
    if config.query.normalize:
        rank_zero_info('\nQuery normalizing...')
        query_features = torch.nn.functional.normalize(query_features, p=2, dim=1)
        rank_zero_info('Query normalized...')
    if config.bank.normalize:
        rank_zero_info('\nBank normalizing...')
        bank_features = torch.nn.functional.normalize(bank_features, p=2, dim=1)
        rank_zero_info('Bank normalized...')
    # Typo fix in the log message: 'similarties' -> 'similarities'.
    rank_zero_info('\nComputing similarities...')
    sim = query_features.matmul(bank_features.t())
    rank_zero_info('Computed similarities...')
    rank_zero_info('\nStart computing metrics:')
    for rank in ranks:
        (_, topkidx) = torch.topk(sim, rank, dim=1)
        acc = torch.any((bank_labels[topkidx] == query_labels.unsqueeze(1)), dim=1).float().mean().item()
        rank_zero_info(f'R @ {rank} = {acc:.4f}')
|
@hydra.main(config_path='../eztorch/configs/run/supervised/resnet3d18', config_name='kinetics200')
def main(config: DictConfig) -> None:
    """Run supervised training (and optional testing), driven by a Hydra config.

    The resolved configuration is written to
    ``<run dir>/config/supervised.yaml`` before training. Training resumes
    from the last checkpoint when one is found.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')

    # Persist the fully resolved configuration for reproducibility.
    hydradir = rundir / 'config/'
    hydradir.mkdir(parents=True, exist_ok=True)
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    (hydradir / 'supervised.yaml').write_text(resolved_config)

    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')

    callbacks = (
        [hydra.utils.instantiate(cb_cfg) for cb_cfg in config.callbacks.values()]
        if config.get('callbacks')
        else []
    )
    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks)
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    model: LightningModule = hydra.utils.instantiate(config.model)

    # Resume from the most recent checkpoint if one exists.
    model_ckpt_dirpath = (config.callbacks.model_checkpoint.dirpath
                          if config.callbacks.get('model_checkpoint') else None)
    ckpt_path = get_last_ckpt_in_path_or_dir(config.ckpt_path, model_ckpt_dirpath)
    if ckpt_path is not None:
        warnings.warn(f'A checkpoint has been found and loaded from this file: {ckpt_path}', category=RuntimeWarning)

    rank_zero_info(resolved_config)
    rank_zero_info(model)

    model = compile_model(model, **config.get('compile', {}))
    trainer.fit(model, datamodule=datamodule, ckpt_path=ckpt_path)

    if config.get('test'):
        # Evaluate either one explicit checkpoint or every checkpoint selected
        # by the configured callback mode.
        if config.test.get('ckpt_by_callback_mode'):
            ckpt_paths = get_ckpt_by_callback_mode(config.test.ckpt_path, config.test.ckpt_by_callback_mode)
        else:
            ckpt_paths = [config.test.ckpt_path]
        for ckpt_path in ckpt_paths:
            trainer.test(model, ckpt_path=ckpt_path, datamodule=datamodule)
|
@hydra.main(config_path='../eztorch/configs/run/supervised/resnet3d18', config_name='kinetics200')
def main(config: DictConfig) -> None:
    """Run supervised training under the PyTorch profiler.

    Identical to the plain supervised entry point, except a
    :class:`PyTorchProfiler` (50 warmup steps, 2 active steps, traces exported
    to TensorBoard under ``config.dir.root``) is attached to the trainer.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')

    # Persist the fully resolved configuration for reproducibility.
    hydradir = rundir / 'config/'
    hydradir.mkdir(parents=True, exist_ok=True)
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    (hydradir / 'supervised.yaml').write_text(resolved_config)

    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')

    callbacks = (
        [hydra.utils.instantiate(cb_cfg) for cb_cfg in config.callbacks.values()]
        if config.get('callbacks')
        else []
    )

    # Profile 2 steps after 50 warmup steps; traces go to TensorBoard.
    schedule = torch.profiler.schedule(wait=0, warmup=50, active=2, repeat=0, skip_first=0)
    trace_handler = torch.profiler.tensorboard_trace_handler(dir_name=config.dir.root)
    profiler = PyTorchProfiler(dirpath=config.dir.root, export_to_chrome=False, with_stack=True, schedule=schedule, on_trace_ready=trace_handler)

    trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, profiler=profiler)
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    model: LightningModule = hydra.utils.instantiate(config.model)

    # Resume from the most recent checkpoint if one exists.
    model_ckpt_dirpath = (config.callbacks.model_checkpoint.dirpath
                          if config.callbacks.get('model_checkpoint') else None)
    ckpt_path = get_last_ckpt_in_path_or_dir(config.ckpt_path, model_ckpt_dirpath)
    if ckpt_path is not None:
        warnings.warn(f'A checkpoint has been found and loaded from this file: {ckpt_path}', category=RuntimeWarning)

    rank_zero_info(resolved_config)
    rank_zero_info(model)

    model = compile_model(model, **config.get('compile', {}))
    trainer.fit(model, datamodule=datamodule, ckpt_path=ckpt_path)

    if config.get('test'):
        if config.test.get('ckpt_by_callback_mode'):
            ckpt_paths = get_ckpt_by_callback_mode(config.test.ckpt_path, config.test.ckpt_by_callback_mode)
        else:
            ckpt_paths = [config.test.ckpt_path]
        for ckpt_path in ckpt_paths:
            trainer.test(model, ckpt_path=ckpt_path, datamodule=datamodule)
|
@hydra.main(config_path='../eztorch/configs/run/supervised/resnet3d50', config_name='ucf101')
def main(config: DictConfig) -> None:
    """Evaluate one or several checkpoints on the test split.

    The resolved configuration is written to ``<run dir>/config/test.yaml``.
    For every checkpoint a fresh single-device trainer is instantiated and
    ``trainer.test`` is run.
    """
    rundir = Path(to_absolute_path(config.dir.run))
    rundir.mkdir(parents=True, exist_ok=True)
    os.chdir(rundir)
    rank_zero_info(f'Run directory: {rundir}')

    # Persist the fully resolved configuration for reproducibility.
    hydradir = rundir / 'config/'
    hydradir.mkdir(parents=True, exist_ok=True)
    resolved_config = OmegaConf.to_yaml(config, resolve=True)
    (hydradir / 'test.yaml').write_text(resolved_config)

    if config.get('seed'):
        hydra.utils.instantiate(config.seed)
    else:
        warnings.warn('No seed fixed, the results are not reproducible.')

    callbacks = (
        [hydra.utils.instantiate(cb_cfg) for cb_cfg in config.callbacks.values()]
        if config.get('callbacks')
        else []
    )
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    model: LightningModule = hydra.utils.instantiate(config.model)

    model_ckpt_dirpath = (config.callbacks.model_checkpoint.dirpath
                          if config.callbacks.get('model_checkpoint') else None)
    ckpt_path = get_last_ckpt_in_path_or_dir(config.ckpt_path, model_ckpt_dirpath)
    if ckpt_path is not None:
        warnings.warn(f'A checkpoint has been found and loaded from this file: {ckpt_path}', category=RuntimeWarning)

    rank_zero_info(resolved_config)
    rank_zero_info(model)

    model = compile_model(model, **config.get('compile', {}))

    # Evaluate either one explicit checkpoint or every checkpoint selected by
    # the configured callback mode.
    if config.test.get('ckpt_by_callback_mode'):
        ckpt_paths = get_ckpt_by_callback_mode(config.test.ckpt_path, config.test.ckpt_by_callback_mode)
    else:
        ckpt_paths = [config.test.ckpt_path]
    for ckpt_path in ckpt_paths:
        # A fresh single-device trainer per checkpoint keeps runs isolated.
        trainer: Trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, devices=1, strategy='auto')
        trainer.test(model, ckpt_path=ckpt_path, datamodule=datamodule)
|
class TestFrameSoccerNetVideo(unittest.TestCase):
    """Tests for ``FrameSoccerNetVideo.get_timestamps_and_frame_indices``.

    The method takes a (start, end) range in seconds and returns a tuple of
    (timestamps, frame_indices, fps_video_frame_indices). When the requested
    ``fps`` equals the stored ``fps_video`` both index tensors coincide;
    otherwise the fps-video indices are mapped into the stored video's frame
    numbering.
    """
    def setUp(self) -> None:
        # Constructor arguments shared by every video below.
        # NOTE(review): ``zeros=8`` presumably controls zero-padding of frame
        # file names — confirm against get_video_to_frame_path_fn.
        self.default_args = {'video_path': Path('/video/'), 'half_path': Path('/video/half1'), 'transform': None, 'video_frame_to_path_fn': get_video_to_frame_path_fn(zeros=8), 'num_threads_io': 0}
    def test_same_fps_get_timestamps_indices(self) -> None:
        """With fps == fps_video, frame indices equal fps-video frame indices."""
        # Fractional (start, end) range; the expected grid uses 0.5 s steps
        # (fps = 2).
        video = FrameSoccerNetVideo(duration=2700, fps_video=2, fps=2, num_frames=5400, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.75, 15.02)
        expected_timestamps = torch.arange(0.5, 15, 0.5)
        expected_frame_indices = torch.arange(1, (15 * 2))
        expected_fps_video_frame_indices = expected_frame_indices
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
        # Same video, but the end lands past an extra half-second step.
        video = FrameSoccerNetVideo(duration=2700, fps_video=2, fps=2, num_frames=5400, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.75, 15.52)
        expected_timestamps = torch.arange(0.5, 15.5, 0.5)
        expected_frame_indices = torch.arange(1, ((15 * 2) + 1))
        expected_fps_video_frame_indices = expected_frame_indices
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
        # Full-duration range on a video half a second shorter than usual.
        video = FrameSoccerNetVideo(duration=2699.5, fps_video=2, fps=2, num_frames=5399, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.0, 2699.5)
        expected_timestamps = torch.arange(0.0, 2699.5, 0.5)
        expected_frame_indices = torch.arange(0, ((2699 * 2) + 1))
        expected_fps_video_frame_indices = expected_frame_indices
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
    def test_different_fps_get_timestamps_indices(self) -> None:
        """With fps != fps_video, fps-video indices are floor(idx / fps * fps_video)."""
        # 25 fps video sampled at 2 fps: stored-frame index is
        # floor((idx / 2) * 25).
        video = FrameSoccerNetVideo(duration=2700, fps_video=25, fps=2, num_frames=67500, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.75, 15.02)
        expected_timestamps = torch.arange(0.5, 15, 0.5)
        expected_frame_indices = torch.arange(1, (15 * 2))
        expected_fps_video_frame_indices = torch.floor(((expected_frame_indices / 2) * 25)).to(dtype=torch.long)
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
        # End landing past an extra half-second step.
        video = FrameSoccerNetVideo(duration=2700, fps_video=25, fps=2, num_frames=67500, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.75, 15.52)
        expected_timestamps = torch.arange(0.5, 15.5, 0.5)
        expected_frame_indices = torch.arange(1, ((15 * 2) + 1))
        expected_fps_video_frame_indices = torch.floor(((expected_frame_indices / 2) * 25)).to(dtype=torch.long)
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
        # Full-duration range on a slightly short video.
        video = FrameSoccerNetVideo(duration=2699.96, fps_video=25, fps=2, num_frames=67499, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.0, 2699.96)
        expected_timestamps = torch.arange(0.0, 2699.5, 0.5)
        expected_frame_indices = torch.arange(0, ((2699 * 2) + 1))
        expected_fps_video_frame_indices = torch.floor(((expected_frame_indices / 2) * 25)).to(dtype=torch.long)
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
        # 4 fps sampling of a 25 fps video; expected stored-frame indices are
        # hand-computed since 25/4 is not integral.
        video = FrameSoccerNetVideo(duration=2700, fps_video=25, fps=4, num_frames=67500, **self.default_args)
        (timestamps, frame_indices, fps_video_frame_indices) = video.get_timestamps_and_frame_indices(0.76, 15.02)
        expected_timestamps = torch.arange(0.75, 15, 0.25)
        expected_frame_indices = torch.arange(3, (15 * 4))
        expected_fps_video_frame_indices = torch.tensor([19, 25, 31, 37, 44, 50, 56, 62, 69, 75, 81, 87, 94, 100, 106, 112, 119, 125, 131, 137, 144, 150, 156, 162, 169, 175, 181, 187, 194, 200, 206, 212, 219, 225, 231, 237, 244, 250, 256, 262, 269, 275, 281, 287, 294, 300, 306, 312, 319, 325, 331, 337, 344, 350, 356, 362, 369])
        assert torch.allclose(timestamps, expected_timestamps)
        assert torch.allclose(frame_indices, expected_frame_indices)
        assert torch.allclose(fps_video_frame_indices, expected_fps_video_frame_indices)
|
class TestSoccerNetDataset(unittest.TestCase):
    """Integration test for ``soccernet_dataset`` sample construction."""
    def test_soccernet_dataset(self):
        """Index a 10 s window (video 0, half 1, 1860-1870 s) and compare the
        returned timestamps, labels and ignore mask against hand-built
        fixtures decoded from ``small_annotations.json``."""
        # Fixture paths are resolved relative to this test file; decoding at
        # 2 fps yields 0.5 s timestamp steps.
        dataset = soccernet_dataset((Path(os.path.realpath(__file__)).parent / 'small_annotations.json'), None, (Path(os.path.realpath(__file__)).parent / 'images'), label_args={'radius_label': 2, 'cache_dir': (Path(os.path.realpath(__file__)).parent / 'labels')}, decoder_args={'fps': 2})
        # Samples are indexed by (video index, half, start second, end second).
        sample = dataset[(0, 1, 1860, 1870)]
        # Expected: 20 timestamps at 0.5 s steps, per-timestamp one-hot class
        # labels (17 classes) and an all-false ignore mask.
        expected_sample = {'timestamps': tensor([1860.0, 1860.5, 1861.0, 1861.5, 1862.0, 1862.5, 1863.0, 1863.5, 1864.0, 1864.5, 1865.0, 1865.5, 1866.0, 1866.5, 1867.0, 1867.5, 1868.0, 1868.5, 1869.0, 1869.5]), 'video_num_timestamps': tensor(5400), 'labels': tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0]]), 'ignore_class': tensor([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]).bool()}
        for (key, value) in expected_sample.items():
            assert torch.allclose(sample[key], value, rtol=0.001, atol=0.001)
|
class TestSoccerNetPredictions(unittest.TestCase):
    """Tests for SoccerNet spotting prediction aggregation and post-processing."""
    def test_aggregate_predictions(self):
        """``aggregate_predictions`` remaps per-class timestamped scores onto a
        regular grid of integer timestamps (length ``num + 1`` hand-computed
        fixture here)."""
        # Each prediction row carries one score per class; ``timestamps`` gives
        # the per-class timestamp of every row.
        predictions = torch.tensor([[0.5, 0.6], [0.4, 0.7], [0.7, 0.3], [0.3, 0.5], [0.8, 0.2], [0.9, 0.9]])
        timestamps = torch.tensor([[0.0, 0.0], [1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [8.0, 2.0], [8.0, 8.0]])
        expected_predictions = torch.tensor([[0.3, 0.6], [0.5, 0.7], [0.7, 0.0], [0.0, 0.0], [0.8, 0.0], [0.0, 0.5], [0.0, 0.0], [0.9, 0.9]])
        expected_timestamps = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7])
        # Args: (predictions, timestamps, video length, timestamp step).
        (new_predictions, new_timestamps) = aggregate_predictions(predictions, timestamps, 8, 1.0)
        assert torch.allclose(expected_predictions, new_predictions)
        assert torch.allclose(expected_timestamps, new_timestamps)
    def test_aggregate_predictions_with_ignore(self):
        """Same as above with the trailing ignore flag enabled, which yields a
        9-step grid and drops some boundary contributions."""
        predictions = torch.tensor([[0.5, 0.6], [0.4, 0.7], [0.7, 0.3], [0.3, 0.5], [0.8, 0.2], [0.9, 0.9]])
        timestamps = torch.tensor([[0.0, 0.0], [1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [8.0, 2.0], [8.0, 8.0]])
        expected_predictions = torch.tensor([[0.0, 0.6], [0.5, 0.7], [0.7, 0.0], [0.0, 0.0], [0.8, 0.0], [0.0, 0.5], [0.0, 0.0], [0.9, 0.0], [0.0, 0.0]])
        expected_timestamps = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8])
        (new_predictions, new_timestamps) = aggregate_predictions(predictions, timestamps, 8, 1.0, True)
        assert torch.allclose(expected_predictions, new_predictions)
        assert torch.allclose(expected_timestamps, new_timestamps)
    def test_postprocess_spotting_half_predictions(self):
        """``postprocess_spotting_half_predictions`` applies NMS and formats
        the surviving peaks as SoccerNet-style JSON records (gameTime,
        label, position in milliseconds, half, confidence)."""
        predictions = torch.tensor([[0.5, 0.3], [1.0, 0.6], [0.2, 0.8], [0.3, 0.2], [0.1, 0.1], [0.2, 0.9], [0.6, 0.8]])
        NMS_args = {'window': 3, 'threshold': 0.49}
        half_id = 0
        # 15 is the timestamp step in seconds used to place each prediction.
        half_predictions = postprocess_spotting_half_predictions(predictions, half_id, 15, NMS_args)
        expected_half_predictions = [{'gameTime': f'{half_id} - {int((15 / 60)):02d}:{int((15 % 60))}', 'label': REVERSE_ACTION_SPOTTING_LABELS[0], 'position': f'{int(15000)}', 'half': str(half_id), 'confidence': float(1.0)}, {'gameTime': f'{half_id} - {int((90 / 60)):02d}:{int((90 % 60))}', 'label': REVERSE_ACTION_SPOTTING_LABELS[0], 'position': f'{int(90000)}', 'half': str(half_id), 'confidence': float(0.6)}, {'gameTime': f'{half_id} - {int((30 / 60)):02d}:{int((30 % 60))}', 'label': REVERSE_ACTION_SPOTTING_LABELS[1], 'position': f'{int(30000)}', 'half': str(half_id), 'confidence': float(0.8)}, {'gameTime': f'{half_id} - {int((75 / 60)):02d}:{int((75 % 60))}', 'label': REVERSE_ACTION_SPOTTING_LABELS[1], 'position': f'{int(75000)}', 'half': str(half_id), 'confidence': float(0.9)}]
        # Compare floats with tolerance, everything else exactly.
        for (expected, actual) in zip(expected_half_predictions, half_predictions):
            for key in expected:
                if (type(expected[key]) is float):
                    assert math.isclose(expected[key], actual[key], rel_tol=1e-06, abs_tol=0.0)
                else:
                    assert (expected[key] == actual[key])
|
class TestSCETokenMasks(unittest.TestCase):
    """Tests for the hard/soft NMS post-processing helpers.

    NOTE(review): the class name mentions "SCE token masks" but every test
    targets NMS functions — consider renaming the class.
    """
    def test_perform_hard_NMS(self):
        """Hard NMS keeps local maxima above threshold and suppresses their
        ``window``-sized neighbourhood (0.6 at the end survives here)."""
        values = torch.tensor([0.5, 1.0, 0.2, 0.3, 0.1, 0.2, 0.6])
        window = 3
        threshold = 0.49
        keep_indexes = perform_hard_NMS(values, window, threshold)
        expected_keep_indexes = torch.tensor([False, True, False, False, False, False, True])
        assert torch.allclose(keep_indexes, expected_keep_indexes)
    def test_perform_hard_NMS_longer_window(self):
        """A window larger than the sequence suppresses everything but the
        single global maximum."""
        values = torch.tensor([0.5, 1.0, 0.2, 0.3, 0.1, 0.2, 0.6])
        window = 14
        threshold = 0.49
        keep_indexes = perform_hard_NMS(values, window, threshold)
        expected_keep_indexes = torch.tensor([False, True, False, False, False, False, False])
        assert torch.allclose(keep_indexes, expected_keep_indexes)
    def test_perform_all_classes_hard_NMS(self):
        """Per-class hard NMS returns, for each class, the surviving scores and
        their timestamps (index * step_timestamp)."""
        values = torch.tensor([[0.5, 0.3], [1.0, 0.6], [0.2, 0.8], [0.3, 0.2], [0.1, 0.1], [0.2, 0.9], [0.6, 0.8]])
        window = 3
        threshold = 0.49
        step_timestamp = 1.0
        (kept_values, kept_timestamps_per_class) = perform_all_classes_NMS(values, step_timestamp, window, threshold, nms_type='hard')
        expected_kept_values = [torch.tensor([1.0, 0.6]), torch.tensor([0.8, 0.9])]
        expected_kept_timestamps_per_class = [torch.tensor([1.0, 6.0]), torch.tensor([2.0, 5.0])]
        assert all([torch.allclose(kept_value, expected_kept_value) for (kept_value, expected_kept_value) in zip(kept_values, expected_kept_values)])
        assert all([torch.allclose(timestamp_per_class, expected_timestamp_per_class) for (timestamp_per_class, expected_timestamp_per_class) in zip(kept_timestamps_per_class, expected_kept_timestamps_per_class)])
    def test_perform_all_classes_hard_NMS_step_timestamp(self):
        """Halving step_timestamp halves the returned timestamps while the kept
        scores stay the same."""
        values = torch.tensor([[0.5, 0.3], [1.0, 0.6], [0.2, 0.8], [0.3, 0.2], [0.1, 0.1], [0.2, 0.9], [0.6, 0.8]])
        window = 3
        threshold = 0.49
        step_timestamp = 0.5
        (kept_values, kept_timestamps_per_class) = perform_all_classes_NMS(values, step_timestamp, window, threshold, nms_type='soft' if False else 'hard')
        expected_kept_values = [torch.tensor([1.0, 0.6]), torch.tensor([0.8, 0.9])]
        expected_kept_timestamps_per_class = [torch.tensor([0.5, 3.0]), torch.tensor([1.0, 2.5])]
        assert all([torch.allclose(kept_value, expected_kept_value) for (kept_value, expected_kept_value) in zip(kept_values, expected_kept_values)])
        assert all([torch.allclose(timestamp_per_class, expected_timestamp_per_class) for (timestamp_per_class, expected_timestamp_per_class) in zip(kept_timestamps_per_class, expected_kept_timestamps_per_class)])
    def test_perform_soft_NMS(self):
        """NOTE(review): no assertions — this only prints the decayed values,
        so it can never fail. Pin expected values once the soft-NMS decay
        behaviour is confirmed."""
        values = torch.tensor([0.0001, 1.0, 0.2, 0.3, 0.1, 0.2, 0.6])
        window = 3
        threshold = 0.001
        decayed_values = perform_soft_NMS(values, window, threshold)
        print(decayed_values)
    def test_perform_all_classes_soft_NMS_step_timestamp(self):
        """NOTE(review): no assertions — print-only, can never fail; add
        expected fixtures as in the hard-NMS tests above."""
        values = torch.tensor([[0.5, 0.3], [1.0, 0.6], [0.2, 0.8], [0.3, 0.2], [0.1, 0.1], [0.2, 0.9], [0.6, 0.8]])
        window = 3
        threshold = 0.2
        step_timestamp = 0.5
        (kept_values, kept_timestamps_per_class) = perform_all_classes_NMS(values, step_timestamp, window, threshold, nms_type='soft')
        print(kept_values, kept_timestamps_per_class)
|
class BoringDataModule(LightningDataModule):
    """Minimal ``LightningDataModule`` over random data, for tests.

    The same ``batch_size`` is reported as both the local and global batch
    size, so the module is only meaningful for single-process runs.
    """

    def __init__(self, data_dir: str='./', dataset=None, val_dataset=None, batch_size: int=1):
        """
        Args:
            data_dir: Unused storage location kept for API compatibility.
            dataset: Training dataset; defaults to ``RandomDataset((32, 256))``.
            val_dataset: Validation dataset; defaults to ``RandomDataset((32, 256))``.
            batch_size: Batch size for both dataloaders.
        """
        super().__init__()
        self.data_dir = data_dir
        self.non_picklable = None
        self.checkpoint_state: Optional[str] = None
        # BUG FIX: the defaults used to be ``RandomDataset(...)`` instances in
        # the signature — default arguments are evaluated once at import time,
        # so every BoringDataModule instance silently shared the same dataset
        # objects. Build fresh defaults per instance instead.
        self.dataset = dataset if dataset is not None else RandomDataset((32, (64 * 4)))
        self.val_dataset = val_dataset if val_dataset is not None else RandomDataset((32, (64 * 4)))
        self.batch_size = batch_size

    @property
    def train_num_samples(self):
        """Number of training samples."""
        return len(self.dataset)

    @property
    def val_num_samples(self):
        """Number of validation samples."""
        return len(self.val_dataset)

    @property
    def train_global_batch_size(self) -> int:
        """Global train batch size (equals the local one here)."""
        return self.batch_size

    @property
    def val_global_batch_size(self) -> int:
        """Global validation batch size (equals the local one here)."""
        return self.batch_size

    @property
    def train_local_batch_size(self) -> int:
        """Per-process train batch size."""
        return self.batch_size

    @property
    def val_local_batch_size(self) -> int:
        """Per-process validation batch size."""
        return self.batch_size

    def train_dataloader(self):
        """Training dataloader; drops the last incomplete batch."""
        return DataLoader(self.dataset, batch_size=self.batch_size, drop_last=True)

    def val_dataloader(self):
        """Validation dataloader; keeps the last incomplete batch."""
        return DataLoader(self.val_dataset, batch_size=self.batch_size)
|
class RandomDataset(Dataset):
    """Dataset of gaussian random tensors.

    Each item is a dict with keys ``'input'`` (a row of the random data) and
    ``'idx'`` (the integer index). ``size[0]`` is the number of samples.
    """

    def __init__(self, size: Iterable[int]):
        self.length = size[0]
        self.data = torch.randn(size)

    def __getitem__(self, index):
        sample = {'input': self.data[index], 'idx': index}
        return sample

    def __len__(self):
        return self.length
|
class RandomLabeledDataset(Dataset):
    """Dataset of random (input, label) pairs for smoke tests.

    Each item is a dict with keys ``'input'`` (gaussian tensor), ``'label'``
    (shape ``(1,)`` integer in ``[0, num_classes)``) and ``'idx'``.
    """

    def __init__(self, size: Iterable[int], num_classes: int=10):
        num_samples = size[0]
        self.length = num_samples
        self.data = torch.randn(size)
        self.labels = torch.randint(num_classes, size=(num_samples, 1))

    def __getitem__(self, index):
        sample = {'input': self.data[index], 'label': self.labels[index], 'idx': index}
        return sample

    def __len__(self):
        return self.length
|
class RandomVisionLabeledDataset(VisionDataset):
    """Random labeled dataset on top of torchvision's ``VisionDataset``.

    Each item is a dict with keys ``'input'`` (gaussian tensor, optionally
    passed through ``transform``) and ``'label'`` (shape ``(1,)`` integer in
    ``[0, num_classes)``).
    """

    def __init__(self, size: Iterable[int], num_classes: int=10, transform: Optional[Module]=None):
        super().__init__('data/', transform=transform)
        num_samples = size[0]
        self.length = num_samples
        self.data = torch.randn(size)
        self.labels = torch.randint(num_classes, size=(num_samples, 1))

    def __getitem__(self, index):
        item = self.data[index]
        if self.transform is not None:
            item = self.transform(item)
        return {'input': item, 'label': self.labels[index]}

    def __len__(self):
        return self.length
|
class BoringModel(LightningModule):
    """Minimal single-linear-layer LightningModule used as test scaffolding."""
    def __init__(self):
        'Testing PL Module. Use as follows:\n\n - subclass\n - modify the behavior for what you want\n class TestModel(BaseTestModel):\n def training_step(...):\n # do your own thing\n or:\n model = BaseTestModel()\n model.on_train_epoch_end = None\n '
        super().__init__()
        # Single trainable layer: 32 features -> 2 outputs.
        self.layer = torch.nn.Linear(32, 2)
    @property
    def num_layers(self) -> int:
        # The model counts as one layer for layer-wise schedulers.
        return 1
    def get_param_layer_id(self, name: str) -> int:
        # Every parameter belongs to the single layer 0.
        return 0
    def forward(self, x):
        return self.layer(x)
    def loss(self, batch, prediction):
        # MSE against an all-ones target; ``batch`` is intentionally unused.
        return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
    def step(self, x):
        x = self(x)
        out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
        return out
    def training_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {'loss': loss}
    def on_train_batch_end(self, training_step_outputs):
        # NOTE(review): this signature does not match recent Lightning hook
        # signatures (which receive outputs, batch, batch_idx) — confirm the
        # Lightning version this scaffolding targets.
        return training_step_outputs
    def on_train_epoch_end(self, outputs) -> None:
        # Reduces the epoch losses; result is discarded on purpose.
        torch.stack([x['loss'] for x in outputs]).mean()
    def validation_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {'x': loss}
    def on_validation_epoch_end(self, outputs) -> None:
        torch.stack([x['x'] for x in outputs]).mean()
    def test_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {'y': loss}
    def on_test_epoch_end(self, outputs) -> None:
        torch.stack([x['y'] for x in outputs]).mean()
    def configure_optimizers(self):
        # Plain SGD with a per-epoch StepLR schedule.
        optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        return ([optimizer], [lr_scheduler])
    def train_dataloader(self):
        # NOTE(review): this file's RandomDataset takes a single ``size``
        # sequence, so ``RandomDataset(32, 64)`` raises TypeError if called —
        # this looks copied from Lightning's own RandomDataset(size, length);
        # confirm which dataset class is intended before fixing.
        return DataLoader(RandomDataset(32, 64))
    def val_dataloader(self):
        return DataLoader(RandomDataset(32, 64))
    def test_dataloader(self):
        return DataLoader(RandomDataset(32, 64))
    def predict_dataloader(self):
        return DataLoader(RandomDataset(32, 64))
|
class LargeBoringModel(LightningModule):
    def __init__(self):
        """Testing PL Module with several layers.

        Use as follows:

        - subclass and modify the behavior for what you want::

            class TestModel(BaseTestModel):
                def training_step(...):
                    # do your own thing

        or::

            model = BaseTestModel()
            model.on_train_epoch_end = None
        """
        super().__init__()
        self.layer1 = torch.nn.Linear(32, 32, bias=False)
        self.bn1 = torch.nn.BatchNorm1d(32)
        self.layer2 = torch.nn.Linear(32, 32, bias=False)
        self.bn2 = torch.nn.BatchNorm1d(32)
        self.layer3 = torch.nn.Linear(32, 32, bias=False)
        self.bn3 = torch.nn.BatchNorm1d(32)
        self.layer4 = torch.nn.Linear(32, 2, bias=True)
        # Intentionally excluded from ``learnable_params`` and from ``forward``.
        self.useless_layer = torch.nn.Linear(32, 2)

    @property
    def learnable_params(self) -> List[Parameter]:
        """Parameters of every layer except ``useless_layer``."""
        params = [param for layer in [self.layer1, self.bn1, self.layer2, self.bn2, self.layer3, self.bn3, self.layer4] for param in layer.parameters()]
        return params

    @property
    def num_layers(self) -> int:
        """Number of layers of the model."""
        return 4

    def get_param_layer_id(self, name: str) -> int:
        """Get the layer id of the named parameter.

        Args:
            name: The name of the parameter.

        Returns ``None`` for names that match no known prefix (kept for
        backward compatibility with the original behavior).
        """
        if name.startswith('layer1') or name.startswith('bn1'):
            return 0
        elif name.startswith('layer2') or name.startswith('bn2'):
            return 1
        elif name.startswith('layer3') or name.startswith('bn3'):
            return 2
        elif name.startswith('layer4'):
            return 3
        elif name.startswith('useless_layer'):
            return 3

    def forward(self, x):
        """Run the three linear+batch-norm stages.

        NOTE(review): ``layer4`` is deliberately not applied here, so the
        output stays 32-dimensional — confirm this is intended by callers.
        """
        x = self.layer1(x)
        x = self.bn1(x)
        x = self.layer2(x)
        x = self.bn2(x)
        x = self.layer3(x)
        x = self.bn3(x)
        return x

    def loss(self, batch, prediction):
        """MSE against an all-ones target shaped like the prediction."""
        return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))

    def step(self, x):
        """Forward pass followed by the MSE-to-ones loss."""
        x = self(x)
        out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
        return out

    def training_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {'loss': loss}

    def on_train_batch_end(self, training_step_outputs):
        return training_step_outputs

    def on_train_epoch_end(self, outputs) -> None:
        # Aggregation for smoke coverage; result is discarded.
        torch.stack([x['loss'] for x in outputs]).mean()

    def validation_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {'x': loss}

    def on_validation_epoch_end(self, outputs) -> None:
        torch.stack([x['x'] for x in outputs]).mean()

    def test_step(self, batch, batch_idx):
        output = self(batch)
        loss = self.loss(batch, output)
        return {'y': loss}

    def on_test_epoch_end(self, outputs) -> None:
        torch.stack([x['y'] for x in outputs]).mean()

    def configure_optimizers(self):
        """SGD over the learnable parameters plus a per-epoch StepLR scheduler.

        Bugfix: the original referenced ``self.layer``, which does not exist on
        this model (it defines ``layer1``..``layer4``) and raised
        ``AttributeError``; use the ``learnable_params`` property instead.
        """
        optimizer = torch.optim.SGD(self.learnable_params, lr=0.1)
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
        return ([optimizer], [lr_scheduler])

    def train_dataloader(self):
        return DataLoader(RandomDataset(32, 64))

    def val_dataloader(self):
        return DataLoader(RandomDataset(32, 64))

    def test_dataloader(self):
        return DataLoader(RandomDataset(32, 64))

    def predict_dataloader(self):
        return DataLoader(RandomDataset(32, 64))
|
class ManualOptimBoringModel(BoringModel):
    """BoringModel variant that drives the optimizer manually in ``training_step``."""

    def __init__(self):
        super().__init__()
        # Disable Lightning's automatic optimization; stepping is done by hand.
        self.automatic_optimization = False

    def training_step(self, batch, batch_idx):
        """Forward, compute loss, then zero-grad / backward / step manually."""
        optimizer = self.optimizers()
        prediction = self(batch)
        loss = self.loss(batch, prediction)
        optimizer.zero_grad()
        self.manual_backward(loss)
        optimizer.step()
        return loss
|
class TestSCELoss(unittest.TestCase):
    """Checks ``compute_sce_loss``/``compute_sce_mask`` against a hand-built reference."""

    def setUp(self) -> None:
        self.coeff = 0.5
        self.temp = 0.1
        self.temp_m = 0.07

    def _expected_loss(self, logits_q, logits_k, mask):
        """Reference SCE loss computed directly from its definition.

        Mixes the one-hot positive mask with the key distribution by
        ``coeff``, renormalizes, and takes the cross-entropy with the query
        log-softmax.
        """
        prob_k = nn.functional.softmax(logits_k, dim=1)
        prob_q = nn.functional.normalize((self.coeff * mask) + ((1 - self.coeff) * prob_k), p=1, dim=1)
        return (- torch.sum((prob_q * nn.functional.log_softmax(logits_q, dim=1)), dim=1).mean(dim=0))

    def test_sce_loss_without_key(self):
        """Positives are the aligned keys only; queue provides the negatives."""
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [1.0, 1.0]])
        queue = torch.tensor([[0.0, 0, 0, 2], [0.0, 0, 0, 2.0]])
        # Hand-computed pairwise similarities for q/k against k and the queue.
        sim_pos = torch.tensor([0.0, 7, 0, 15]).unsqueeze(-1)
        sim_qqueue = torch.Tensor([[0.0, 0, 0, 6], [0, 0, 0, 14], [0, 0, 0, 22], [0, 0, 0, 30]])
        sim_kqueue = torch.Tensor([[0.0, 0, 0, 0], [0, 0, 0, 4], [0, 0, 0, 0], [0, 0, 0, 4]])
        expected_mask = torch.Tensor([[1.0, 0, 0, 0, 0], [1.0, 0, 0, 0, 0], [1.0, 0, 0, 0, 0], [1.0, 0, 0, 0, 0]])
        logits_q = torch.cat((sim_pos, sim_qqueue), 1) / self.temp
        logits_k = torch.cat((torch.tensor([0.0, 0, 0, 0]).unsqueeze(-1), sim_kqueue), 1) / self.temp_m
        expected_loss = self._expected_loss(logits_q, logits_k, expected_mask)
        mask = compute_sce_mask(4, 4, False, 0, 1, 'cuda')
        loss = compute_sce_loss(q, k, k, False, queue, mask, self.coeff, self.temp, self.temp_m)
        assert torch.equal(expected_mask, mask)
        assert torch.equal(expected_loss, loss)

    def test_sce_loss_with_key(self):
        """Other keys in the batch also act as negatives; self-similarity is masked out of the key logits."""
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [1.0, 1.0]])
        queue = torch.tensor([[0.0, 0, 0, 2], [0.0, 0, 0, 2.0]])
        sim_qk = torch.tensor([[0, 3, 0, 3], [0, 7, 0, 7], [0, 11, 0, 11], [0, 15.0, 0, 15]])
        sim_kk = torch.tensor([[0, 0, 0, 0], [0, 2, 0, 2], [0, 0, 0, 0], [0, 2.0, 0, 2]])
        sim_qqueue = torch.Tensor([[0.0, 0, 0, 6], [0, 0, 0, 14], [0, 0, 0, 22], [0, 0, 0, 30]])
        sim_kqueue = torch.Tensor([[0.0, 0, 0, 0], [0, 0, 0, 4], [0, 0, 0, 0], [0, 0, 0, 4]])
        expected_mask = torch.tensor([[1.0, 0, 0, 0, 0, 0, 0, 0], [0, 1.0, 0, 0, 0, 0, 0, 0], [0, 0, 1.0, 0, 0, 0, 0, 0], [0, 0, 0, 1.0, 0, 0, 0, 0]])
        sim_q = torch.cat([sim_qk, sim_qqueue], dim=1)
        # Large negative offset removes the aligned-key entry before the temperature scaling.
        sim_k = torch.cat([sim_kk, sim_kqueue], dim=1) - (1000000000.0 * expected_mask)
        expected_loss = self._expected_loss(sim_q / self.temp, sim_k / self.temp_m, expected_mask)
        mask = compute_sce_mask(4, 4, True, 0, 1, 'cuda')
        loss = compute_sce_loss(q, k, k, True, queue, mask, self.coeff, self.temp, self.temp_m)
        assert torch.equal(expected_mask, mask)
        assert torch.equal(expected_loss, loss)

    def test_sce_loss_with_key_without_queue(self):
        """Same as above but without a queue: negatives come from the batch keys only."""
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [1.0, 1.0]])
        sim_qk = torch.tensor([[0, 3, 0, 3], [0, 7, 0, 7], [0, 11, 0, 11], [0, 15.0, 0, 15]])
        sim_kk = torch.tensor([[0, 0, 0, 0], [0, 2, 0, 2], [0, 0, 0, 0], [0, 2.0, 0, 2]])
        expected_mask = torch.tensor([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])
        logits_q = sim_qk / self.temp
        # Here the mask offset is applied AFTER the temperature scaling, as in the implementation.
        logits_k = (sim_kk / self.temp_m) - (1000000000.0 * expected_mask)
        expected_loss = self._expected_loss(logits_q, logits_k, expected_mask)
        mask = compute_sce_mask(4, 0, True, 0, 1, 'cuda')
        loss = compute_sce_loss(q, k, k, True, None, mask, self.coeff, self.temp, self.temp_m)
        assert torch.equal(expected_mask, mask)
        assert torch.equal(expected_loss, loss)
|
class TestSCETokenMasks(unittest.TestCase):
def setUp(self) -> None:
    """Shared dimensions for the token-mask tests; the expected masks below all have batch_size * num_tokens = 16 rows."""
    self.batch_size = 2
    self.num_tokens = 8
    self.num_negatives = 2  # extra columns appended when building mask_log_q
def test_one_device_zero_pos_radius_no_keys_init(self):
    """radius=0, no keys, single device: each token's only positive is itself.

    Expects mask_prob_q to be the 16x16 identity padded with num_negatives zero
    columns, mask_sim_q to exclude only the diagonal, mask_sim_k all ones, and
    exactly one positive per token.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=0, keep_aligned_positive=True, use_keys=False, use_all_keys=False, rank=0, world_size=1)
    # 16 query rows x 18 columns (16 key tokens + 2 negative slots).
    expected_mask_prob_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    # Ones everywhere except the diagonal (a token is never its own similarity negative).
    expected_mask_sim_q = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q = complement of mask_sim_q with num_negatives all-ones columns appended, as booleans.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, expected_mask_prob_q)
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_two_pos_radius_no_keys_init(self):
    """radius=2, aligned positive kept, no keys, single device.

    Positives form a band of up to 5 tokens (self +/- 2) clipped at the sample
    boundary (the band does not cross rows 7/8, the boundary between the two
    samples of the batch); mask_prob_q is normalized by the per-token positive
    count.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=True, use_keys=False, use_all_keys=False, rank=0, world_size=1)
    # Banded positives (width <= 5) per 8-token sample; last 2 columns are the negative slots.
    expected_mask_prob_q = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    # Complement of the positive band: positives are removed from the similarity negatives.
    expected_mask_sim_q = torch.tensor([[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[3], [4], [5], [5], [5], [5], [4], [3], [3], [4], [5], [5], [5], [5], [4], [3]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_two_pos_radius_no_keys_not_aligned_init(self):
    """radius=2 but keep_aligned_positive=False, no keys, single device.

    Same banded positives as the aligned case minus the diagonal (the token's
    own aligned key is excluded), so each positive count is one lower and the
    diagonal stays in mask_sim_q.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=False, use_keys=False, use_all_keys=False, rank=0, world_size=1)
    # Band of radius 2 with a zero diagonal; last 2 columns are the negative slots.
    expected_mask_prob_q = torch.tensor([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    # Complement of the band, with the diagonal kept as a similarity entry.
    expected_mask_sim_q = torch.tensor([[1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[2], [3], [4], [4], [4], [4], [3], [2], [2], [3], [4], [4], [4], [4], [3], [2]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_several_devices_zero_pos_radius_no_keys_init(self):
    """radius=0, no keys, rank 1 of world_size 3.

    With use_all_keys=False the expected masks are identical to the
    single-device radius-0 case: rank/world_size must not change the local
    masks.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=0, keep_aligned_positive=True, use_keys=False, use_all_keys=False, rank=1, world_size=3)
    # 16x18 identity padded with the 2 negative columns, as in the single-device case.
    expected_mask_prob_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, expected_mask_prob_q)
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_several_devices_two_pos_radius_no_keys_init(self):
    """radius=2, aligned positive kept, rank 1 of world_size 3.

    With use_all_keys=False the expected masks are identical to the
    single-device radius-2 aligned case.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=True, use_keys=False, use_all_keys=False, rank=1, world_size=3)
    # Banded positives (width <= 5) per 8-token sample; last 2 columns are the negative slots.
    expected_mask_prob_q = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[3], [4], [5], [5], [5], [5], [4], [3], [3], [4], [5], [5], [5], [5], [4], [3]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_several_devices_two_pos_radius_no_keys_not_aligned_init(self):
    """radius=2, keep_aligned_positive=False, rank 1 of world_size 3.

    With use_all_keys=False the expected masks are identical to the
    single-device radius-2 non-aligned case.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=False, use_keys=False, use_all_keys=False, rank=1, world_size=3)
    # Band of radius 2 with a zero diagonal; last 2 columns are the negative slots.
    expected_mask_prob_q = torch.tensor([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[2], [3], [4], [4], [4], [4], [3], [2], [2], [3], [4], [4], [4], [4], [3], [2]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_zero_pos_radius_with_keys_init(self):
    """Single device (rank=0, world_size=1), positive_radius=0,
    keep_aligned_positive=True, use_keys=True, use_all_keys=False.

    With a zero positive radius every query token counts exactly one
    positive (``expected_num_positives`` is all ones) and
    ``mask_prob_q`` is the identity over the key columns, with the
    trailing negative columns all zero.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=0, keep_aligned_positive=True, use_keys=True, use_all_keys=False, rank=0, world_size=1)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes; fixture values
    # come from the test-case setup -- TODO confirm).
    expected_mask_prob_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, expected_mask_prob_q)
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_two_pos_radius_with_keys_init(self):
    """Single device (rank=0, world_size=1), positive_radius=2,
    keep_aligned_positive=True, use_keys=True, use_all_keys=False.

    With a radius of 2 each token's positives are its aligned key plus up
    to two neighbouring tokens on each side within the same sample, so
    per-token positive counts range from 3 (sample edges) to 5 (interior),
    and ``mask_prob_q`` is normalised by that count in the final check.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=True, use_keys=True, use_all_keys=False, rank=0, world_size=1)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[3], [4], [5], [5], [5], [5], [4], [3], [3], [4], [5], [5], [5], [5], [4], [3]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    # mask_prob_q is normalised by the per-token positive count.
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_two_pos_radius_with_keys_not_aligned_init(self):
    """Single device (rank=0, world_size=1), positive_radius=2,
    keep_aligned_positive=False, use_keys=True, use_all_keys=False.

    Same as the aligned radius-2 case, except the aligned key itself is
    excluded from the positives: the diagonal of ``expected_mask_prob_q``
    is zero and the per-token counts drop to 2-4 accordingly.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=False, use_keys=True, use_all_keys=False, rank=0, world_size=1)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[2], [3], [4], [4], [4], [4], [3], [2], [2], [3], [4], [4], [4], [4], [3], [2]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    # mask_prob_q is normalised by the per-token positive count.
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_several_devices_zero_pos_radius_with_keys_init(self):
    """Multi-device setting (rank=1, world_size=3), positive_radius=0,
    keep_aligned_positive=True, use_keys=True, use_all_keys=False.

    With use_all_keys=False only the local keys are used, so the expected
    masks are identical to the single-device zero-radius case (the literal
    expectations below match that test exactly): one positive per token
    and an identity ``mask_prob_q`` over the key columns.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=0, keep_aligned_positive=True, use_keys=True, use_all_keys=False, rank=1, world_size=3)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    assert torch.allclose(mask_prob_q, expected_mask_prob_q)
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_several_devices_two_pos_radius_with_keys_init(self):
    """Multi-device setting (rank=1, world_size=3), positive_radius=2,
    keep_aligned_positive=True, use_keys=True, use_all_keys=False.

    With use_all_keys=False only local keys are used, so the expected
    masks match the single-device radius-2 aligned case: positives are
    the aligned key plus neighbours within radius 2, counts 3-5, and
    ``mask_prob_q`` is normalised by the per-token count.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=True, use_keys=True, use_all_keys=False, rank=1, world_size=3)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[3], [4], [5], [5], [5], [5], [4], [3], [3], [4], [5], [5], [5], [5], [4], [3]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    # mask_prob_q is normalised by the per-token positive count.
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_several_devices_two_pos_radius_with_keys_not_aligned_init(self):
    """Multi-device setting (rank=1, world_size=3), positive_radius=2,
    keep_aligned_positive=False, use_keys=True, use_all_keys=False.

    With use_all_keys=False only local keys are used, so the expected
    masks match the single-device radius-2 not-aligned case: the aligned
    key is excluded (zero diagonal in ``expected_mask_prob_q``) and
    per-token positive counts are 2-4.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=False, use_keys=True, use_all_keys=False, rank=1, world_size=3)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[2], [3], [4], [4], [4], [4], [3], [2], [2], [3], [4], [4], [4], [4], [3], [2]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    # mask_prob_q is normalised by the per-token positive count.
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_zero_pos_radius_with_all_keys_init(self):
    """Single device (rank=0, world_size=1), positive_radius=0,
    keep_aligned_positive=True, use_keys=False, use_all_keys=True.

    In this configuration the function is expected to return ``None`` for
    both ``mask_sim_q`` and ``mask_log_q`` (asserted below); only
    ``mask_prob_q``, ``mask_sim_k`` and the positive counts are checked
    against literals. With zero radius both expected masks are identities
    and every token has exactly one positive.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=0, keep_aligned_positive=True, use_keys=False, use_all_keys=True, rank=0, world_size=1)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup). With world_size=1 the global key set
    # coincides with the local one, so no zero-padding for other ranks.
    expected_mask_prob_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    assert torch.allclose(mask_prob_q, expected_mask_prob_q)
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    # In the use_all_keys + aligned configuration no query-side similarity
    # or log masks are produced.
    assert (mask_sim_q is None)
    assert (mask_log_q is None)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_two_pos_radius_with_all_keys_init(self):
    """Single device (rank=0, world_size=1), positive_radius=2,
    keep_aligned_positive=True, use_keys=False, use_all_keys=True.

    As in the zero-radius all-keys case, ``mask_sim_q`` and ``mask_log_q``
    are expected to be ``None``. Positives are the aligned key plus
    neighbours within radius 2 (counts 3-5), and ``mask_prob_q`` is
    normalised by the per-token positive count.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=True, use_keys=False, use_all_keys=True, rank=0, world_size=1)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[3], [4], [5], [5], [5], [5], [4], [3], [3], [4], [5], [5], [5], [5], [4], [3]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_prob_q is normalised by the per-token positive count.
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    # In the use_all_keys + aligned configuration no query-side similarity
    # or log masks are produced.
    assert (mask_sim_q is None)
    assert (mask_log_q is None)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_one_device_two_pos_radius_with_all_keys_not_aligned_init(self):
    """Single device (rank=0, world_size=1), positive_radius=2,
    keep_aligned_positive=False, use_keys=False, use_all_keys=True.

    Unlike the aligned all-keys variants, this configuration does return
    ``mask_sim_q`` and ``mask_log_q`` (both checked against literals
    below). The aligned key is excluded from the positives, so the
    diagonal of ``expected_mask_prob_q`` is zero and per-token positive
    counts are 2-4.
    """
    (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=False, use_keys=False, use_all_keys=True, rank=0, world_size=1)
    # Literal expectations below are written for batch_size * num_tokens == 16
    # and num_negatives == 2 (inferred from the literal shapes -- TODO confirm
    # against the fixture setup).
    expected_mask_prob_q = torch.tensor([[0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
    expected_mask_sim_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_q.device, dtype=mask_sim_q.dtype)
    expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
    expected_num_positives = torch.tensor([[2], [3], [4], [4], [4], [4], [3], [2], [2], [3], [4], [4], [4], [4], [3], [2]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
    # mask_log_q is expected to be the complement of mask_sim_q concatenated
    # with all-ones columns for the negatives, cast to bool.
    expected_mask_log_q = torch.cat(((1 - mask_sim_q), torch.ones((mask_sim_q.shape[0], self.num_negatives), device=mask_sim_q.device, dtype=mask_sim_q.dtype)), 1).to(dtype=torch.bool)
    # mask_prob_q is normalised by the per-token positive count.
    assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
    assert torch.allclose(mask_sim_k, expected_mask_sim_k)
    assert torch.allclose(mask_sim_q, expected_mask_sim_q)
    assert torch.allclose(mask_log_q, expected_mask_log_q)
    assert torch.allclose(num_positives_per_token, expected_num_positives)
    def test_several_devices_zero_pos_radius_with_all_keys_init(self):
        """Masks for radius 0 with all gathered keys on rank 1 of world size 3.

        With a zero positive radius each token's only positive is its aligned
        key, so the expected masks are identity blocks located at this rank's
        offset along the global key axis, flanked by zero blocks for the two
        other ranks.  The query-side masks are not produced (``None``) in this
        configuration.
        """
        (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=0, keep_aligned_positive=True, use_keys=False, use_all_keys=True, rank=1, world_size=3)
        # Zero padding standing in for the key blocks owned by the two other ranks.
        zeros_other_device = torch.zeros(((self.batch_size * self.num_tokens), (self.batch_size * self.num_tokens)))
        # 16x18 identity block: 16 local tokens, plus 2 trailing all-zero
        # columns (one per queue negative, never a positive).
        expected_mask_prob_q = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
        # Place this rank's block at offset 1 of 3 along the global key axis.
        expected_mask_prob_q = torch.cat((zeros_other_device, expected_mask_prob_q, zeros_other_device), dim=1)
        # 16x16 identity: aligned key is the single positive per token.
        expected_mask_sim_k = torch.tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
        expected_mask_sim_k = torch.cat((zeros_other_device, expected_mask_sim_k, zeros_other_device), dim=1)
        # Exactly one positive (the aligned key) per token.
        expected_num_positives = torch.tensor([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
        assert torch.allclose(mask_prob_q, expected_mask_prob_q)
        assert torch.allclose(mask_sim_k, expected_mask_sim_k)
        # Query-side masks are not needed when keys are not used as negatives.
        assert (mask_sim_q is None)
        assert (mask_log_q is None)
        assert torch.allclose(num_positives_per_token, expected_num_positives)
    def test_several_devices_two_pos_radius_with_all_keys_init(self):
        """Masks for radius 2 with all gathered keys on rank 1 of world size 3.

        Each token's positives are the aligned key plus up to two neighbouring
        tokens on each side, truncated at the boundaries of its own sample of
        ``num_tokens`` tokens (hence the banded expected matrices and the
        3/4/5 positive counts).  The band is placed at this rank's offset in
        the global key axis; query-side masks are ``None``.
        """
        (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=True, use_keys=False, use_all_keys=True, rank=1, world_size=3)
        # Zero padding standing in for the key blocks owned by the two other ranks.
        zeros_other_device = torch.zeros(((self.batch_size * self.num_tokens), (self.batch_size * self.num_tokens)))
        # Banded 16x18 matrix (band half-width 2, restarting at each sample
        # boundary); the 2 trailing all-zero columns correspond to the queue
        # negatives, which are never positives.
        expected_mask_prob_q = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0]], device=mask_prob_q.device, dtype=mask_prob_q.dtype)
        # Place this rank's block at offset 1 of 3 along the global key axis.
        expected_mask_prob_q = torch.cat((zeros_other_device, expected_mask_prob_q, zeros_other_device), dim=1)
        expected_mask_sim_k = torch.tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], device=mask_sim_k.device, dtype=mask_sim_k.dtype)
        expected_mask_sim_k = torch.cat((zeros_other_device, expected_mask_sim_k, zeros_other_device), dim=1)
        # Per-token positive counts: 3/4/5 depending on distance to sample edges.
        expected_num_positives = torch.tensor([[3], [4], [5], [5], [5], [5], [4], [3], [3], [4], [5], [5], [5], [5], [4], [3]], device=num_positives_per_token.device, dtype=num_positives_per_token.dtype)
        # The probability mask is normalized by the number of positives per token.
        assert torch.allclose(mask_prob_q, (expected_mask_prob_q / expected_num_positives))
        assert torch.allclose(mask_sim_k, expected_mask_sim_k)
        assert (mask_sim_q is None)
        assert (mask_log_q is None)
        assert torch.allclose(num_positives_per_token, expected_num_positives)
def test_with_keys_and_all_keys(self):
try:
(mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, num_positives_per_token) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=2, keep_aligned_positive=False, use_keys=True, use_all_keys=True, rank=1, world_size=3)
except NotImplementedError:
return
else:
assert False
|
class TestSCETokenLoss(unittest.TestCase):
    """Smoke tests: ``compute_sce_token_loss`` runs for every supported mask
    configuration (positive radius, aligned positives, local keys, gathered
    keys, one vs. several devices).

    Fix: the original class defined
    ``test_several_devices_two_pos_radius_with_all_keys_not_aligned_init``
    twice; the second definition silently shadowed the first, so the
    ``use_keys=True`` variant never ran.  The first occurrence (which actually
    exercises ``use_keys=True, use_all_keys=False``) is renamed to
    ``test_several_devices_two_pos_radius_with_keys_not_aligned_init``.
    The repeated mask-then-loss driver is factored into ``_compute_loss``.
    """

    def setUp(self) -> None:
        """Random query/key/queue tensors shared by every test."""
        self.batch_size = 2
        self.num_tokens = 8
        self.num_negatives = 2
        self.dim = 4
        self.query = torch.randn(((self.batch_size * self.num_tokens), self.dim))
        self.key = torch.randn(((self.batch_size * self.num_tokens), self.dim))
        # Keys gathered from a (simulated) world of 3 devices.
        self.global_key = torch.randn((((3 * self.batch_size) * self.num_tokens), self.dim))
        self.queue = torch.randn((self.dim, self.num_negatives))

    def _compute_loss(self, positive_radius, keep_aligned_positive, use_keys, use_all_keys, rank, world_size, key=None):
        """Build the SCE token masks for one configuration and evaluate the loss once."""
        key = self.key if key is None else key
        (mask_sim_q, mask_sim_k, mask_prob_q, mask_log_q, _) = compute_sce_token_masks(self.batch_size, self.num_tokens, self.num_negatives, positive_radius=positive_radius, keep_aligned_positive=keep_aligned_positive, use_keys=use_keys, use_all_keys=use_all_keys, rank=rank, world_size=world_size)
        compute_sce_token_loss(self.query, self.key, key, self.queue, mask_sim_q=mask_sim_q, mask_sim_k=mask_sim_k, mask_prob_q=mask_prob_q, mask_log_q=mask_log_q, coeff=torch.tensor(0.5))

    def test_one_device_zero_pos_radius_no_keys_init(self):
        self._compute_loss(0, True, False, False, 0, 1)

    def test_one_device_two_pos_radius_no_keys_init(self):
        self._compute_loss(2, True, False, False, 0, 1)

    def test_one_device_two_pos_radius_no_keys_not_aligned_init(self):
        self._compute_loss(2, False, False, False, 0, 1)

    def test_several_devices_zero_pos_radius_no_keys_init(self):
        self._compute_loss(0, True, False, False, 1, 3)

    def test_several_devices_two_pos_radius_no_keys_init(self):
        self._compute_loss(2, True, False, False, 1, 3)

    def test_several_devices_two_pos_radius_no_keys_not_aligned_init(self):
        self._compute_loss(2, False, False, False, 1, 3)

    def test_one_device_zero_pos_radius_with_keys_init(self):
        self._compute_loss(0, True, True, False, 0, 1)

    def test_one_device_two_pos_radius_with_keys_init(self):
        self._compute_loss(2, True, True, False, 0, 1)

    def test_one_device_two_pos_radius_with_keys_not_aligned_init(self):
        self._compute_loss(2, False, True, False, 0, 1)

    def test_several_devices_zero_pos_radius_with_keys_init(self):
        self._compute_loss(0, True, True, False, 1, 3)

    def test_several_devices_two_pos_radius_with_keys_init(self):
        self._compute_loss(2, True, True, False, 1, 3)

    def test_several_devices_two_pos_radius_with_keys_not_aligned_init(self):
        # Renamed from *_with_all_keys_* (it uses use_keys=True, not
        # use_all_keys); the old name collided with the method below.
        self._compute_loss(2, False, True, False, 1, 3)

    def test_one_device_zero_pos_radius_with_all_keys_init(self):
        self._compute_loss(0, True, False, True, 0, 1)

    def test_one_device_two_pos_radius_with_all_keys_init(self):
        self._compute_loss(2, True, False, True, 0, 1)

    def test_one_device_two_pos_radius_with_all_keys_not_aligned_init(self):
        self._compute_loss(2, False, False, True, 0, 1)

    def test_several_devices_zero_pos_radius_with_all_keys_init(self):
        # With all gathered keys across 3 devices, the key tensor spans the
        # whole world, hence self.global_key.
        self._compute_loss(0, True, False, True, 1, 3, key=self.global_key)

    def test_several_devices_two_pos_radius_with_all_keys_init(self):
        self._compute_loss(2, True, False, True, 1, 3, key=self.global_key)

    def test_several_devices_two_pos_radius_with_all_keys_not_aligned_init(self):
        self._compute_loss(2, False, False, True, 1, 3, key=self.global_key)
|
class TestMoCoModel(unittest.TestCase):
    """Tests for MoCoModel: construction, the contrastive loss against
    hand-computed logits, and short Lightning fits (symmetric / mutual-pass
    variants)."""

    def setUp(self) -> None:
        """Hydra-style instantiation configs and the MoCo temperature shared by all tests."""
        # ResNet-18 trunk with no classification head; small_input presumably
        # adapts it to 32x32 images — TODO confirm against create_resnet.
        self.trunk_cfg = DictConfig({'_target_': 'eztorch.models.trunks.create_resnet', 'name': 'resnet18', 'num_classes': 0, 'small_input': True})
        self.projector_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.predictor_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.queue_cfg = DictConfig({'_target_': 'eztorch.models.queues.FIFOQueue', 'size': 8, 'feature_dim': 2})
        # Contrastive temperature.
        self.temp = 0.2

    def test_moco_init(self):
        """The model builds for every projector/predictor/queue/device combination."""
        MoCoModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=1, temp=self.temp)
        MoCoModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=8, temp=self.temp)
        MoCoModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=2, simulate_n_devices=8, temp=self.temp)
        MoCoModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=None, optimizer={}, queue=None, num_devices=2, temp=self.temp)
        MoCoModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=None, num_devices=2, temp=self.temp)
        MoCoModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=self.queue_cfg, num_devices=2, temp=self.temp)

    def test_moco_loss_without_key(self):
        """compute_moco_loss with queue negatives only matches the hand-built cross-entropy.

        Column 0 of ``sim`` holds the aligned q·k positives (e.g. row 0:
        (1,2)·(9,10) = 29), so the label is 0 for every row; the remaining
        columns are queue similarities — NOTE(review): their expected values
        (2.5, 5.5, ...) do not match a naive q @ queue product, so the loss
        presumably rescales them internally; confirm against compute_moco_loss.
        """
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.arange(9.0, 17.0, 1.0).view((4, 2))
        queue = torch.tensor([[0.0, 0, 0, 2], [0.0, 0, 0, 2.0]])
        # NOTE(review): `model` is built but unused below; compute_moco_loss
        # receives the local `queue` directly.
        model = MoCoModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=self.queue_cfg, num_devices=2, temp=self.temp)
        model.queue = queue
        labels = torch.tensor([0, 0, 0, 0])
        sim = torch.tensor([[29, 0, 0, 0, 2.5], [81, 0, 0, 0, 5.5], [149, 0, 0, 0, 8.5], [233, 0, 0, 0, 11.5]])
        logits = (sim / self.temp)
        loss = nn.functional.cross_entropy(logits, labels)
        output_loss = compute_moco_loss(q, k, k, False, queue, self.temp, 0)
        assert torch.equal(loss, output_loss)

    def test_moco_loss_with_key(self):
        """With use_keys=True the other keys of the batch act as extra negatives.

        Columns 0-3 are q·k similarities (only k row 1 is non-zero, e.g.
        (1,2)·(1,1) = 3), columns 4-7 the queue similarities; labels point to
        each query's aligned key (the diagonal).
        """
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [0.0, 0.0]])
        queue = torch.tensor([[0.0, 0, 0, 2], [0.0, 0, 0, 2.0]])
        model = MoCoModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, use_keys=True, queue=self.queue_cfg, num_devices=2, temp=self.temp)
        model.queue = queue
        labels = torch.tensor([0, 1, 2, 3])
        sim = torch.tensor([[0.0, 3, 0, 0, 0, 0, 0, 6], [0, 7, 0, 0, 0, 0, 0, 14], [0, 11, 0, 0, 0, 0, 0, 22], [0, 15, 0, 0, 0, 0, 0, 30]])
        logits = (sim / self.temp)
        loss = nn.functional.cross_entropy(logits, labels)
        output_loss = compute_moco_loss(q, k, k, True, queue, self.temp, 0)
        assert torch.equal(loss, output_loss)

    def test_moco_loss_with_key_without_queue(self):
        """Without a queue, only the batch keys serve as positives/negatives."""
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [0.0, 0.0]])
        model = MoCoModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, use_keys=True, queue=None, num_devices=2, temp=self.temp)
        labels = torch.tensor([0, 1, 2, 3])
        sim = torch.tensor([[0.0, 3, 0, 0], [0, 7, 0, 0], [0, 11, 0, 0], [0, 15, 0, 0]])
        logits = (sim / self.temp)
        loss = nn.functional.cross_entropy(logits, labels)
        output_loss = compute_moco_loss(q, k, k, True, None, self.temp, 0)
        assert torch.equal(loss, output_loss)

    def test_moco_sym_fit(self):
        """One fast-dev-run fit step with the symmetric loss completes."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        # Two identical views per sample (Identity transform) for the two branches.
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = MoCoModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        trainer = Trainer(fast_dev_run=1, devices=1)
        trainer.fit(model, datamodule)

    def test_moco_sym_mutual_fit(self):
        """One fast-dev-run fit step with symmetric loss and mutual_pass completes."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = MoCoModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, mutual_pass=True)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        trainer = Trainer(fast_dev_run=1, devices=1)
        trainer.fit(model, datamodule)

    def test_moco_fit(self):
        """Fit with a cosine momentum scheduler; momentum returns to its initial
        value after the (2-batch) fast dev run."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = MoCoModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        assert (model.current_momentum == 0.98)
        trainer = Trainer(fast_dev_run=2, devices=1)
        trainer.fit(model, datamodule)
        assert (model.current_momentum == 0.98)

    def test_moco_mutual_fit(self):
        """Same as test_moco_fit but with mutual_pass enabled."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = MoCoModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, mutual_pass=True)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        assert (model.current_momentum == 0.98)
        trainer = Trainer(fast_dev_run=2, devices=1)
        trainer.fit(model, datamodule)
        assert (model.current_momentum == 0.98)
|
class TestReSSLModel(unittest.TestCase):
    """Tests for ReSSLModel: construction, the relational (soft cross-entropy)
    loss against hand-computed similarities, and short Lightning fits.

    Fix: removed a stray no-op expression statement (a bare ``queue`` line) in
    ``test_ressl_loss_without_key``; it evaluated the tensor and discarded it.
    """

    def setUp(self) -> None:
        """Hydra-style instantiation configs and ReSSL temperatures shared by all tests."""
        self.trunk_cfg = DictConfig({'_target_': 'eztorch.models.trunks.create_resnet', 'name': 'resnet18', 'num_classes': 0, 'small_input': True})
        self.projector_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.predictor_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.queue_cfg = DictConfig({'_target_': 'eztorch.models.queues.FIFOQueue', 'size': 8, 'feature_dim': 2})
        # Sharper temperature for the momentum (teacher) branch than the
        # online branch, as in ReSSL.
        self.temp = 0.1
        self.temp_m = 0.04

    def test_ressl_init(self):
        """The model builds for every projector/predictor/queue/device combination."""
        ReSSLModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=1, temp=self.temp, temp_m=self.temp_m)
        ReSSLModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=8, temp=self.temp, temp_m=self.temp_m)
        ReSSLModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=2, simulate_n_devices=8, temp=self.temp, temp_m=self.temp_m)
        ReSSLModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=None, optimizer={}, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m)
        ReSSLModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m)
        ReSSLModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=self.queue_cfg, num_devices=2, temp=self.temp, temp_m=self.temp_m)

    def test_ressl_loss_without_key(self):
        """Relational loss with queue negatives only matches the hand-built target.

        The expected similarities are the dot products of q (resp. k) with the
        queue columns, e.g. q row 0 = (1,2) against queue column (2,2) gives 6.
        The loss is the soft cross-entropy between the teacher distribution
        (k-similarities at temp_m) and the student log-distribution
        (q-similarities at temp).
        """
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [1.0, 1.0]])
        queue = torch.tensor([[0.0, 0, 0, 2], [0.0, 0, 0, 2.0]])
        # NOTE(review): `model` is built but unused below; compute_ressl_loss
        # receives the local `queue` directly.
        model = ReSSLModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, use_keys=False, queue=self.queue_cfg, num_devices=2, temp=self.temp, temp_m=self.temp_m)
        model.queue = queue
        sim_qqueue = torch.Tensor([[0.0, 0, 0, 6], [0, 0, 0, 14], [0, 0, 0, 22], [0, 0, 0, 30]])
        sim_kqueue = torch.Tensor([[0.0, 0, 0, 0], [0, 0, 0, 4], [0, 0, 0, 0], [0, 0, 0, 4]])
        sim_q = sim_qqueue
        sim_k = sim_kqueue
        logits_q = (sim_q / self.temp)
        logits_k = (sim_k / self.temp_m)
        loss = (- torch.sum((nn.functional.softmax(logits_k.detach(), dim=1) * nn.functional.log_softmax(logits_q, dim=1)), dim=1).mean(dim=0))
        output_loss = compute_ressl_loss(q, k, k, False, queue, None, self.temp, self.temp_m)
        assert torch.equal(loss, output_loss)

    def test_ressl_loss_with_key(self):
        """With use_keys=True the batch keys are appended as extra candidates;
        the diagonal (self-similarity) is pushed to -1e9 so it vanishes after
        softmax."""
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [1.0, 1.0]])
        queue = torch.tensor([[0.0, 0, 0, 2], [0.0, 0, 0, 2.0]])
        model = ReSSLModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, use_keys=True, queue=self.queue_cfg, num_devices=2, temp=self.temp, temp_m=self.temp_m)
        model.queue = queue
        sim_qk = torch.tensor([[0, 3, 0, 3], [0, 7, 0, 7], [0, 11, 0, 11], [0, 15.0, 0, 15]])
        sim_kk = torch.tensor([[0, 0, 0, 0], [0, 2, 0, 2], [0, 0, 0, 0], [0, 2.0, 0, 2]])
        sim_qqueue = torch.Tensor([[0.0, 0, 0, 6], [0, 0, 0, 14], [0, 0, 0, 22], [0, 0, 0, 30]])
        sim_kqueue = torch.Tensor([[0.0, 0, 0, 0], [0, 0, 0, 4], [0, 0, 0, 0], [0, 0, 0, 4]])
        # Identity mask: each sample's own key is excluded from its candidates.
        mask = torch.tensor([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])
        sim_kk -= (1000000000.0 * mask)
        sim_qk -= (1000000000.0 * mask)
        sim_q = torch.cat([sim_qk, sim_qqueue], dim=1)
        sim_k = torch.cat([sim_kk, sim_kqueue], dim=1)
        logits_q = (sim_q / self.temp)
        logits_k = (sim_k / self.temp_m)
        loss = (- torch.sum((nn.functional.softmax(logits_k.detach(), dim=1) * nn.functional.log_softmax(logits_q, dim=1)), dim=1).mean(dim=0))
        # NOTE(review): the mask is requested on 'cuda' while every tensor here
        # is CPU — presumably compute_ressl_mask/compute_ressl_loss reconcile
        # devices; confirm this passes on CPU-only CI.
        mask = compute_ressl_mask(q.shape[0], queue.shape[1], True, 0, 1, 'cuda')
        output_loss = compute_ressl_loss(q, k, k, True, queue, mask, self.temp, self.temp_m)
        assert torch.equal(loss, output_loss)

    def test_ressl_loss_with_key_without_queue(self):
        """Without a queue, only the (self-masked) batch keys are candidates."""
        q = torch.arange(1.0, 9.0, 1.0).view((4, 2))
        k = torch.tensor([[0, 0], [1, 1], [0, 0], [1.0, 1.0]])
        model = ReSSLModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, use_keys=True, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m)
        sim_qk = torch.tensor([[0, 3, 0, 3], [0, 7, 0, 7], [0, 11, 0, 11], [0, 15.0, 0, 15]])
        sim_kk = torch.tensor([[0, 0, 0, 0], [0, 2, 0, 2], [0, 0, 0, 0], [0, 2.0, 0, 2]])
        mask = torch.tensor([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])
        sim_kk -= (1000000000.0 * mask)
        sim_qk -= (1000000000.0 * mask)
        sim_q = sim_qk
        sim_k = sim_kk
        logits_q = (sim_q / self.temp)
        logits_k = (sim_k / self.temp_m)
        loss = (- torch.sum((nn.functional.softmax(logits_k.detach(), dim=1) * nn.functional.log_softmax(logits_q, dim=1)), dim=1).mean(dim=0))
        mask = compute_ressl_mask(q.shape[0], 0, True, 0, 1, 'cuda')
        output_loss = compute_ressl_loss(q, k, k, True, None, mask, self.temp, self.temp_m)
        assert torch.equal(loss, output_loss)

    def test_ressl_sym_fit(self):
        """One fast-dev-run fit step with the symmetric loss completes."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = ReSSLModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, temp_m=self.temp_m)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        trainer = Trainer(fast_dev_run=1, devices=1)
        trainer.fit(model, datamodule)

    def test_ressl_sym_mutual_fit(self):
        """One fast-dev-run fit step with symmetric loss and mutual_pass completes."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = ReSSLModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, temp_m=self.temp_m, mutual_pass=True)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        trainer = Trainer(fast_dev_run=1, devices=1)
        trainer.fit(model, datamodule)

    def test_ressl_fit(self):
        """Fit with a cosine momentum scheduler; momentum returns to its initial
        value after the (2-batch) fast dev run."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = ReSSLModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, temp_m=self.temp_m)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        assert (model.current_momentum == 0.98)
        trainer = Trainer(fast_dev_run=2, devices=1)
        trainer.fit(model, datamodule)
        assert (model.current_momentum == 0.98)

    def test_ressl_mutual_fit(self):
        """Same as test_ressl_fit but with mutual_pass enabled."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = ReSSLModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, temp_m=self.temp_m, mutual_pass=True)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        assert (model.current_momentum == 0.98)
        trainer = Trainer(fast_dev_run=2, devices=1)
        trainer.fit(model, datamodule)
        assert (model.current_momentum == 0.98)
|
class TestSCEModel(unittest.TestCase):
    def setUp(self) -> None:
        """Hydra-style instantiation configs and SCE hyper-parameters shared by all tests."""
        # ResNet-18 trunk with no classification head; small_input presumably
        # adapts it to 32x32 images — TODO confirm against create_resnet.
        self.trunk_cfg = DictConfig({'_target_': 'eztorch.models.trunks.create_resnet', 'name': 'resnet18', 'num_classes': 0, 'small_input': True})
        self.projector_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.predictor_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.queue_cfg = DictConfig({'_target_': 'eztorch.models.queues.FIFOQueue', 'size': 8, 'feature_dim': 2})
        # Temperatures for the online (temp) and momentum (temp_m) branches,
        # and the SCE mixing coefficient.
        self.temp = 0.1
        self.temp_m = 0.05
        self.coeff = 0.5
def test_sce_init(self):
SCEModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=1, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=1, simulate_n_devices=8, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=None, predictor=None, optimizer={}, queue=None, num_devices=2, simulate_n_devices=8, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=None, optimizer={}, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=None, num_devices=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
SCEModel(trunk=self.trunk_cfg, projector=self.projector_cfg, predictor=self.predictor_cfg, optimizer={}, queue=self.queue_cfg, num_devices=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
def test_sce_sym_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_split_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=False, num_splits=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_split_sym_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, num_splits=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_split_sym_with_queue_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=DictConfig({'size': 128, 'feature_dim': 512}), num_devices=1, simulate_n_devices=1, sym=True, num_splits=2, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=2, devices=1)
trainer.fit(model, datamodule)
def test_sce_sym_mutual_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, sym=True, temp=self.temp, temp_m=self.temp_m, coeff=self.coeff, mutual_pass=True)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
trainer = Trainer(fast_dev_run=1, devices=1)
trainer.fit(model, datamodule)
def test_sce_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, temp_m=self.temp_m, coeff=self.coeff)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
assert (model.current_momentum == 0.98)
trainer = Trainer(fast_dev_run=2, devices=1)
trainer.fit(model, datamodule)
assert (model.current_momentum == 0.98)
def test_sce_mutual_fit(self):
optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
model = SCEModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, projector=self.projector_cfg, use_keys=True, queue=None, num_devices=1, simulate_n_devices=1, initial_momentum=0.98, scheduler_momentum='cosine', temp=self.temp, temp_m=self.temp_m, coeff=self.coeff, mutual_pass=True)
datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
assert (model.current_momentum == 0.98)
trainer = Trainer(fast_dev_run=2, devices=1)
trainer.fit(model, datamodule)
assert (model.current_momentum == 0.98)
|
class TestSimCLRModel(unittest.TestCase):
    """Tests for ``SimCLRModel``: construction, NT-Xent loss computation and short fits."""

    def setUp(self) -> None:
        self.trunk_cfg = DictConfig({'_target_': 'eztorch.models.trunks.create_resnet', 'name': 'resnet18', 'num_classes': 0, 'small_input': True})
        self.projector_cfg = DictConfig({'_target_': 'eztorch.models.heads.MLPHead', 'input_dim': 512, 'output_dim': 2})
        self.queue_cfg = DictConfig({'_target_': 'eztorch.models.queues.FIFOQueue', 'size': 8, 'feature_dim': 2})
        self.temp = 10.0

    def test_simclr_init(self):
        """Constructing the model must not raise, with and without a projector.

        The original test repeated the projector-less construction three times
        verbatim (copy-paste residue); once per configuration is sufficient.
        """
        SimCLRModel(trunk=self.trunk_cfg, projector=None, optimizer={}, temp=self.temp)
        SimCLRModel(trunk=self.trunk_cfg, projector=self.projector_cfg, optimizer={}, temp=self.temp)

    def test_simclr_loss(self):
        """``compute_simclr_loss`` matches a manual NT-Xent computation."""
        z = torch.tensor([[1.0, 2], [3, 4], [5, 6], [7, 8], [0, 0], [1, 1], [0, 0], [0, 0]])
        # Positive pairs are (i, i+4); negatives are every other off-diagonal pair.
        pos_mask = torch.tensor([[0.0, 0, 0, 0, 1, 0, 0, 0], [0.0, 0, 0, 0, 0, 1, 0, 0], [0.0, 0, 0, 0, 0, 0, 1, 0], [0.0, 0, 0, 0, 0, 0, 0, 1], [1.0, 0, 0, 0, 0, 0, 0, 0], [0.0, 1, 0, 0, 0, 0, 0, 0], [0.0, 0, 1, 0, 0, 0, 0, 0], [0.0, 0, 0, 1, 0, 0, 0, 0]])
        neg_mask = torch.tensor([[0.0, 1, 1, 1, 0, 1, 1, 1], [1.0, 0, 1, 1, 1, 0, 1, 1], [1.0, 1, 0, 1, 1, 1, 0, 1], [1.0, 1, 1, 0, 1, 1, 1, 0], [0.0, 1, 1, 1, 0, 1, 1, 1], [1.0, 0, 1, 1, 1, 0, 1, 1], [1.0, 1, 0, 1, 1, 1, 0, 1], [1.0, 1, 1, 0, 1, 1, 1, 0]])
        # ``sim`` is the precomputed pairwise dot-product matrix z @ z.T.
        sim = torch.tensor([[5.0, 11, 17, 23, 0, 3, 0, 0], [11, 25, 39, 53, 0, 7, 0, 0], [17, 39, 61, 83, 0, 11, 0, 0], [23, 53, 83, 113, 0, 15, 0, 0], [0.0, 0, 0, 0, 0, 0, 0, 0], [3, 7, 11, 15, 0, 2, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]])
        logits = torch.exp(sim / self.temp)
        pos = torch.sum(logits * pos_mask, 1)
        neg = torch.sum(logits * neg_mask, 1)
        loss = -torch.mean(torch.log(pos / (neg + pos)))
        output_loss = compute_simclr_loss(z, z, pos_mask, neg_mask, self.temp)
        assert torch.equal(loss, output_loss)

    def test_simclr_fit(self):
        """A fast dev run on random data completes without error."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = SimCLRModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, temp=self.temp)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        trainer = Trainer(fast_dev_run=1, devices=1)
        trainer.fit(model, datamodule)

    def test_simclr_mutual_fit(self):
        """Same as ``test_simclr_fit`` but with ``mutual_pass=True``."""
        optimizer_cfg = DictConfig({'_target_': 'eztorch.optimizers.optimizer_factory', 'name': 'adam', 'scheduler': None, 'initial_lr': 0.06})
        transform_cfg = [{'num_views': 2, 'transform': nn.Identity()}]
        model = SimCLRModel(trunk=self.trunk_cfg, optimizer=optimizer_cfg, temp=self.temp, mutual_pass=True)
        datamodule = BoringDataModule(dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=MultiCropTransform(transform_cfg)), val_dataset=RandomVisionLabeledDataset((128, 3, 32, 32), transform=None), batch_size=64)
        trainer = Trainer(fast_dev_run=1, devices=1)
        trainer.fit(model, datamodule)
|
class TestResnet(unittest.TestCase):
    """Tests for the ``create_resnet`` factory: stem configuration and fc head."""

    def _assert_stem(self, resnet, small_input: bool) -> None:
        """Check the first conv/maxpool match the requested input regime."""
        if small_input:
            # Small-input (CIFAR-style) stem: 3x3 stride-1 conv, no pooling.
            assert resnet.conv1.kernel_size == (3, 3)
            assert resnet.conv1.stride == (1, 1)
            assert resnet.conv1.padding == (1, 1)
            assert isinstance(resnet.maxpool, nn.Identity)
        else:
            # Standard ImageNet stem: 7x7 stride-2 conv followed by max-pooling.
            assert resnet.conv1.kernel_size == (7, 7)
            assert resnet.conv1.stride == (2, 2)
            assert resnet.conv1.padding == (3, 3)
            assert isinstance(resnet.maxpool, nn.MaxPool2d)

    def test_all_resnets(self):
        """Every registered ResNet variant can be instantiated."""
        for resnet in _ResNets:
            create_resnet(resnet)

    def test_resnet_small_input_with_fc(self):
        resnet = create_resnet('resnet18', small_input=True)
        assert isinstance(resnet.fc, nn.Linear)
        self._assert_stem(resnet, small_input=True)

    def test_resnet_small_input_without_fc(self):
        # num_classes=0 replaces the classification head with an identity.
        resnet = create_resnet('resnet18', small_input=True, num_classes=0)
        assert isinstance(resnet.fc, nn.Identity)
        self._assert_stem(resnet, small_input=True)

    def test_resnet_large_input_with_fc(self):
        resnet = create_resnet('resnet18', small_input=False)
        assert isinstance(resnet.fc, nn.Linear)
        self._assert_stem(resnet, small_input=False)

    def test_resnet_large_input_without_fc(self):
        resnet = create_resnet('resnet18', small_input=False, num_classes=0)
        assert isinstance(resnet.fc, nn.Identity)
        self._assert_stem(resnet, small_input=False)

    def test_resnet_forward(self):
        """A forward pass on a single 224x224 image must not raise."""
        resnet = create_resnet('resnet18')
        x = torch.rand((1, 3, 224, 224))
        resnet(x)
|
class TestLARS(unittest.TestCase):
    """Smoke test for the LARS optimizer constructor.

    NOTE: this class was originally also named ``TestOptimizerFactory``,
    colliding with the class defined immediately after it. The later
    definition rebound the name, so ``test_init_lars`` was never collected
    or run by unittest. Renaming the class restores the test.
    """

    def test_init_lars(self):
        model = BoringModel()
        # Constructing the optimizer over the model parameters must not raise.
        LARS(model.parameters(), lr=0.1)
|
class TestOptimizerFactory(unittest.TestCase):
    """Tests for ``optimizer_factory``: how parameters are split into groups
    under weight-decay exclusions (norm modules, bias, name keys) and
    layer-wise learning-rate decay (``layer_decay_lr``).

    The expected group/parameter counts below are contracts on the two boring
    models: the base model yields 2 parameter tensors and the large model 11
    (per the no-exclusion assertions).
    """
    def setUp(self):
        self.base_model = BoringModel()
        self.large_model = LargeBoringModel()
        # Plain SGD config with every exclusion disabled; individual tests
        # flip exclusion flags on this shared config.
        self.sgd_config = DictConfig({'name': 'sgd', 'initial_lr': 2.0, 'batch_size': None, 'num_steps_per_epoch': None, 'exclude_wd_norm': False, 'exclude_wd_bias': False, 'scaler': None, 'params': {}, 'scheduler': None})
    def test_no_exclude_optimizer_factory(self):
        """No exclusions: all parameters land in a single group."""
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model)
        assert (len(optimizer.param_groups) == 1)
        assert (len(optimizer.param_groups[0]['params']) == 2)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.large_model)
        assert (len(optimizer.param_groups) == 1)
        assert (len(optimizer.param_groups[0]['params']) == 11)
    def test_no_exclude_optimizer_factory_layer_decay(self):
        """Layer decay alone splits the large model into one group per layer depth."""
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model, layer_decay_lr=0.7)
        print(optimizer.param_groups)  # NOTE(review): leftover debug print; harmless but noisy.
        assert (len(optimizer.param_groups) == 1)
        assert (len(optimizer.param_groups[0]['params']) == 2)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.large_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 4)
        assert (len(optimizer.param_groups[0]['params']) == 3)
        assert (len(optimizer.param_groups[1]['params']) == 3)
        assert (len(optimizer.param_groups[2]['params']) == 3)
        assert (len(optimizer.param_groups[3]['params']) == 2)
    def test_exclude_bias_optimizer_factory(self):
        """Excluding bias from weight decay yields a decayed and an undecayed group."""
        self.sgd_config.exclude_wd_bias = True
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        # NOTE(review): keys_without_decay omitted here — presumably it has a
        # default value in optimizer_factory; confirm against its signature.
        (optimizer, _) = optimizer_factory(**self.sgd_config, model=self.large_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 7)
        assert (len(optimizer.param_groups[1]['params']) == 4)
    def test_exclude_bias_optimizer_factory_layer_decay(self):
        """Bias exclusion combined with layer decay: (decay, no-decay) x layers."""
        self.sgd_config.exclude_wd_bias = True
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        (optimizer, _) = optimizer_factory(**self.sgd_config, model=self.large_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 8)
        assert (len(optimizer.param_groups[0]['params']) == 2)
        assert (len(optimizer.param_groups[1]['params']) == 2)
        assert (len(optimizer.param_groups[2]['params']) == 2)
        assert (len(optimizer.param_groups[3]['params']) == 1)
        assert (len(optimizer.param_groups[4]['params']) == 1)
        assert (len(optimizer.param_groups[5]['params']) == 1)
        assert (len(optimizer.param_groups[6]['params']) == 1)
        assert (len(optimizer.param_groups[7]['params']) == 1)
    def test_exclude_norm_optimizer_factory(self):
        """Excluding norm-layer parameters from weight decay (base model has none)."""
        self.sgd_config.exclude_wd_norm = True
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model)
        assert (len(optimizer.param_groups) == 1)
        assert (len(optimizer.param_groups[0]['params']) == 2)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.large_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 5)
        assert (len(optimizer.param_groups[1]['params']) == 6)
    def test_exclude_norm_optimizer_factory_layer_decay(self):
        """Norm exclusion combined with layer decay."""
        self.sgd_config.exclude_wd_norm = True
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 1)
        assert (len(optimizer.param_groups[0]['params']) == 2)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.large_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 7)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        assert (len(optimizer.param_groups[2]['params']) == 1)
        assert (len(optimizer.param_groups[3]['params']) == 2)
        assert (len(optimizer.param_groups[4]['params']) == 2)
        assert (len(optimizer.param_groups[5]['params']) == 2)
        assert (len(optimizer.param_groups[6]['params']) == 2)
    def test_exclude_bias_and_norm_optimizer_factory(self):
        """Excluding both bias and norm parameters from weight decay."""
        self.sgd_config.exclude_wd_norm = True
        self.sgd_config.exclude_wd_bias = True
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.large_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 4)
        assert (len(optimizer.param_groups[1]['params']) == 7)
    def test_exclude_bias_and_norm_optimizer_factory_layer_decay(self):
        """Bias + norm exclusion combined with layer decay."""
        self.sgd_config.exclude_wd_norm = True
        self.sgd_config.exclude_wd_bias = True
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.base_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=[], model=self.large_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 8)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        assert (len(optimizer.param_groups[2]['params']) == 1)
        assert (len(optimizer.param_groups[3]['params']) == 1)
        assert (len(optimizer.param_groups[4]['params']) == 2)
        assert (len(optimizer.param_groups[5]['params']) == 2)
        assert (len(optimizer.param_groups[6]['params']) == 2)
        assert (len(optimizer.param_groups[7]['params']) == 1)
    def test_exclude_bias_via_key_and_norm_optimizer_factory(self):
        """Bias excluded via ``keys_without_decay=['bias']`` must match the flag-based exclusion."""
        self.sgd_config.exclude_wd_norm = True
        self.sgd_config.exclude_wd_bias = False
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=['bias'], model=self.base_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=['bias'], model=self.large_model)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 4)
        assert (len(optimizer.param_groups[1]['params']) == 7)
    def test_exclude_bias_via_key_and_norm_optimizer_factory_layer_decay(self):
        """Key-based bias exclusion combined with norm exclusion and layer decay."""
        self.sgd_config.exclude_wd_norm = True
        self.sgd_config.exclude_wd_bias = False
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=['bias'], model=self.base_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 2)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        (optimizer, _) = optimizer_factory(**self.sgd_config, keys_without_decay=['bias'], model=self.large_model, layer_decay_lr=0.7)
        assert (len(optimizer.param_groups) == 8)
        assert (len(optimizer.param_groups[0]['params']) == 1)
        assert (len(optimizer.param_groups[1]['params']) == 1)
        assert (len(optimizer.param_groups[2]['params']) == 1)
        assert (len(optimizer.param_groups[3]['params']) == 1)
        assert (len(optimizer.param_groups[4]['params']) == 2)
        assert (len(optimizer.param_groups[5]['params']) == 2)
        assert (len(optimizer.param_groups[6]['params']) == 2)
        assert (len(optimizer.param_groups[7]['params']) == 1)
|
class TestFilterLearnableParmams(unittest.TestCase):
    """Tests for ``filter_learnable_params``.

    (The class name keeps its historical typo to avoid breaking any external
    references to it.)
    """

    def test_filter_learnable_params(self) -> None:
        boring_model = BoringModel()
        large_boring_model = LargeBoringModel()

        # For the base model, every parameter is learnable: filtering is a no-op.
        boring_model_params = list(boring_model.parameters())
        filtered_boring_model_params = filter_learnable_params(boring_model_params, boring_model)
        assert all(any(param is filtered_param for filtered_param in filtered_boring_model_params) for param in boring_model_params)
        assert (len(boring_model_params) == len(filtered_boring_model_params))

        # The large model has two non-learnable parameters that must be dropped.
        # BUG FIX: the original iterated ``boring_model_params`` (the *other*
        # model's parameters) in this assertion, which made it trivially true;
        # iterate the large model's own parameters instead.
        large_boring_model_params = list(large_boring_model.parameters())
        filtered_large_boring_model_params = filter_learnable_params(large_boring_model_params, large_boring_model)
        assert any(not any(param is filtered_param for filtered_param in filtered_large_boring_model_params) for param in large_boring_model_params)
        assert (len(large_boring_model_params) == (len(filtered_large_boring_model_params) + 2))
|
class TestLrScaler(unittest.TestCase):
    """Tests for ``scale_learning_rate`` scaling rules."""

    def setUp(self) -> None:
        self.initial_lr = 2.0
        self.batch_size = 16

    def test_none_scaler(self) -> None:
        # Both ``None`` and the literal string 'none' leave the lr untouched.
        scaled = scale_learning_rate(self.initial_lr, None, self.batch_size)
        assert scaled == self.initial_lr
        scaled = scale_learning_rate(self.initial_lr, 'none', self.batch_size)
        assert scaled == self.initial_lr

    def test_linear_scaler(self) -> None:
        # Linear rule: lr * batch_size / 256.
        scaled = scale_learning_rate(self.initial_lr, 'linear', self.batch_size)
        assert scaled == self.initial_lr * self.batch_size / 256

    def test_sqrt_scaler(self) -> None:
        # Square-root rule: lr * sqrt(batch_size).
        scaled = scale_learning_rate(self.initial_lr, 'sqrt', self.batch_size)
        assert scaled == self.initial_lr * math.sqrt(self.batch_size)
|
class TestRetrieveModelParams(unittest.TestCase):
    """Tests for ``retrieve_model_params``: splitting a model's parameters into a
    filtered set (selected by module type and/or parameter-name key) and the rest.

    All membership checks use identity (``is``), since parameter tensors are
    compared by object, not by value.
    """

    def setUp(self) -> None:
        self.linear1 = nn.Linear(5, 5)
        self.bn1 = nn.BatchNorm1d(5)
        self.linear2 = nn.Linear(5, 5, bias=True)
        self.model = nn.Sequential(self.linear1, self.bn1, self.linear2)
        self.module_list = list(self.model.modules())

    def test_retrieve_model_params_no_filter(self) -> None:
        """No filters: every parameter ends up in ``other_parameters``."""
        modules_to_filter = []
        keys_to_filter = []
        (filtered_parameters, other_parameters) = retrieve_model_params(self.model, modules_to_filter, keys_to_filter)
        for param in self.model.parameters():
            # BUG FIX: the original asserted ``any(other is not param)``, which is
            # trivially true whenever there is more than one parameter; assert
            # membership instead, consistent with the other tests in this class.
            self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        self.assertTrue((len(filtered_parameters) == 0))
        self.assertTrue((len(list(self.model.parameters())) == len(other_parameters)))
        self.assertTrue((len(list(self.model.parameters())) == (len(filtered_parameters) + len(other_parameters))))

    def test_retrieve_model_params_filter_module(self) -> None:
        """Filtering by module type captures exactly the BatchNorm parameters."""
        modules_to_filter = [nn.BatchNorm1d]
        keys_to_filter = []
        (filtered_parameters, other_parameters) = retrieve_model_params(self.model, modules_to_filter, keys_to_filter)
        for param in self.linear1.parameters():
            self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
            self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        for param in self.bn1.parameters():
            self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
            self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
        for param in self.linear2.parameters():
            self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
            self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        self.assertTrue((len(filtered_parameters) == 2))
        self.assertTrue((len(other_parameters) == 4))
        self.assertTrue((len(list(self.model.parameters())) == (len(filtered_parameters) + len(other_parameters))))

    def test_retrieve_model_params_filter_key(self) -> None:
        """Filtering by parameter name captures every ``bias`` parameter."""
        modules_to_filter = []
        keys_to_filter = ['bias']
        (filtered_parameters, other_parameters) = retrieve_model_params(self.model, modules_to_filter, keys_to_filter)
        for (name_param, param) in self.linear1.named_parameters():
            if (name_param == 'bias'):
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
            else:
                self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        for (name_param, param) in self.bn1.named_parameters():
            if (name_param == 'bias'):
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
            else:
                self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        for (name_param, param) in self.linear2.named_parameters():
            if (name_param == 'bias'):
                # CONSISTENCY FIX: the original used the logically-equivalent
                # ``not all(param is not ...)`` here; use the ``any(param is ...)``
                # form used everywhere else in this class.
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
            else:
                self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        self.assertTrue((len(filtered_parameters) == 3))
        self.assertTrue((len(other_parameters) == 3))
        self.assertTrue((len(list(self.model.parameters())) == (len(filtered_parameters) + len(other_parameters))))

    def test_retrieve_model_params_filter_key_and_module(self) -> None:
        """Combining key and module filters captures all biases plus all BatchNorm parameters."""
        modules_to_filter = [nn.BatchNorm1d]
        keys_to_filter = ['bias']
        (filtered_parameters, other_parameters) = retrieve_model_params(self.model, modules_to_filter, keys_to_filter)
        for (name_param, param) in self.linear1.named_parameters():
            if (name_param == 'bias'):
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
            else:
                self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        for (name_param, param) in self.bn1.named_parameters():
            if (name_param == 'bias'):
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
            else:
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
        for (name_param, param) in self.linear2.named_parameters():
            if (name_param == 'bias'):
                self.assertTrue(any([(param is filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(all([(param is not other_parameter) for other_parameter in other_parameters]))
            else:
                self.assertTrue(all([(param is not filtered_parameter) for filtered_parameter in filtered_parameters]))
                self.assertTrue(any([(param is other_parameter) for other_parameter in other_parameters]))
        self.assertTrue((len(filtered_parameters) == 4))
        self.assertTrue((len(other_parameters) == 2))
        self.assertTrue((len(list(self.model.parameters())) == (len(filtered_parameters) + len(other_parameters))))
|
class TestReducedTimestamps(unittest.TestCase):
    """Tests for timestamp reduction: collapsing pairs of adjacent timesteps
    into one by merging labels (OR), has_label masks, ignore flags and
    averaging their timestamps."""
    def test_batch_reduced_timestamps(self):
        """Batched variant: a (B, T, C) label tensor is reduced to (B, T/2, C)."""
        x = torch.tensor([[[0.5, 0.8], [0.4, 0.6]], [[0.5, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]]])
        labels = torch.tensor([[[1.0, 0.0], [0.0, 0.0], [1.0, 1.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0], [1.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 0.0]]])
        has_label = labels.bool()
        ignore_class = torch.tensor([[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]])
        timestamps = torch.tensor([[0.0, 0.48, 1.0, 1.48], [0.0, 0.5, 1.0, 1.5], [2.0, 2.5, 3.0, 3.5], [4.0, 4.5, 5.0, 5.5], [6.0, 6.48, 7.0, 7.5], [6.0, 6.5, 7.0, 7.5]])
        reduced_batch = BatchReduceTimestamps()({'input': x, 'labels': labels, 'has_label': has_label, 'ignore_class': ignore_class, 'timestamps': timestamps})
        # Expected: labels/has_label/ignore_class of adjacent timestep pairs are
        # OR-ed, timestamps are the pair mean; 'input' is passed through untouched.
        expected_reduced_batch = {'input': x, 'labels': torch.tensor([[[1.0, 0.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]), 'has_label': torch.tensor([[[1.0, 0.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]).bool(), 'ignore_class': torch.tensor([[[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]], [[0, 1], [0, 0]], [[0, 0], [0, 0]], [[0, 0], [0, 0]]]).to(torch.bool), 'timestamps': torch.tensor([[0.24, 1.24], [0.25, 1.25], [2.25, 3.25], [4.25, 5.25], [6.24, 7.25], [6.25, 7.25]])}
        for (key, value) in expected_reduced_batch.items():
            assert torch.allclose(reduced_batch[key], value, rtol=0.001, atol=0.001)
    def test_reduced_timestamps(self):
        """Per-sample variant: each sample is reduced independently and must match
        the corresponding row of the batched expectation."""
        x = torch.tensor([[[0.5, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]]])
        labels = torch.tensor([[[1.0, 0.0], [0.0, 0.0], [1.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0], [1.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 0.0]]])
        has_label = labels.bool()
        ignore_class = torch.tensor([[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]])
        timestamps = torch.tensor([[0.0, 0.5, 1.0, 1.5], [2.0, 2.5, 3.0, 3.5], [4.0, 4.5, 5.0, 5.5], [6.0, 6.5, 7.0, 7.5]])
        # NOTE(review): 'ignore_class' is intentionally absent from the expected
        # dict here, so the reduced ignore flags are not checked in this test —
        # presumably deliberate; confirm.
        expected_reduced_batch = {'input': x, 'labels': torch.tensor([[[1.0, 0.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]), 'has_label': torch.tensor([[[1.0, 0.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 1.0], [0.0, 1.0]]]).bool(), 'timestamps': torch.tensor([[0.25, 1.25], [2.25, 3.25], [4.25, 5.25], [6.25, 7.25]])}
        for i in range(len(x)):
            reduced = ReduceTimestamps()({'input': x[i], 'labels': labels[i], 'has_label': has_label[i], 'ignore_class': ignore_class[i], 'timestamps': timestamps[i]})
            for (key, value) in expected_reduced_batch.items():
                assert torch.allclose(reduced[key], value[i], rtol=0.001, atol=0.001)
|
class TestActionSpottingMixup(unittest.TestCase):
    """Tests for action-spotting mixup: mixing inputs along a permutation while
    doubling the batch of label/ignore tensors with per-sample mix weights."""
    def test_mix_action_spotting(self):
        """``mix_spotting`` mixes inputs and concatenates (original, permuted)
        labels/masks with weights (w, 1-w[perm])."""
        x = torch.tensor([[[0.5, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.5, 0.8], [0.4, 0.5]], [[0.4, 0.2], [0.2, 0.5]]])
        mix_value = torch.tensor([[[0.5]], [[0.3]], [[0.7]], [[0.2]]])
        permutation = torch.tensor([1, 0, 2, 3])
        labels = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]]])
        ignore_class = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0]]])
        (x_mixed, labels_mixed, has_label_mixed, ignore_class_mixed, mixed_weights) = mix_spotting(x, mix_value, permutation, labels, labels.bool(), ignore_class)
        # Inputs are convexly combined with the permuted batch.
        expected_x_mixed = ((mix_value * x) + ((1 - mix_value) * x[permutation]))
        # Labels/masks are the originals followed by the permuted copies.
        expected_labels_mixed = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]]])
        expected_has_label_mixed = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]]]).bool()
        expected_ignore_class_mixed = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0]], [[0.0, 0.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0]]])
        # Weights: mix_value for the first half, (1 - mix_value[permutation]) for the second.
        expected_mixed_weights = torch.tensor([[[0.5]], [[0.3]], [[0.7]], [[0.2]], [[0.5]], [[0.7]], [[0.3]], [[0.8]]])
        assert torch.allclose(x_mixed, expected_x_mixed)
        assert torch.allclose(labels_mixed, expected_labels_mixed)
        assert torch.allclose(has_label_mixed, expected_has_label_mixed)
        assert torch.allclose(ignore_class_mixed, expected_ignore_class_mixed)
        assert torch.allclose(mixed_weights, expected_mixed_weights)
    def test_action_spotting_mixup(self):
        """Smoke test: ``SpottingMixup`` runs on a batch dict without raising.
        NOTE(review): no assertions on the output — only exercises the call path."""
        spotting_mixup = SpottingMixup(alpha=0.5)
        x = torch.tensor([[[0.5, 0.8], [0.4, 0.6]], [[0.2, 0.8], [0.4, 0.6]], [[0.5, 0.8], [0.4, 0.5]], [[0.4, 0.2], [0.2, 0.5]]])
        labels = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]]])
        ignore_class = torch.tensor([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 1.0]]])
        spotting_mixup(batch={'input': x, 'labels': labels, 'has_label': labels.bool(), 'ignore_class': ignore_class})
|
class RSSMPrior(nn.Module):
    """Transition (prior) network of an RSSM cell: predicts the next stochastic
    latent distribution from the previous state and a context vector."""
    c: Config

    @nn.compact
    def __call__(self, prev_state, context):
        # NOTE: the order of submodule calls below is load-bearing — flax
        # derives parameter names from call order under @nn.compact.
        x = jnp.concatenate([prev_state['sample'], context], -1)
        x = nn.relu(nn.Dense(self.c.cell_embed_size)(x))
        det_state, det_out = GRUCell()(prev_state['det_state'], x)
        x = nn.relu(nn.Dense(self.c.cell_embed_size)(det_out))
        mean = nn.Dense(self.c.cell_stoch_size)(x)
        # The +0.54 shift biases softplus toward ~1 at init — presumably so the
        # initial stddev starts near 1 (softplus(0.54) ~= 1); TODO confirm.
        stddev = nn.softplus(nn.Dense(self.c.cell_stoch_size)(x + 0.54)) + self.c.cell_min_stddev
        dist = tfd.MultivariateNormalDiag(mean, stddev)
        sample = dist.sample(seed=self.make_rng('sample'))
        return dict(
            mean=mean,
            stddev=stddev,
            sample=sample,
            det_out=det_out,
            det_state=det_state,
            output=jnp.concatenate([sample, det_out], -1),
        )
|
class RSSMPosterior(nn.Module):
    """RSSM posterior: refines the prior's latent using observation features,
    reusing the prior's deterministic path unchanged."""
    c: Config

    @nn.compact
    def __call__(self, prior, obs_inputs):
        # Combine the prior's deterministic output with the observation embedding.
        x = jnp.concatenate([prior['det_out'], obs_inputs], -1)
        x = nn.relu(nn.Dense(self.c.cell_embed_size)(x))
        x = nn.relu(nn.Dense(self.c.cell_embed_size)(x))
        mean = nn.Dense(self.c.cell_stoch_size)(x)
        # Same stddev parameterization as the prior: softplus shifted so the
        # initial stddev is near 1, floored at cell_min_stddev.
        stddev = nn.softplus(nn.Dense(self.c.cell_stoch_size)(x + 0.54)) + self.c.cell_min_stddev
        dist = tfd.MultivariateNormalDiag(mean, stddev)
        sample = dist.sample(seed=self.make_rng('sample'))
        # det_out/det_state are carried over from the prior untouched.
        return dict(
            mean=mean,
            stddev=stddev,
            sample=sample,
            det_out=prior['det_out'],
            det_state=prior['det_state'],
            output=jnp.concatenate([sample, prior['det_out']], -1),
        )
|
class RSSMCell(nn.Module):
    """One RSSM step: run the transition prior, then (optionally) the
    observation posterior; returns (carry, (prior, posterior))."""
    c: Config

    @property
    def state_size(self):
        # Sizes of each entry of the per-step state dict.
        stoch = self.c.cell_stoch_size
        deter = self.c.cell_deter_size
        return dict(mean=stoch, stddev=stoch, sample=stoch, det_out=deter, det_state=deter, output=stoch + deter)

    def zero_state(self, batch_size, dtype=jnp.float32):
        # All-zeros initial state matching `state_size`.
        return {name: jnp.zeros((batch_size, size), dtype=dtype) for name, size in self.state_size.items()}

    @nn.compact
    def __call__(self, state, inputs, use_obs):
        obs_input, context = inputs
        prior = RSSMPrior(self.c)(state, context)
        if use_obs:
            posterior = RSSMPosterior(self.c)(prior, obs_input)
        else:
            posterior = prior
        return posterior, (prior, posterior)
|
class Encoder(nn.Module):
    """Multi-level video encoder.

    Extracts a feature hierarchy from a video: a conv stack produces the
    bottom-most level directly, then each higher level applies dense layers
    and sums groups of `tmp_abs_factor**level` consecutive timesteps.
    """
    c: Config

    @nn.compact
    def __call__(self, obs):
        """Encode a (batch, time, height, width, channels) video into one
        feature tensor per hierarchy level (coarser in time as level grows)."""
        # Fold batch and time together so the conv stack sees single frames.
        frames = obs.reshape(((-1,) + obs.shape[2:]))
        Conv = partial(nn.Conv, kernel_size=(4, 4), strides=(2, 2), padding='VALID')
        h = leaky_relu(Conv(self.c.total_filters)(frames))
        h = leaky_relu(Conv(self.c.total_filters * 2)(h))
        h = leaky_relu(Conv(self.c.total_filters * 4)(h))
        h = leaky_relu(Conv(self.c.total_filters * 8)(h))
        # Restore (batch, time) and flatten spatial dims into one feature axis.
        x = h.reshape(obs.shape[:2] + (-1,))
        layers = [x]
        print(f'Input shape at level 0: {x.shape}')
        feat_size = x.shape[-1]
        for level in range(1, self.c.levels):
            # Dense trunk for this level; the final layer maps back to feat_size.
            for _ in range(self.c.enc_dense_layers - 1):
                x = nn.relu(nn.Dense(self.c.enc_dense_embed_size)(x))
            if self.c.enc_dense_layers > 0:
                x = nn.Dense(feat_size)(x)
            layer = x
            # Merge tmp_abs_factor**level consecutive timesteps by summation,
            # zero-padding at the end so the length divides evenly.
            timesteps_to_merge = self.c.tmp_abs_factor ** level
            timesteps_to_pad = -layer.shape[1] % timesteps_to_merge
            layer = jnp.pad(layer, ((0, 0), (0, timesteps_to_pad), (0, 0)))
            layer = layer.reshape((layer.shape[0], -1, timesteps_to_merge, layer.shape[2]))
            layer = jnp.sum(layer, axis=2)
            layers.append(layer)
            print(f'Input shape at level {level}: {layer.shape}')
        return layers
|
class Decoder(nn.Module):
    """States-to-images decoder: maps per-timestep state vectors to frames."""
    c: Config

    @nn.compact
    def __call__(self, bottom_layer_output):
        """Decode a (batch, time, feature_dim) state tensor into a
        (batch, time, 64, 64, channels) video; tanh output lies in [-1, 1]."""
        leading = bottom_layer_output.shape[:2]
        h = nn.Dense(self.c.channels_mult * 1024)(bottom_layer_output)
        # Treat each timestep as an independent 1x1 "image" for the deconv stack.
        h = jnp.reshape(h, (-1, 1, 1, h.shape[-1]))
        ConvT = partial(nn.ConvTranspose, strides=(2, 2), padding='VALID')
        h = leaky_relu(ConvT(self.c.total_filters * 4, (5, 5))(h))
        h = leaky_relu(ConvT(self.c.total_filters * 2, (5, 5))(h))
        h = leaky_relu(ConvT(self.c.total_filters, (6, 6))(h))
        frames = nn.tanh(ConvT(self.c.channels, (6, 6))(h))
        # Restore the (batch, time) leading axes.
        return frames.reshape(leading + frames.shape[1:])
|
def must_be(value):
    """Dataclass field pinned to `value`.

    The field defaults to `value` and its metadata restricts argparse
    `choices` to that single value, so the option cannot be overridden.
    """
    return field(default=value, metadata={'choices': [value]})
|
@dataclass
class Config():
    """Experiment configuration for the CW-VAE video model.

    Also doubles as the CLI schema: `parse_config` turns every field into an
    argparse option and layers command-line values over a YAML config file.
    """
    # Required paths.
    config: str  # path to the YAML config file
    datadir: str  # root directory for (downloaded) datasets
    logdir: str  # root directory for experiment outputs
    # Model architecture.
    levels: int = 3  # number of hierarchy levels
    tmp_abs_factor: int = 6  # temporal abstraction factor between levels
    dec_stddev: float = 1.0  # decoder observation stddev
    enc_dense_layers: int = 3
    enc_dense_embed_size: int = 1000
    cell_stoch_size: int = 20
    cell_deter_size: int = 200
    cell_embed_size: int = 200
    cell_min_stddev: float = 0.0001
    use_obs: Optional[str] = None  # per-level 'T'/'F' string, e.g. 'TTF'; None = all levels observe
    channels_mult: int = 1
    filters: int = 32
    dataset: str = field(default='mmnist', metadata=dict(choices=['mmnist', 'minerl', 'mazes']))
    seq_len: int = 100  # training subsequence length (frames)
    eval_seq_len: int = 1000  # evaluation subsequence length (frames)
    channels: int = 1
    # Optimization.
    lr: float = 0.0003
    batch_size: int = 50
    num_epochs: int = 300
    clip_grad_norm_by: float = 10000
    # NOTE: evaluated once at class-definition time, so every Config() in the
    # same process shares the same random default seed.
    seed: int = np.random.randint(np.iinfo(np.int32).max)
    open_loop_ctx: int = 36  # context frames given before open-loop prediction
    # Logging / checkpointing cadence (in training steps).
    save_gifs: bool = True
    save_scalars_every: int = 1000
    save_gifs_every: int = 1000
    save_model_every: int = 1000
    save_named_model_every: int = 5000
    num_examples: int = 100
    num_samples: int = 1
    no_save_grid: bool = False
    # Options pinned to a single value (kept for config-file compatibility).
    cell_type: str = must_be('RSSMCell')
    cell_mean_only: str = must_be('false')
    cell_reset_state: str = must_be('false')
    beta: Optional[float] = must_be(None)
    free_nats: Optional[float] = must_be(None)
    kl_grad_post_perc: Optional[float] = must_be(None)
    num_val_batches: int = must_be(1)

    def config_file(self, eval):
        """Path of the YAML config: the saved config.yml next to `logdir`
        when evaluating, otherwise the user-supplied `config` path."""
        return ((Path(self.logdir).parent / 'config.yml') if eval else Path(self.config))

    @property
    def _run_name(self):
        # Human-readable run identifier encoding the main hyperparameters.
        return f'{self.dataset}_cwvae_{self.cell_type.lower()}_{self.levels}l_f{self.tmp_abs_factor}_decsd{self.dec_stddev}_enchl{self.enc_dense_layers}_ences{self.enc_dense_embed_size}_edchnlmult{self.channels_mult}_ss{self.cell_stoch_size}_ds{self.cell_deter_size}_es{self.cell_embed_size}_seq{self.seq_len}_lr{self.lr}_bs{self.batch_size}'

    @property
    def exp_rootdir(self):
        # <logdir>/<dataset>/<run name> — all artifacts for this run live here.
        return ((Path(self.logdir) / self.dataset) / self._run_name)

    def save(self):
        """Persist this config as YAML inside the experiment directory."""
        self.exp_rootdir.mkdir(parents=True, exist_ok=True)
        with (self.exp_rootdir / 'config.yml').open('w') as f:
            yaml.dump(asdict(self), f, default_flow_style=False)

    @property
    def total_filters(self):
        # Base conv filter count scaled by the channel multiplier.
        return (self.filters * self.channels_mult)

    @property
    def use_observations(self) -> List[bool]:
        """Decode `use_obs` into one bool per level (None means observe at
        every level); raises KeyError on characters other than T/F."""
        if (self.use_obs is None):
            return ([True] * self.levels)
        assert (len(self.use_obs) == self.levels)
        return [dict(T=True, F=False)[c] for c in self.use_obs.upper()]

    @property
    def _dataset_name(self):
        # Map the short CLI name to the registered TFDS dataset name.
        return dict(minerl='minerl_navigate', mmnist='moving_mnist_2digit', mazes='gqn_mazes')[self.dataset]

    def load_dataset(self, eval=False):
        """Build a numpy iterator over the train (or test) split.

        Videos are scaled to [0, 1] and chopped into fixed-length
        subsequences (`eval_seq_len` when evaluating, else `seq_len`);
        training data is additionally repeated, shuffled and batched.
        """
        import tensorflow as tf
        import tensorflow_datasets as tfds
        np.random.seed(self.seed)
        tf.random.set_seed(self.seed)
        # Importing the dataset module registers it with TFDS.
        if (self.dataset == 'minerl'):
            import minerl_navigate
        elif (self.dataset == 'mmnist'):
            import datasets.moving_mnist
        elif (self.dataset == 'mazes'):
            import datasets.gqn_mazes
        d = tfds.load(self._dataset_name, data_dir=self.datadir, shuffle_files=(not eval))
        d = d[('test' if eval else 'train')]
        d = d.map((lambda vid: (tf.cast(vid['video'], tf.float32) / 255.0)))
        seq_len = (self.eval_seq_len if eval else self.seq_len)
        if seq_len:
            def split_to_seq_len(seq):
                # Chop each video into back-to-back seq_len windows, dropping
                # the incomplete remainder at the end.
                usable_len = (tf.shape(seq)[0] - (tf.shape(seq)[0] % seq_len))
                seq = tf.reshape(seq[:usable_len], tf.concat([[(usable_len // seq_len), seq_len], tf.shape(seq)[1:]], (- 1)))
                return tf.data.Dataset.from_tensor_slices(seq)
            d = d.flat_map(split_to_seq_len)
            d = d.prefetch(tf.data.experimental.AUTOTUNE)
        if (not eval):
            d = d.repeat(self.num_epochs).shuffle((10 * self.batch_size))
        d = d.batch(self.batch_size).prefetch(tf.data.experimental.AUTOTUNE)
        return tfds.as_numpy(d)
|
def parse_config(eval=False):
    """Parse CLI flags layered over the YAML config file into a Config.

    Two-pass parse: the first pass builds a Config just to locate the YAML
    file (--config, or the saved config.yml next to logdir when eval=True);
    its contents are then installed as argparse defaults and argv is parsed
    again, so explicit command-line flags still override the YAML values.
    """
    p = ArgumentParser()
    for f in fields(Config):
        # bool fields defaulting to False become plain --flag switches; all
        # other fields become typed options, forwarding dataclass metadata
        # such as the `choices` pinned by must_be().
        # NOTE(review): assumes f.type is a real callable type object; it
        # would be a string under `from __future__ import annotations`.
        kwargs = (dict(action='store_true') if ((f.type is bool) and (not f.default)) else dict(default=f.default, type=f.type))
        p.add_argument(f'--{f.name}', **kwargs, **f.metadata)
    c = Config(**vars(p.parse_args()))
    p.set_defaults(**yaml.full_load(c.config_file(eval).read_text()))
    return replace(c, **vars(p.parse_args()))
|
class GqnMazes(tfds.core.GeneratorBasedBuilder):
    """TFDS builder for the GQN Mazes video dataset (64x64 RGB clips)."""
    VERSION = tfds.core.Version('1.0.0')
    RELEASE_NOTES = {'1.0.0': 'Initial release.'}

    def _info(self) -> tfds.core.DatasetInfo:
        """Return dataset metadata (single `video` feature)."""
        video = tfds.features.Video(shape=(None, 64, 64, 3))
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(dict(video=video)),
            supervised_keys=None,
            homepage='https://archive.org/details/gqn_mazes',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Download the archive and expose train/test split generators."""
        root = dl_manager.download_and_extract(_DOWNLOAD_URL)
        return {
            'train': self._generate_examples(root / 'train'),
            'test': self._generate_examples(root / 'test'),
        }

    def _generate_examples(self, path):
        """Yield one (key, example) pair per .mp4 file under `path`."""
        for clip in path.glob('*.mp4'):
            yield str(clip), dict(video=str(clip.resolve()))
|
class MovingMnist_2digit(tfds.core.GeneratorBasedBuilder):
    """TFDS builder for the 2-digit Moving MNIST dataset (64x64 grayscale)."""
    VERSION = tfds.core.Version('1.0.0')
    RELEASE_NOTES = {'1.0.0': 'Initial release.'}

    def _info(self) -> tfds.core.DatasetInfo:
        """Return dataset metadata (single `video` feature)."""
        video = tfds.features.Video(shape=(None, 64, 64, 1))
        return tfds.core.DatasetInfo(
            builder=self,
            description=_DESCRIPTION,
            features=tfds.features.FeaturesDict(dict(video=video)),
            supervised_keys=None,
            homepage='https://archive.org/details/moving_mnist',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: tfds.download.DownloadManager):
        """Download the archive and expose train/test split generators.

        Note the asymmetric sequence lengths: 100-frame clips for training,
        1000-frame clips for testing.
        """
        root = dl_manager.download_and_extract(_DOWNLOAD_URL)
        return {
            'train': self._generate_examples(root / 'train-seq100'),
            'test': self._generate_examples(root / 'test-seq1000'),
        }

    def _generate_examples(self, path):
        """Yield one (key, example) pair per .mp4 file under `path`."""
        for clip in path.glob('*.mp4'):
            yield str(clip), dict(video=str(clip.resolve()))
|
class SSFetcher(threading.Thread):
    """Background thread that shuffles dialogue indices and pushes batches of
    dialogues into the parent SSIterator's queue.

    When `skip_utterance` is set, each sampled dialogue is replaced by a pair
    of utterances with the one in between removed, producing a "forward"
    (original order) and a "backward" (order-swapped) variant.
    """
    def __init__(self, parent, init_offset=0, init_reshuffle_count=1, eos_sym=(- 1), skip_utterance=False, skip_utterance_predict_both=False):
        threading.Thread.__init__(self)
        self.parent = parent
        # Private RNG seeded from the parent so shuffling is reproducible.
        self.rng = numpy.random.RandomState(self.parent.seed)
        self.indexes = numpy.arange(parent.data_len)
        self.init_offset = init_offset
        self.init_reshuffle_count = init_reshuffle_count
        self.offset = 0
        self.reshuffle_count = 0
        self.eos_sym = eos_sym
        self.skip_utterance = skip_utterance
        self.skip_utterance_predict_both = skip_utterance_predict_both

    def apply_reshuffle(self):
        # Re-shuffle the data order and restart from the beginning.
        self.rng.shuffle(self.indexes)
        self.offset = 0
        self.reshuffle_count += 1

    def run(self):
        diter = self.parent
        # Fast-forward the RNG to the requested reshuffle epoch and offset so
        # a resumed run sees the same data order as an uninterrupted one.
        while (self.reshuffle_count < self.init_reshuffle_count):
            self.apply_reshuffle()
        self.offset = self.init_offset
        while (not diter.exit_flag):
            last_batch = False
            dialogues = []
            while (len(dialogues) < diter.batch_size):
                if (self.offset == diter.data_len):
                    # End of epoch: stop after this batch, or reshuffle and
                    # keep going forever.
                    if (not diter.use_infinite_loop):
                        last_batch = True
                        break
                    else:
                        self.apply_reshuffle()
                index = self.indexes[self.offset]
                s = diter.data[index]
                if (len(s) > 0):
                    # Flatten one level of nesting (utterance lists -> tokens).
                    if isinstance(s[0], list):
                        s = [item for sublist in s for item in sublist]
                    if (not self.skip_utterance):
                        # Plain mode: keep the dialogue if it fits max_len
                        # (max_len == -1 disables the limit).
                        if ((diter.max_len == (- 1)) or (len(s) <= diter.max_len)):
                            dialogues.append([s, self.offset, self.reshuffle_count])
                    else:
                        s = copy.deepcopy(s)
                        # Positions of end-of-utterance tokens within s.
                        eos_indices = numpy.where((numpy.asarray(s) == self.eos_sym))[0]
                        # NOTE(review): the next two lines splice self.eos_sym
                        # (the token id, default -1) into an array of
                        # *positions*. With eos_sym == -1 it behaves as a
                        # negative slice index for a missing boundary eos;
                        # verify this is intended for other eos_sym values.
                        if (not (s[0] == self.eos_sym)):
                            eos_indices = numpy.insert(eos_indices, 0, [self.eos_sym])
                        if (not (s[(- 1)] == self.eos_sym)):
                            eos_indices = numpy.append(eos_indices, [self.eos_sym])
                        if (len(eos_indices) > 2):
                            # Pick a random pair of consecutive utterances ...
                            first_utterance_index = self.rng.randint(0, (len(eos_indices) - 2))
                            s_forward = s[eos_indices[first_utterance_index]:(eos_indices[(first_utterance_index + 2)] + 1)]
                            # ... and build the same pair with the order swapped.
                            s_backward_a = s[eos_indices[(first_utterance_index + 1)]:eos_indices[(first_utterance_index + 2)]]
                            s_backward_b = s[eos_indices[first_utterance_index]:(eos_indices[(first_utterance_index + 1)] + 1)]
                            # Join the swapped halves, inserting an eos at the
                            # seam only if neither side already carries one.
                            if ((s_backward_a[(- 1)] == self.eos_sym) or (s_backward_b[0] == self.eos_sym)):
                                s_backward = (s_backward_a + s_backward_b)
                            else:
                                s_backward = ((s_backward_a + [self.eos_sym]) + s_backward_b)
                        else:
                            # Fewer than two utterances: emit degenerate samples.
                            s_forward = [self.eos_sym]
                            s_backward = [self.eos_sym]
                        if self.skip_utterance_predict_both:
                            # Emit both variants (each still subject to max_len).
                            if ((diter.max_len == (- 1)) or (len(s_forward) <= diter.max_len)):
                                dialogues.append([s_forward, self.offset, self.reshuffle_count])
                            if ((diter.max_len == (- 1)) or (len(s_backward) <= diter.max_len)):
                                dialogues.append([s_backward, self.offset, self.reshuffle_count])
                        elif (self.rng.randint(0, 2) == 0):
                            # Coin flip chose the forward variant.
                            if ((diter.max_len == (- 1)) or (len(s_forward) <= diter.max_len)):
                                dialogues.append([s_forward, self.offset, self.reshuffle_count])
                        elif ((diter.max_len == (- 1)) or (len(s_backward) <= diter.max_len)):
                            # Coin flip chose the backward variant.
                            dialogues.append([s_backward, self.offset, self.reshuffle_count])
                self.offset += 1
            if len(dialogues):
                diter.queue.put(dialogues)
            if last_batch:
                # None is the end-of-data sentinel consumed by SSIterator.next.
                diter.queue.put(None)
                return
|
class SSIterator(object):
    """Iterator over pickled dialogue data, fed by a background SSFetcher.

    Each call to `next` returns one batch — a list of
    [token_sequence, offset, reshuffle_count] entries — or None once the
    data is exhausted (never, when `use_infinite_loop` is set).
    """

    def __init__(self, dialogue_file, batch_size, seed, max_len=(- 1), use_infinite_loop=True, init_offset=0, init_reshuffle_count=1, eos_sym=(- 1), skip_utterance=False, skip_utterance_predict_both=False):
        # Explicit assignments kept for readability; the locals() update below
        # also records the remaining args (seed, max_len, use_infinite_loop).
        self.dialogue_file = dialogue_file
        self.batch_size = batch_size
        self.init_offset = init_offset
        self.init_reshuffle_count = init_reshuffle_count
        self.eos_sym = eos_sym
        self.skip_utterance = skip_utterance
        self.skip_utterance_predict_both = skip_utterance_predict_both
        args = locals()
        args.pop('self')
        self.__dict__.update(args)
        self.load_files()
        self.exit_flag = False

    def load_files(self):
        """Load the pickled dialogue list and cache its length."""
        # Open in binary mode (pickle data is binary — text mode breaks on
        # Python 3 and on Windows) and close the handle deterministically;
        # the previous `cPickle.load(open(..., 'r'))` leaked the file object.
        with open(self.dialogue_file, 'rb') as f:
            self.data = cPickle.load(f)
        self.data_len = len(self.data)
        logger.debug(('Data len is %d' % self.data_len))

    def start(self):
        """Spawn the background fetcher thread that fills self.queue."""
        self.exit_flag = False
        self.queue = Queue.Queue(maxsize=1000)
        self.gather = SSFetcher(self, self.init_offset, self.init_reshuffle_count, self.eos_sym, self.skip_utterance, self.skip_utterance_predict_both)
        self.gather.daemon = True
        self.gather.start()

    def __del__(self):
        if hasattr(self, 'gather'):
            # Ask the fetcher to stop: its run() loop tests
            # self.parent.exit_flag, so the flag must be set on THIS object.
            # (The previous code set self.gather.exitFlag, an attribute the
            # thread never reads, so it was never actually signalled.)
            self.exit_flag = True
            self.gather.join()

    def __iter__(self):
        return self

    def next(self):
        """Return the next batch, or None (and latch exit) at end of data."""
        if self.exit_flag:
            return None
        batch = self.queue.get()
        if (not batch):
            # The fetcher enqueues None as its end-of-data sentinel.
            self.exit_flag = True
        return batch
|
def sharedX(value, name=None, borrow=False, dtype=None):
    """Create a theano shared variable holding `value`.

    The dtype defaults to theano's configured floatX when not given.
    """
    resolved_dtype = theano.config.floatX if dtype is None else dtype
    return theano.shared(theano._asarray(value, dtype=resolved_dtype), name=name, borrow=borrow)
|
def Adam(grads, lr=0.0002, b1=0.1, b2=0.001, e=1e-08):
    """Build theano update rules implementing the Adam optimizer.

    Args:
        grads: dict mapping shared parameters to their gradient expressions.
        lr: base learning rate.
        b1, b2: one minus the usual Adam beta1/beta2 decay rates.
        e: numerical-stability epsilon added to the denominator.

    Returns:
        (updates, varlist): update pairs for theano.function, plus the list
        of optimizer state variables (moment accumulators).
    """
    updates = []
    varlist = []
    step = sharedX(0.0)
    step_next = step + 1.0
    # Bias-correction factors for the first and second moments.
    corr1 = 1.0 - (1.0 - b1) ** step_next
    corr2 = 1.0 - (1.0 - b2) ** step_next
    lr_t = lr * (T.sqrt(corr2) / corr1)
    for param, grad in grads.items():
        m = sharedX(param.get_value() * 0.0, name=(param.name + '_adam_optimizer_m'))
        v = sharedX(param.get_value() * 0.0, name=(param.name + '_adam_optimizer_v'))
        m_next = (b1 * grad) + ((1.0 - b1) * m)
        v_next = (b2 * T.sqr(grad)) + ((1.0 - b2) * v)
        # Bias-corrected step direction.
        direction = m_next / (T.sqrt(v_next) + e)
        updates.append((m, m_next))
        updates.append((v, v_next))
        updates.append((param, param - lr_t * direction))
        varlist.extend([m, v])
    updates.append((step, step_next))
    return (updates, varlist)
|
def safe_pickle(obj, filename):
    """Pickle `obj` to `filename` with the highest protocol, logging whether
    an existing file is being replaced."""
    if os.path.isfile(filename):
        template = 'Overwriting %s.'
    else:
        template = 'Saving to %s.'
    logger.info(template % filename)
    with open(filename, 'wb') as handle:
        cPickle.dump(obj, handle, protocol=cPickle.HIGHEST_PROTOCOL)
|
class Model(object):
    """Base model holding a list of shared parameters, with numpy-backed
    save/load of their values by name."""

    def __init__(self):
        self.floatX = theano.config.floatX
        self.params = []

    def save(self, filename):
        """Save all parameter values to `filename` as a .npz archive keyed
        by parameter name."""
        numpy.savez(filename, **{p.name: p.get_value() for p in self.params})

    def load(self, filename, parameter_strings_to_ignore=[]):
        """Load parameter values from `filename`.

        Parameters whose name contains any of the substrings in
        `parameter_strings_to_ignore` keep their current (fresh) values.
        Raises on shape mismatch; logs an error for missing or unknown names.
        """
        vals = numpy.load(filename)
        for p in self.params:
            skip = False
            for pattern in parameter_strings_to_ignore:
                if pattern in p.name:
                    logger.debug('Initializing parameter {} as in new model'.format(p.name))
                    skip = True
            if skip:
                continue
            if p.name in vals:
                logger.debug('Loading {} of {}'.format(p.name, p.get_value(borrow=True).shape))
                if p.get_value().shape != vals[p.name].shape:
                    raise Exception('Shape mismatch: {} != {} for {}'.format(p.get_value().shape, vals[p.name].shape, p.name))
                p.set_value(vals[p.name])
            else:
                logger.error('No parameter {} given: default initialization used'.format(p.name))
        unknown = set(vals.keys()) - {p.name for p in self.params}
        if len(unknown):
            logger.error('Unknown parameters {} given'.format(unknown))
|
class Timer(object):
    """Accumulates wall-clock time over repeated start()/finish() intervals."""

    def __init__(self):
        self.total = 0

    def start(self):
        # Mark the beginning of the current interval.
        self.start_time = time.time()

    def finish(self):
        # Fold the elapsed interval into the running total.
        self.total += time.time() - self.start_time
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.