# NOTE: removed dataset-extraction artifact (stray markdown table header).
class DumbDataModule(BaseDataModule):
    """Dumb data module for testing models with random data.

    Args:
        train: Configuration for the training data to define the loading
            of data and the dataloader.
        val: Configuration for the validation data to define the loading
            of data and the dataloader.
        test: Configuration for the testing data to define the loading
            of data and the dataloader.
        num_classes: Number of classes exposed by :attr:`num_classes`.
    """

    def __init__(
        self,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        num_classes: int = 10,
    ) -> None:
        super().__init__(datadir='', train=train, val=val, test=test)
        # Stored under a different name so ``num_classes`` can remain a property.
        self.classes = num_classes

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return self.classes

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate the random datasets required for ``stage``.

        Raises:
            RuntimeError: If the configuration for the requested stage is missing.
        """
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            self.train_dataset = DumbDataset(**self.train.dataset)
            if self.val is not None:
                self.val_dataset = DumbDataset(**self.val.dataset)
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            # The original re-checked ``self.test is not None`` here; the
            # RuntimeError above already guarantees it, so the check was dead.
            self.test_dataset = DumbDataset(**self.test.dataset)
|
class FolderDataModule(BaseDataModule):
    """Base datamodule for folder datasets.

    Args:
        datadir: Where to save/load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
    ) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)

    @property
    def class_list(self) -> Optional[List[str]]:
        """If not None, list of class selected."""
        return None

    def _verify_classes(self, split: str = 'train') -> None:
        """Check that ``datadir/split`` contains exactly ``num_classes`` class folders."""
        split_dir = self.datadir / split
        # ``child`` instead of ``dir`` — the original shadowed the builtin.
        class_dirs = [child.stem for child in split_dir.iterdir() if child.is_dir()]
        assert len(class_dirs) == self.num_classes, f'{len(class_dirs)}/{self.num_classes} classes found: {class_dirs}'

    def _verify_split(self, split: str) -> None:
        """Check that ``datadir`` contains a subfolder named ``split``.

        Raises:
            FileNotFoundError: If the split folder is missing.
        """
        entries = [child.stem for child in self.datadir.iterdir()]
        if split not in entries:
            raise FileNotFoundError(f"the split '{split}' was not found in {os.path.abspath(self.datadir)}, make sure the folder contains a subfolder named {split}")

    def prepare_data(self) -> None:
        """Verify the on-disk folder layout for every configured split."""
        if self.train is not None:
            self._verify_split('train')
            self._verify_classes('train')
        if self.val is not None:
            self._verify_split('val')
            self._verify_classes('val')
        if self.test is not None:
            self._verify_split('test')

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate transforms and :class:`ImageFolder` datasets for ``stage``.

        Raises:
            RuntimeError: If the configuration for the requested stage is missing.
        """
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            traindir = self.datadir / 'train'
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            rank_zero_info(f'Train transform: {self.train_transform}')
            # Unified presence check: the original used ``get`` for train but
            # ``'dataset' in ...`` for val/test, which crashed on a
            # present-but-empty ``dataset`` section.  ``or {}`` treats a
            # missing, None or empty section identically.
            extra = self.train.get('dataset')
            self.train_dataset = ImageFolder(traindir, transform=self.train_transform, class_list=self.class_list, **(extra or {}))
            if self.val is not None:
                valdir = self.datadir / 'val'
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                rank_zero_info(f'Val transform: {self.val_transform}')
                # NOTE(review): val/test do not receive ``class_list`` — confirm intended.
                extra = self.val.get('dataset')
                self.val_dataset = ImageFolder(valdir, transform=self.val_transform, **(extra or {}))
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            testdir = self.datadir / 'test'
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            rank_zero_info(f'Test transform: {self.test_transform}')
            extra = self.test.get('dataset')
            self.test_dataset = ImageFolder(testdir, transform=self.test_transform, **(extra or {}))
|
class Hmdb51DataModule(VideoBaseDataModule):
    """Datamodule for the HMDB51 dataset.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored.
            All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
        split_id: Split used for training and testing.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        split_id: int = 1,
        video_path_prefix: str = '',
        decode_audio: bool = False,
        decoder: str = 'pyav',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default,
        # which is shared across every call of the function.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )
        self.split_id = split_id

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 51

    def _verify_classes(self) -> None:
        """Check that ``datadir`` contains exactly ``num_classes`` class folders."""
        class_dirs = [child.stem for child in self.datadir.iterdir() if child.is_dir()]
        assert len(class_dirs) == self.num_classes, f'{len(class_dirs)}/{self.num_classes} classes found: {class_dirs}'

    def prepare_data(self) -> None:
        """Verify the dataset layout on disk."""
        self._verify_classes()

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate the HMDB51 datasets required for ``stage``.

        Raises:
            RuntimeError: If the configuration for the requested stage is missing.
        """
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            self.train_clip_sampler = hydra.utils.instantiate(self.train.clip_sampler)
            rank_zero_info(f'Train transform: {self.train_transform}')
            self.train_dataset = Hmdb51(self.traindir, clip_sampler=self.train_clip_sampler, transform=self.train_transform, video_path_prefix=self.train_video_path_prefix, split_id=self.split_id, split_type='train', decode_audio=self.decode_audio, decoder=self.train_decoder, decoder_args=self.train_decoder_args)
            if self.val is not None:
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                self.val_clip_sampler = hydra.utils.instantiate(self.val.clip_sampler)
                rank_zero_info(f'Val transform: {self.val_transform}')
                # NOTE(review): validation reads split_type='test' (HMDB51 has no
                # official val split) — confirm this is intended.
                self.val_dataset = Hmdb51(self.valdir, clip_sampler=self.val_clip_sampler, transform=self.val_transform, video_path_prefix=self.val_video_path_prefix, split_id=self.split_id, split_type='test', decode_audio=self.decode_audio, decoder=self.val_decoder, decoder_args=self.val_decoder_args)
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            self.test_clip_sampler = hydra.utils.instantiate(self.test.clip_sampler)
            rank_zero_info(f'Test transform: {self.test_transform}')
            self.test_dataset = Hmdb51(self.testdir, clip_sampler=self.test_clip_sampler, transform=self.test_transform, video_path_prefix=self.test_video_path_prefix, split_id=self.split_id, split_type='test', decode_audio=self.decode_audio, decoder=self.test_decoder, decoder_args=self.test_decoder_args)
|
class ImagenetDataModule(FolderDataModule):
    """Base datamodule for the Imagenet dataset.

    Args:
        datadir: Where to load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
    ) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)

    def prepare_data(self) -> None:
        """Verify the folder layout of the configured splits.

        Both validation and testing are checked against the ``val`` folder
        (presumably because ImageNet test labels are not distributed — confirm).
        """
        if self.train is not None:
            self._verify_split('train')
            self._verify_classes('train')
        for config in (self.val, self.test):
            if config is not None:
                self._verify_split('val')
                self._verify_classes('val')

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate datasets for ``stage``; testing reads the ``val`` folder.

        Raises:
            RuntimeError: If testing is requested without a test configuration.
        """
        if stage != 'test':
            super().setup(stage)
            return
        if self.test is None:
            raise RuntimeError('No testing configuration has been passed.')
        # Testing is served by the validation folder, matching prepare_data above.
        testdir = self.datadir / 'val'
        self.test_transform = hydra.utils.instantiate(self.test.transform)
        rank_zero_info(f'Test transform: {self.test_transform}')
        if 'dataset' in self.test:
            self.test_dataset = ImageFolder(testdir, transform=self.test_transform, **self.test.dataset)
        else:
            self.test_dataset = ImageFolder(testdir, transform=self.test_transform)

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 1000
|
class Imagenet100DataModule(ImagenetDataModule):
    """Base datamodule for the Imagenet100 dataset.

    Args:
        datadir: Where to load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.

    Example::

        datamodule = Imagenet100DataModule(datadir)
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
    ) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)

    def _verify_classes(self, split: str = 'train') -> None:
        """Check that every expected class folder exists under ``datadir/split``.

        Uses a set for O(1) membership (the original did an O(n^2) list scan)
        and reports the *missing* classes instead of dumping every folder found.
        """
        split_dir = self.datadir / split
        found = {child.stem for child in split_dir.iterdir() if child.is_dir()}
        missing = [class_ for class_ in self.class_list if class_ not in found]
        assert not missing, f'missing classes in {split_dir}: {missing}'

    @property
    def class_list(self) -> List[str]:
        """List of classes."""
        return ['n07836838', 'n04111531', 'n04493381', 'n02093428', 'n04067472', 'n01773797', 'n02108089', 'n02113978', 'n03930630', 'n02085620', 'n02138441', 'n02090622', 'n04238763', 'n03637318', 'n01692333', 'n02804414', 'n02113799', 'n01978455', 'n02089973', 'n01749939', 'n03794056', 'n03017168', 'n04435653', 'n03785016', 'n02114855', 'n04336792', 'n02259212', 'n03775546', 'n01558993', 'n03891251', 'n03777754', 'n02109047', 'n02326432', 'n02091831', 'n02123045', 'n03642806', 'n02119022', 'n01729322', 'n02105505', 'n04026417', 'n03494278', 'n03584829', 'n02231487', 'n03085013', 'n04229816', 'n07714571', 'n04429376', 'n03594734', 'n04517823', 'n01855672', 'n02018207', 'n03259280', 'n03837869', 'n03424325', 'n03764736', 'n04592741', 'n02104029', 'n04127249', 'n02100583', 'n13040303', 'n03062245', 'n02087046', 'n02869837', 'n04485082', 'n02172182', 'n02396427', 'n03787032', 'n03903868', 'n02107142', 'n02788148', 'n02974003', 'n02106550', 'n03492542', 'n03530642', 'n02086240', 'n02859443', 'n03379051', 'n01735189', 'n04589890', 'n07753275', 'n04136333', 'n02089867', 'n04099969', 'n03032252', 'n02483362', 'n03947888', 'n02488291', 'n04418357', 'n01983481', 'n01820546', 'n01980166', 'n02086910', 'n02701002', 'n02009229', 'n02877765', 'n07831146', 'n07715103', 'n13037406', 'n02116738', 'n02099849']

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 100
|
class KineticsDataModule(VideoBaseDataModule, ABC):
    """Base datamodule for the Kinetics datasets.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decode_audio: bool = False,
        decoder: str = 'pyav',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default,
        # which is shared across every call of the function.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    def _verify_classes(self, split: str = 'train') -> None:
        """Check that ``datadir/split`` contains exactly ``num_classes`` class folders."""
        split_dir = self.datadir / split
        class_dirs = [child.stem for child in split_dir.iterdir() if child.is_dir()]
        assert len(class_dirs) == self.num_classes, f'{len(class_dirs)}/{self.num_classes} classes found: {class_dirs}'

    def _verify_split(self, split: str) -> None:
        """Check that ``datadir`` contains a subfolder named ``split``.

        Raises:
            FileNotFoundError: If the split folder is missing.
        """
        entries = [child.stem for child in self.datadir.iterdir()]
        if split not in entries:
            raise FileNotFoundError(f"the split '{split}' was not found in {os.path.abspath(self.datadir)}, make sure the folder contains a subfolder named {split}")

    def prepare_data(self) -> None:
        """Verify the folder layout for the configured splits."""
        if self.train is not None:
            self._verify_split('train')
            self._verify_classes('train')
        # NOTE(review): both val and test are verified against 'val' here while
        # setup() reads self.testdir for testing — confirm consistency.
        if (self.val is not None) or (self.test is not None):
            self._verify_split('val')
            self._verify_classes('val')

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate the Kinetics datasets required for ``stage``.

        Raises:
            RuntimeError: If the configuration for the requested stage is missing.
        """
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            self.train_clip_sampler = hydra.utils.instantiate(self.train.clip_sampler)
            rank_zero_info(f'Train transform: {self.train_transform}')
            self.train_dataset = Kinetics(self.traindir, clip_sampler=self.train_clip_sampler, transform=self.train_transform, video_path_prefix=self.train_video_path_prefix, decode_audio=self.decode_audio, decoder=self.train_decoder, decoder_args=self.train_decoder_args)
            if self.val is not None:
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                self.val_clip_sampler = hydra.utils.instantiate(self.val.clip_sampler)
                rank_zero_info(f'Val transform: {self.val_transform}')
                self.val_dataset = Kinetics(self.valdir, clip_sampler=self.val_clip_sampler, transform=self.val_transform, video_path_prefix=self.val_video_path_prefix, decode_audio=self.decode_audio, decoder=self.val_decoder, decoder_args=self.val_decoder_args)
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            self.test_clip_sampler = hydra.utils.instantiate(self.test.clip_sampler)
            rank_zero_info(f'Test transform: {self.test_transform}')
            self.test_dataset = Kinetics(self.testdir, clip_sampler=self.test_clip_sampler, transform=self.test_transform, video_path_prefix=self.test_video_path_prefix, decode_audio=self.decode_audio, decoder=self.test_decoder, decoder_args=self.test_decoder_args)
|
class Kinetics400DataModule(KineticsDataModule):
    """Datamodule for the Kinetics400 datasets.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decode_audio: bool = False,
        decoder: str = 'pyav',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 400
|
class Kinetics200DataModule(KineticsDataModule):
    """Datamodule for the Mini-Kinetics200 dataset.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory with the videos that are loaded in LabeledVideoDataset. All the video paths before loading are prefixed with this path.
        decode_audio: If True, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decode_audio: bool = False,
        decoder: str = 'pyav',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 200
|
class Kinetics600DataModule(KineticsDataModule):
    """Datamodule for the Kinetics600 datasets.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decode_audio: bool = False,
        decoder: str = 'pyav',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 600
|
class Kinetics700DataModule(KineticsDataModule):
    """Datamodule for the Kinetics700 datasets.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decode_audio: bool = False,
        decoder: str = 'pyav',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decode_audio=decode_audio,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 700
|
class SoccerNetDataModule(VideoBaseDataModule, ABC):
    """Base datamodule for the SoccerNet datasets.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decoder: str = 'frame',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default,
        # which is shared across every call of the function.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decoder=decoder,
            decode_audio=False,
            decoder_args=decoder_args if decoder_args is not None else {},
        )
        # Samplers are created lazily in setup(); None until then.
        self.train_clip_sampler = None
        self.val_clip_sampler = None
        self.test_clip_sampler = None

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 17

    def prepare_data(self) -> None:
        """Delegate data preparation to the parent datamodule."""
        super().prepare_data()

    @property
    def train_num_samples(self) -> int:
        """Number of samples in the training dataset."""
        # isinstance (not ``type(...) is``) also accepts wrapper subclasses.
        if isinstance(self.train_clip_sampler, SoccerNetClipSamplerDistributedSamplerWrapper):
            return len(self.train_clip_sampler.dataset)
        return len(self.train_clip_sampler) if self.train_clip_sampler else 0

    @property
    def val_num_samples(self) -> int:
        """Number of samples in the validation dataset."""
        if isinstance(self.val_clip_sampler, SoccerNetClipSamplerDistributedSamplerWrapper):
            return len(self.val_clip_sampler.dataset)
        return len(self.val_clip_sampler) if self.val_clip_sampler else 0

    @property
    def test_num_samples(self) -> int:
        """Number of samples in the testing dataset."""
        if isinstance(self.test_clip_sampler, SoccerNetClipSamplerDistributedSamplerWrapper):
            return len(self.test_clip_sampler.dataset)
        return len(self.test_clip_sampler) if self.test_clip_sampler else 0

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate datasets and clip samplers for ``stage``.

        When torch.distributed is initialized, samplers are wrapped so each
        process sees its own shard.

        Raises:
            RuntimeError: If the configuration for the requested stage is missing.
        """
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            rank_zero_info(f'Train transform: {self.train_transform}')
            self.train_dataset = soccernet_dataset(self.traindir, transform=self.train_transform, video_path_prefix=self.train_video_path_prefix, decoder=self.train_decoder, decoder_args=self.train_decoder_args, label_args=self.train.dataset.get('label_args', {}), features_args=self.train.dataset.get('feature_args', None), task=self.train.dataset.get('task', SoccerNetTask.ACTION))
            self.train_clip_sampler = hydra.utils.instantiate(self.train.clip_sampler, data_source=self.train_dataset)
            if dist.is_available() and dist.is_initialized():
                self.train_clip_sampler = SoccerNetClipSamplerDistributedSamplerWrapper(self.train_clip_sampler)
            rank_zero_info(f'Use {self.train_clip_sampler} for train sampler, make sure you correctly configured the sampler.')
            if self.val is not None:
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                rank_zero_info(f'Val transform: {self.val_transform}')
                self.val_dataset = soccernet_dataset(self.valdir, transform=self.val_transform, video_path_prefix=self.val_video_path_prefix, decoder=self.val_decoder, decoder_args=self.val_decoder_args, label_args=self.val.dataset.get('label_args', {}), features_args=self.val.dataset.get('feature_args', None), task=self.val.dataset.get('task', SoccerNetTask.ACTION))
                self.val_clip_sampler = hydra.utils.instantiate(self.val.clip_sampler, data_source=self.val_dataset)
                if dist.is_available() and dist.is_initialized():
                    self.val_clip_sampler = SoccerNetClipSamplerDistributedSamplerWrapper(self.val_clip_sampler)
                rank_zero_info(f'Use {self.val_clip_sampler} for val sampler, make sure you correctly configured the sampler.')
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            rank_zero_info(f'Test transform: {self.test_transform}')
            self.test_dataset = soccernet_dataset(self.testdir, transform=self.test_transform, video_path_prefix=self.test_video_path_prefix, decoder=self.test_decoder, decoder_args=self.test_decoder_args, label_args=self.test.dataset.get('label_args', {}), features_args=self.test.dataset.get('feature_args', None), task=self.test.dataset.get('task', SoccerNetTask.ACTION))
            self.test_clip_sampler = hydra.utils.instantiate(self.test.clip_sampler, data_source=self.test_dataset)
            if dist.is_available() and dist.is_initialized():
                self.test_clip_sampler = SoccerNetClipSamplerDistributedSamplerWrapper(self.test_clip_sampler)
            rank_zero_info(f'Use {self.test_clip_sampler} for test sampler, make sure you correctly configured the sampler.')

    def train_dataloader(self) -> DataLoader:
        """Build the training dataloader.

        Raises:
            RuntimeError: If no training configuration has been passed.
        """
        if self.train is None:
            raise RuntimeError('No passed training configuration so dataloader cannot be retrieved.')
        return DataLoader(self.train_dataset, batch_size=self.train_local_batch_size, collate_fn=self.train_collate_fn, sampler=self.train_clip_sampler, **self.train.loader)

    def val_dataloader(self) -> DataLoader:
        """Build the validation dataloader.

        Raises:
            RuntimeError: If no validation configuration has been passed.
        """
        if self.val is None:
            raise RuntimeError('No passed validation configuration so dataloader cannot be retrieved.')
        return DataLoader(self.val_dataset, batch_size=self.val_local_batch_size, collate_fn=self.val_collate_fn, sampler=self.val_clip_sampler, **self.val.loader)

    def test_dataloader(self) -> DataLoader:
        """Build the testing dataloader.

        Raises:
            RuntimeError: If no testing configuration has been passed.
        """
        if self.test is None:
            raise RuntimeError('No passed testing configuration so dataloader cannot be retrieved.')
        return DataLoader(self.test_dataset, batch_size=self.test_local_batch_size, collate_fn=self.test_collate_fn, sampler=self.test_clip_sampler, **self.test.loader)
|
class ImageSoccerNetDataModule(SoccerNetDataModule):
    """Base datamodule for the SoccerNet image datasets.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder. ``None`` means no extra arguments.
    """

    def __init__(
        self,
        datadir: str,
        train: Optional[DictConfig] = None,
        val: Optional[DictConfig] = None,
        test: Optional[DictConfig] = None,
        video_path_prefix: str = '',
        decoder: str = 'frame',
        decoder_args: Optional[DictConfig] = None,
    ) -> None:
        # ``None`` default replaces the original mutable ``{}`` default.
        # The parent __init__ already initializes the three clip samplers to
        # None and the *_num_samples properties are inherited unchanged, so
        # the duplicated copies from the original were removed.
        super().__init__(
            datadir=datadir,
            train=train,
            val=val,
            test=test,
            video_path_prefix=video_path_prefix,
            decoder=decoder,
            decoder_args=decoder_args if decoder_args is not None else {},
        )

    def setup(self, stage: Optional[str] = None) -> None:
        """Instantiate image datasets and clip samplers for ``stage``.

        When torch.distributed is initialized, samplers are wrapped so each
        process sees its own shard.

        Raises:
            RuntimeError: If the configuration for the requested stage is missing.
        """
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            rank_zero_info(f'Train transform: {self.train_transform}')
            self.train_dataset = image_soccernet_dataset(self.traindir, transform=self.train_transform, video_path_prefix=self.train_video_path_prefix, decoder=self.train_decoder, decoder_args=self.train_decoder_args)
            self.train_clip_sampler = hydra.utils.instantiate(self.train.clip_sampler, data_source=self.train_dataset)
            if dist.is_available() and dist.is_initialized():
                self.train_clip_sampler = SoccerNetClipSamplerDistributedSamplerWrapper(self.train_clip_sampler)
            rank_zero_info(f'Use {self.train_clip_sampler} for train sampler, make sure you correctly configured the sampler.')
            if self.val is not None:
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                rank_zero_info(f'Val transform: {self.val_transform}')
                self.val_dataset = image_soccernet_dataset(self.valdir, transform=self.val_transform, video_path_prefix=self.val_video_path_prefix, decoder=self.val_decoder, decoder_args=self.val_decoder_args)
                self.val_clip_sampler = hydra.utils.instantiate(self.val.clip_sampler, data_source=self.val_dataset)
                if dist.is_available() and dist.is_initialized():
                    self.val_clip_sampler = SoccerNetClipSamplerDistributedSamplerWrapper(self.val_clip_sampler)
                rank_zero_info(f'Use {self.val_clip_sampler} for val sampler, make sure you correctly configured the sampler.')
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            rank_zero_info(f'Test transform: {self.test_transform}')
            self.test_dataset = image_soccernet_dataset(self.testdir, transform=self.test_transform, video_path_prefix=self.test_video_path_prefix, decoder=self.test_decoder, decoder_args=self.test_decoder_args)
            self.test_clip_sampler = hydra.utils.instantiate(self.test.clip_sampler, data_source=self.test_dataset)
            if dist.is_available() and dist.is_initialized():
                self.test_clip_sampler = SoccerNetClipSamplerDistributedSamplerWrapper(self.test_clip_sampler)
            rank_zero_info(f'Use {self.test_clip_sampler} for test sampler, make sure you correctly configured the sampler.')
|
class SpotDataModule(VideoBaseDataModule, ABC):
'Base datamodule for the SoccerNet datasets.\n\n Args:\n datadir: Path to the data (eg: csv, folder, ...).\n train: Configuration for the training data to define the loading of data, the transforms and the dataloader.\n val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.\n test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.\n video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.\n decoder: Defines which backend should be used to decode videos.\n decoder_args: Arguments to configure the decoder.\n '
def __init__(
    self,
    datadir: str,
    train: Optional[DictConfig] = None,
    val: Optional[DictConfig] = None,
    test: Optional[DictConfig] = None,
    video_path_prefix: str = '',
    decoder: str = 'frame',
    decoder_args: Optional[DictConfig] = None,
) -> None:
    """Initialize the datamodule; ``decoder_args=None`` replaces the original mutable ``{}`` default."""
    super().__init__(
        datadir=datadir,
        train=train,
        val=val,
        test=test,
        video_path_prefix=video_path_prefix,
        decoder=decoder,
        decode_audio=False,
        decoder_args=decoder_args if decoder_args is not None else {},
    )
    # Samplers are created lazily in setup(); None until then.
    self.train_clip_sampler = None
    self.val_clip_sampler = None
    self.test_clip_sampler = None
@property
def num_classes(self) -> int:
return (- 1)
def prepare_data(self) -> None:
super().prepare_data()
@property
def train_num_samples(self) -> int:
'Number of samples in the training dataset.'
if (type(self.train_clip_sampler) is SpotClipSamplerDistributedSamplerWrapper):
return len(self.train_clip_sampler.dataset)
return (len(self.train_clip_sampler) if self.train_clip_sampler else 0)
@property
def val_num_samples(self) -> int:
'Number of samples in the validation dataset.'
if (type(self.val_clip_sampler) is SpotClipSamplerDistributedSamplerWrapper):
return len(self.val_clip_sampler.dataset)
return (len(self.val_clip_sampler) if self.val_clip_sampler else 0)
@property
def test_num_samples(self) -> int:
'Number of samples in the testing dataset.'
if (type(self.test_clip_sampler) is SpotClipSamplerDistributedSamplerWrapper):
return len(self.test_clip_sampler.dataset)
return (len(self.test_clip_sampler) if self.test_clip_sampler else 0)
def setup(self, stage: Optional[str]=None) -> None:
if (stage == 'fit'):
if (self.train is None):
raise RuntimeError('No training configuration has been passed.')
self.train_transform = hydra.utils.instantiate(self.train.transform)
rank_zero_info(f'Train transform: {self.train_transform}')
self.train_dataset = spot_dataset(self.traindir, transform=self.train_transform, video_path_prefix=self.train_video_path_prefix, decoder=self.train_decoder, decoder_args=self.train_decoder_args, label_args=self.train.dataset.get('label_args', {}), features_args=self.train.dataset.get('feature_args', None), dataset=self.train.dataset.get('dataset', SpotDatasets.TENNIS))
self.train_clip_sampler = hydra.utils.instantiate(self.train.clip_sampler, data_source=self.train_dataset)
if (dist.is_available() and dist.is_initialized()):
self.train_clip_sampler = SpotClipSamplerDistributedSamplerWrapper(self.train_clip_sampler)
rank_zero_info(f'Use {self.train_clip_sampler} for train sampler, make sure you correctly configured the sampler.')
if (self.val is not None):
self.val_transform = hydra.utils.instantiate(self.val.transform)
rank_zero_info(f'Val transform: {self.val_transform}')
self.val_dataset = spot_dataset(self.valdir, transform=self.val_transform, video_path_prefix=self.val_video_path_prefix, decoder=self.val_decoder, decoder_args=self.val_decoder_args, label_args=self.val.dataset.get('label_args', {}), features_args=self.val.dataset.get('feature_args', None), dataset=self.val.dataset.get('dataset', SpotDatasets.TENNIS))
self.val_clip_sampler = hydra.utils.instantiate(self.val.clip_sampler, data_source=self.val_dataset)
if (dist.is_available() and dist.is_initialized()):
self.val_clip_sampler = SpotClipSamplerDistributedSamplerWrapper(self.val_clip_sampler)
rank_zero_info(f'Use {self.val_clip_sampler} for val sampler, make sure you correctly configured the sampler.')
elif (stage == 'test'):
if (self.test is None):
raise RuntimeError('No testing configuration has been passed.')
self.test_transform = hydra.utils.instantiate(self.test.transform)
rank_zero_info(f'Test transform: {self.test_transform}')
self.test_dataset = spot_dataset(self.testdir, transform=self.test_transform, video_path_prefix=self.test_video_path_prefix, decoder=self.test_decoder, decoder_args=self.test_decoder_args, label_args=self.test.dataset.get('label_args', {}), features_args=self.test.dataset.get('feature_args', None), dataset=self.test.dataset.get('dataset', SpotDatasets.TENNIS))
self.test_clip_sampler = hydra.utils.instantiate(self.test.clip_sampler, data_source=self.test_dataset)
if (dist.is_available() and dist.is_initialized()):
self.test_clip_sampler = SpotClipSamplerDistributedSamplerWrapper(self.test_clip_sampler)
rank_zero_info(f'Use {self.test_clip_sampler} for test sampler, make sure you correctly configured the sampler.')
def train_dataloader(self) -> DataLoader:
if (self.train is None):
raise RuntimeError('No passed training configuration so dataloader cannot be retrieved.')
loader = DataLoader(self.train_dataset, batch_size=self.train_local_batch_size, collate_fn=self.train_collate_fn, sampler=self.train_clip_sampler, **self.train.loader)
return loader
def val_dataloader(self) -> DataLoader:
if (self.val is None):
raise RuntimeError('No passed validation configuration so dataloader cannot be retrieved.')
loader = DataLoader(self.val_dataset, batch_size=self.val_local_batch_size, collate_fn=self.val_collate_fn, sampler=self.val_clip_sampler, **self.val.loader)
return loader
def test_dataloader(self) -> DataLoader:
if (self.test is None):
raise RuntimeError('No passed testing configuration so dataloader cannot be retrieved.')
loader = DataLoader(self.test_dataset, batch_size=self.test_local_batch_size, collate_fn=self.test_collate_fn, sampler=self.test_clip_sampler, **self.test.loader)
return loader
|
class STL10DataModule(BaseDataModule):
    """Datamodule for the STL10 dataset in SSL setting.

    Args:
        datadir: Where to save/load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        folds: One of :math:`{0-9}` or ``None``. For training, loads one of the 10 pre-defined folds of 1k samples for the standard evaluation procedure. If no value is passed, loads the 5k samples.
        training_split: Split used for the training dataset.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, folds: Optional[int]=None, training_split: str='unlabeled') -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)
        self.folds = folds
        self.training_split = training_split

    @property
    def num_classes(self) -> int:
        'Number of classes.'
        return 10

    def prepare_data(self) -> None:
        # Download only the splits required by the configured stages.
        if self.train is not None:
            STL10(self.datadir, split='unlabeled', download=True, transform=transforms.PILToTensor())
            STL10(self.datadir, folds=self.folds, split='train', download=True, transform=transforms.PILToTensor())
        if (self.val is not None) or (self.test is not None):
            # Validation and testing both read the 'test' split.
            STL10(self.datadir, split='test', download=True, transform=transforms.PILToTensor())

    def setup(self, stage: Optional[str]=None) -> None:
        """Build transforms and datasets for the requested stage."""
        if stage == 'fit':
            # Fail with a clear message (consistent with the other datamodules)
            # instead of an AttributeError on `self.train.transform`.
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            if self.training_split not in ('train', 'train+unlabeled', 'unlabeled'):
                # `assert` is stripped under `python -O`; raise explicitly instead.
                raise ValueError(f'Invalid training_split: {self.training_split}')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            self.train_dataset = DictDataset(STL10(self.datadir, folds=self.folds, split=self.training_split, download=False, transform=self.train_transform))
            if self.val is not None:
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                self.val_dataset = DictDataset(STL10(self.datadir, split='test', download=False, transform=self.val_transform))
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            self.test_dataset = DictDataset(STL10(self.datadir, split='test', transform=self.test_transform))
|
class TinyImagenetDataModule(ImagenetDataModule):
    """Datamodule for the Tiny Imagenet dataset.

    Identical to :class:`ImagenetDataModule` except for the number of classes.

    Args:
        datadir: Where to load the data.
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)

    @property
    def num_classes(self) -> int:
        """Number of classes."""
        return 200
|
class Ucf101DataModule(VideoBaseDataModule):
    """Datamodule for the UCF101 dataset.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder.
        split_id: Split used for training and testing.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, video_path_prefix: str='', decode_audio: bool=False, decoder: str='pyav', decoder_args: Optional[DictConfig]=None, split_id: int=1) -> None:
        # `decoder_args` defaults to None rather than a mutable `{}` shared across calls.
        super().__init__(datadir=datadir, train=train, val=val, test=test, video_path_prefix=video_path_prefix, decode_audio=decode_audio, decoder=decoder, decoder_args=(decoder_args if decoder_args is not None else {}))
        self.split_id = split_id

    @property
    def num_classes(self) -> int:
        'Number of classes.'
        return 101

    def _verify_classes(self) -> None:
        """Check that ``datadir`` contains exactly one sub-directory per class."""
        # `class_dirs` instead of `dir` (the original shadowed the builtin).
        class_dirs = [entry.stem for entry in self.datadir.iterdir() if entry.is_dir()]
        if len(class_dirs) != self.num_classes:
            # `assert` would be stripped under `python -O`; raise explicitly.
            raise RuntimeError(f'{len(class_dirs)}/{self.num_classes} classes found: {class_dirs}')

    def prepare_data(self) -> None:
        self._verify_classes()

    def setup(self, stage: Optional[str]=None) -> None:
        """Build transforms, clip samplers and datasets for the requested stage."""
        if stage == 'fit':
            if self.train is None:
                raise RuntimeError('No training configuration has been passed.')
            self.train_transform = hydra.utils.instantiate(self.train.transform)
            self.train_clip_sampler = hydra.utils.instantiate(self.train.clip_sampler)
            rank_zero_info(f'Train transform: {self.train_transform}')
            self.train_dataset = Ucf101(self.traindir, clip_sampler=self.train_clip_sampler, transform=self.train_transform, video_path_prefix=self.train_video_path_prefix, split_id=self.split_id, split_type='train', decode_audio=self.decode_audio, decoder=self.train_decoder, decoder_args=self.train_decoder_args)
            if self.val is not None:
                self.val_transform = hydra.utils.instantiate(self.val.transform)
                self.val_clip_sampler = hydra.utils.instantiate(self.val.clip_sampler)
                rank_zero_info(f'Val transform: {self.val_transform}')
                # Validation reads the 'test' split (no dedicated val split here).
                self.val_dataset = Ucf101(self.valdir, clip_sampler=self.val_clip_sampler, transform=self.val_transform, video_path_prefix=self.val_video_path_prefix, split_id=self.split_id, split_type='test', decode_audio=self.decode_audio, decoder=self.val_decoder, decoder_args=self.val_decoder_args)
        elif stage == 'test':
            if self.test is None:
                raise RuntimeError('No testing configuration has been passed.')
            self.test_transform = hydra.utils.instantiate(self.test.transform)
            self.test_clip_sampler = hydra.utils.instantiate(self.test.clip_sampler)
            rank_zero_info(f'Test transform: {self.test_transform}')
            self.test_dataset = Ucf101(self.testdir, clip_sampler=self.test_clip_sampler, transform=self.test_transform, video_path_prefix=self.test_video_path_prefix, split_id=self.split_id, split_type='test', decode_audio=self.decode_audio, decoder=self.test_decoder, decoder_args=self.test_decoder_args)
|
class VideoBaseDataModule(BaseDataModule, ABC):
    """Abstract class that inherits from BaseDataModule to follow standardized preprocessing for video datamodules.

    Args:
        datadir: Path to the data (eg: csv, folder, ...).
        train: Configuration for the training data to define the loading of data, the transforms and the dataloader.
        val: Configuration for the validation data to define the loading of data, the transforms and the dataloader.
        test: Configuration for the testing data to define the loading of data, the transforms and the dataloader.
        video_path_prefix: Path to root directory where the videos are stored. All the video paths before loading are prefixed with this path.
        decode_audio: If ``True``, decode audio.
        decoder: Defines which backend should be used to decode videos by default.
        decoder_args: Arguments to configure the default decoder.

    .. warning::
        The loader subconfigurations must not contain 'batch_size' that is automatically computed from the 'global_batch_size' specified in the configuration.
    """

    def __init__(self, datadir: str, train: Optional[DictConfig]=None, val: Optional[DictConfig]=None, test: Optional[DictConfig]=None, video_path_prefix: str='', decode_audio: bool=False, decoder: str='pyav', decoder_args: Optional[DictConfig]=None) -> None:
        super().__init__(datadir=datadir, train=train, val=val, test=test)
        self.datadir = Path(datadir)
        self.video_path_prefix = video_path_prefix
        self.decode_audio = decode_audio
        self.decoder = decoder
        # `decoder_args` defaults to None rather than a mutable `{}` shared across
        # calls; a fresh dict is always built here.
        self.decoder_args = dict(decoder_args) if decoder_args is not None else {}
        _instantiate_decoder_args(self.decoder_args, 'default')
        # Per-split settings fall back to the module-level defaults above.
        if train is not None:
            (self.train_decoder, self.train_decoder_args, self.train_video_path_prefix) = self._resolve_split_settings(train, 'train')
        if val is not None:
            (self.val_decoder, self.val_decoder_args, self.val_video_path_prefix) = self._resolve_split_settings(val, 'val')
        if test is not None:
            (self.test_decoder, self.test_decoder_args, self.test_video_path_prefix) = self._resolve_split_settings(test, 'test')

    def _resolve_split_settings(self, cfg: DictConfig, split: str):
        """Resolve ``(decoder, decoder_args, video_path_prefix)`` for one split.

        Any setting the split config does not override falls back to the
        datamodule-level default.
        """
        decoder = cfg.get('decoder', None)
        decoder = decoder if decoder is not None else self.decoder
        raw_decoder_args = cfg.get('decoder_args', None)
        if raw_decoder_args is not None:
            decoder_args = dict(raw_decoder_args)
            _instantiate_decoder_args(decoder_args, split)
        else:
            # Share the already-instantiated default decoder args.
            decoder_args = self.decoder_args
        if cfg.get('dataset') and cfg.dataset.get('video_path_prefix'):
            video_path_prefix = cfg.dataset.video_path_prefix
        else:
            video_path_prefix = self.video_path_prefix
        return decoder, decoder_args, video_path_prefix
|
def _instantiate_decoder_args(decoder_args: (Dict | None), split: str='train') -> None:
for (key, value) in decoder_args.items():
if ((key == 'frame_filter') and (not callable(value))):
decoder_args[key] = get_subsample_fn(decoder_args['frame_filter']['subsample_type'], decoder_args['frame_filter']['num_samples'])
elif ((key == 'transform') and (not callable(value))):
decoder_args[key] = hydra.utils.instantiate(decoder_args[key])
rank_zero_info(f'Decoder {split} transform: {decoder_args[key]}')
|
class ConstantClipsPerVideoSampler(ClipSampler):
    """Evenly splits the video into clips_per_video increments and samples clips of size clip_duration at these
    increments.

    Args:
        clip_duration: Duration of a clip.
        clips_per_video: Number of temporal clips to sample per video.
        augs_per_clip: Number of augmentations to be applied on each clip.
    """

    def __init__(self, clip_duration: float, clips_per_video: int, augs_per_clip: int=1) -> None:
        super().__init__(clip_duration)
        self._clips_per_video = clips_per_video
        self._augs_per_clip = augs_per_clip

    def __call__(self, last_clip_time: float, video_duration: float, annotation: Dict[(str, Any)]) -> ClipInfo:
        """Return the next clip, advancing the internal clip/augmentation counters.

        Args:
            last_clip_time: Not used by this sampler.
            video_duration: Duration (in seconds) of the video being sampled.
            annotation: Not used by this sampler.

        Returns:
            ``ClipInfo(clip_start, clip_end, clip_index, aug_index, is_last_clip)``
            with times in seconds; ``is_last_clip`` is ``True`` once
            ``clips_per_video`` clips were emitted or the end of the video is reached.
        """
        latest_start = Fraction(max(video_duration - self._clip_duration, 0))
        # Spacing between consecutive clip starts so the clips span the video.
        step = Fraction(latest_start, max(self._clips_per_video - 1, 1))
        start_sec = step * self._current_clip_index
        clip_index, aug_index = self._current_clip_index, self._current_aug_index
        # Exhaust every augmentation of the current clip before advancing.
        self._current_aug_index += 1
        if self._current_aug_index >= self._augs_per_clip:
            self._current_aug_index = 0
            self._current_clip_index += 1
        exhausted = (self._current_clip_index >= self._clips_per_video
                     or step * self._current_clip_index > latest_start)
        if exhausted:
            self.reset()
        return ClipInfo(start_sec, start_sec + self._clip_duration, clip_index, aug_index, exhausted)

    def reset(self):
        # Restart from the first clip / first augmentation.
        self._current_clip_index = 0
        self._current_aug_index = 0
|
class ConstantClipsWithHalfOverlapPerVideoClipSampler(ClipSampler):
    """Slides a window of ``clip_duration`` across the video with a 50% overlap
    between consecutive clips (stride of ``clip_duration / 2``).

    Args:
        clip_duration: Duration of a clip.
        augs_per_clip: Number of augmentations to be applied on each clip.
    """
    def __init__(self, clip_duration: float, augs_per_clip: int=1) -> None:
        super().__init__(clip_duration)
        self._augs_per_clip = augs_per_clip
    def __call__(self, last_clip_time: float, video_duration: float, annotation: Dict[(str, Any)]) -> ClipInfo:
        """Return the next half-overlapping clip.

        Args:
            last_clip_time: Not used by this sampler.
            video_duration: The duration (in seconds) of the video being sampled.
            annotation: Not used by this sampler.

        Returns:
            ``ClipInfo(clip_start_time, clip_end_time, clip_index, aug_index, is_last_clip)``
            with times in seconds; ``is_last_clip`` is ``True`` once every
            half-overlapping window of the video has been sampled.
        """
        # Number of half-overlapping windows that fit in the video:
        # floor(duration / clip_duration) whole windows plus the windows in between.
        num_clips = max((((video_duration // self._clip_duration) * 2) - 1), 0)
        # Consecutive clips start clip_duration / 2 apart (50% overlap).
        clip_start_sec = (Fraction(self._clip_duration, 2) * self._current_clip_index)
        clip_index = self._current_clip_index
        aug_index = self._current_aug_index
        # Exhaust every augmentation of the current clip before advancing.
        self._current_aug_index += 1
        if (self._current_aug_index >= self._augs_per_clip):
            self._current_clip_index += 1
            self._current_aug_index = 0
        is_last_clip = False
        if (self._current_clip_index >= num_clips):
            self._current_clip_index = 0
            is_last_clip = True
        if is_last_clip:
            self.reset()
        return ClipInfo(clip_start_sec, (clip_start_sec + self._clip_duration), clip_index, aug_index, is_last_clip)
    def reset(self):
        # Restart from the first clip / first augmentation.
        self._current_clip_index = 0
        self._current_aug_index = 0
|
class MinimumFullCoverageClipSampler(ClipSampler):
    """Samples the minimum number of clips needed to cover the full video.

    The clips are spread evenly so that their union spans the whole duration.

    Args:
        clip_duration: Duration of a clip.
        augs_per_clip: Number of augmentations to be applied on each clip.
    """

    def __init__(self, clip_duration: float, augs_per_clip: int=1) -> None:
        super().__init__(clip_duration)
        self._augs_per_clip = augs_per_clip

    def __call__(self, last_clip_time: float, video_duration: float, annotation: Dict[(str, Any)]) -> ClipInfo:
        """Return the next covering clip.

        Args:
            last_clip_time: Not used.
            video_duration: The duration (in seconds) of the video being sampled.
            annotation: Not used by this sampler.

        Returns:
            ``ClipInfo(clip_start_time, clip_end_time, clip_index, aug_index, is_last_clip)``
            with times in seconds; ``is_last_clip`` is ``True`` once the end of
            the video is reached.
        """
        latest_start = Fraction(max(video_duration - self._clip_duration, 0))
        # Minimum number of clips whose union covers the whole duration.
        needed_clips = ceil(Fraction(video_duration / self._clip_duration))
        stride = Fraction(latest_start, max(needed_clips - 1, 1))
        start_sec = stride * self._current_clip_index
        clip_index, aug_index = self._current_clip_index, self._current_aug_index
        # Exhaust every augmentation of the current clip before advancing.
        self._current_aug_index += 1
        if self._current_aug_index >= self._augs_per_clip:
            self._current_aug_index = 0
            self._current_clip_index += 1
        covered = (self._current_clip_index >= needed_clips
                   or stride * self._current_clip_index > latest_start)
        if covered:
            self.reset()
        return ClipInfo(start_sec, start_sec + self._clip_duration, clip_index, aug_index, covered)

    def reset(self):
        # Restart from the first clip / first augmentation.
        self._current_clip_index = 0
        self._current_aug_index = 0
|
def compute_jittered_speed(factor: float, speed: int) -> float:
    """Compute jittered speed.

    Draws a value uniformly from ``[speed * (1 - factor), speed * (1 + factor)]``.

    Args:
        factor: The jittering factor.
        speed: The speed to jitter.

    Returns:
        float: the jittered speed.
    """
    low, high = speed * (1 - factor), speed * (1 + factor)
    return np.random.uniform(low, high)
|
class RandomClipSampler(ClipSampler):
    """Randomly samples clip of size clip_duration from the videos.

    Args:
        clip_duration: Duration of a clip.
        speeds: If not ``None``, the list of speeds to randomly apply on clip duration. At each call, :math:`clip\\_duration *= choice(speeds)`.
        jitter_factor: The jitter factor bound to apply on clip duration. At each call, :math:`clip\\_duration *= (1 + \\pm rand(0, jitter\\_factor))`.
    """

    def __init__(self, clip_duration: Union[(float, Fraction)], speeds: Optional[List[int]]=None, jitter_factor: float=0.0) -> None:
        super().__init__(clip_duration)
        self._speeds = speeds
        # A lone speed of 1 never changes the duration; treat it as "no speeds".
        if self._speeds is not None and len(self._speeds) == 1 and self._speeds[0] == 1:
            self._speeds = None
        self._jitter_factor = jitter_factor

    def __call__(self, last_clip_time: float, video_duration: float, annotation: Dict[(str, Any)]) -> ClipInfo:
        duration = self._clip_duration
        if self._speeds is not None:
            # Scale the duration by a randomly chosen playback speed
            # (dividing by 1/speed keeps the arithmetic in exact Fractions).
            chosen_speed = Fraction(np.random.choice(self._speeds))
            duration = Fraction(duration, Fraction(1, chosen_speed))
        if self._jitter_factor != 0.0:
            # Apply a random multiplicative jitter around 1.
            jitter = Fraction(compute_jittered_speed(self._jitter_factor, 1))
            duration = Fraction(duration, Fraction(1, jitter))
        latest_start = max(video_duration - duration, 0)
        start = Fraction(random.uniform(0, latest_start))
        return ClipInfo(start, start + duration, 0, 0, True)
|
class RandomMultiClipSampler(RandomClipSampler):
    """Randomly samples multiple clips of size ``clip_duration`` from the videos.

    Args:
        clip_duration: Duration of a clip.
        num_clips: Number of clips to sample.
        speeds: If not ``None``, the list of speeds to randomly apply on clip duration. At each call, :math:`clip\\_duration *= choice(speeds)`.
        jitter_factor: The jitter factor bound to apply on clip duration. At each call, :math:`clip\\_duration *= (1 + \\pm rand(0, jitter\\_factor))`.
    """

    def __init__(self, clip_duration: float, num_clips: int, speeds: Optional[List[int]]=None, jitter_factor: float=0.0) -> None:
        super().__init__(clip_duration, speeds=speeds, jitter_factor=jitter_factor)
        self._num_clips = num_clips

    def __call__(self, last_clip_time: float, video_duration: float, annotation: Dict[(str, Any)]) -> ClipInfoList:
        """Draw ``num_clips`` independent clips via the single-clip parent sampler."""
        sample_one = super().__call__
        clips = [sample_one(last_clip_time, video_duration, annotation) for _ in range(self._num_clips)]
        if not clips:
            # Degenerate but valid: zero clips requested.
            return ClipInfoList([], [], [], [], [])
        # Transpose [(start, end, clip_idx, aug_idx, is_last), ...] into the five
        # parallel lists ClipInfoList expects.
        starts, ends, clip_indices, aug_indices, last_flags = (list(field) for field in zip(*clips))
        return ClipInfoList(starts, ends, clip_indices, aug_indices, last_flags)
|
class RandomCVRLSampler(ClipSampler):
    """Randomly samples two clips of size ``clip_duration`` from the videos. The second clip is sampled
    after the first one, with the time gap between them drawn from a power law.

    References:
        - https://arxiv.org/abs/2008.03800

    Args:
        clip_duration: Duration of a clip.
        speeds: If not ``None``, the list of speeds to randomly apply on clip duration. At each call, :math:`clip\\_duration *= choice(speeds)`.
        jitter_factor: The jitter factor bound to apply on clip duration. At each call, :math:`clip\\_duration *= (1 + \\pm rand(0, jitter\\_factor))`.
        shuffle: If ``True``, shuffle the clip order for the output.
        power_cdf: Power coefficient for the power law.
        decreasing_cdf: Whether the power law curve is ascending or descending.
    """
    def __init__(self, clip_duration: Union[(float, Fraction)], speeds: Optional[List[int]]=None, jitter_factor: float=0.0, shuffle: bool=True, power_cdf: float=1.0, decreasing_cdf: bool=True) -> None:
        super().__init__(clip_duration)
        self._speeds = speeds
        # A lone speed of 1 never changes the duration; treat it as "no speeds".
        if ((self._speeds is not None) and (len(self._speeds) == 1) and (self._speeds[0] == 1)):
            self._speeds = None
        self._jitter_factor = jitter_factor
        self._shuffle = shuffle
        self._power_cdf = power_cdf
        self._decreasing_cdf = decreasing_cdf
    def __call__(self, last_clip_time: float, video_duration: float, annotation: Dict[(str, Any)]) -> ClipInfoList:
        """Return a ``ClipInfoList`` holding the two sampled clips (times in seconds)."""
        if (self._speeds is not None):
            # Scale the clip duration by a randomly drawn playback speed
            # (dividing by 1/speed keeps the arithmetic in exact Fractions).
            speed = np.random.choice(self._speeds)
            speed_fraction = Fraction(speed)
            clip_duration = Fraction(self._clip_duration, Fraction(1, speed_fraction))
        else:
            clip_duration = self._clip_duration
        if (self._jitter_factor != 0.0):
            # Multiplicative jitter around 1, applied on top of the speed scaling.
            jittered_speed = compute_jittered_speed(self._jitter_factor, 1)
            jittered_speed_fraction = Fraction(jittered_speed)
            clip_duration = Fraction(clip_duration, Fraction(1, jittered_speed_fraction))
        if (video_duration <= clip_duration):
            # Video shorter than one clip: return the same clip (start=0) twice.
            # NOTE(review): `end` exceeds the video duration here; presumably the
            # decoder clamps/pads out-of-range reads — confirm.
            start = Fraction(0)
            end = (Fraction(video_duration) + clip_duration)
            return ClipInfoList([start, start], [end, end], [0, 0], [0, 0], [True, True])
        else:
            max_start = float((video_duration - clip_duration))
            max_start_tensor = torch.tensor(max_start)
            max_start_fraction = Fraction(max_start)
            def cdf(k: Tensor, power: float=1.0):
                # CDF of the power law over gaps k in [0, max_start]; the
                # decreasing variant favors small gaps, the increasing one large gaps.
                if self._decreasing_cdf:
                    p = (((- torch.pow(k, (power + 1))) / (power * torch.pow(max_start_tensor, (power + 1)))) + ((k * (power + 1)) / (power * max_start_tensor)))
                else:
                    p = ((torch.pow(k, (power + 1)) / (power * torch.pow(max_start_tensor, (power + 1)))) + ((k * (power + 1)) / (power * max_start_tensor)))
                return p
            # Inverse-CDF sampling of the gap k: bisect the CDF until the
            # bracket is narrower than 0.001.
            u = torch.rand(1)
            k_low = Fraction(0)
            k_up = max_start_fraction
            two = Fraction(2.0)
            k = Fraction(max_start_fraction, two)
            while (abs((k_up - k_low)) > 0.001):
                k = Fraction((k_up + k_low), two)
                if (cdf(torch.tensor(float(k)), self._power_cdf) > u):
                    k_up = k
                else:
                    k_low = k
            # First clip starts uniformly in the range that still leaves room for the gap.
            max_start_1 = (max_start_fraction - k)
            start_1 = (Fraction(float(torch.rand(1))) * max_start_1)
            start_2 = (start_1 + k)
            # Optionally randomize which of the two clips comes first in the output.
            keep_order = (random.randint(0, 1) if self._shuffle else 1)
            if keep_order:
                starts = [start_1, start_2]
                ends = [(start_1 + clip_duration), (start_2 + clip_duration)]
            else:
                starts = [start_2, start_1]
                ends = [(start_2 + clip_duration), (start_1 + clip_duration)]
            return ClipInfoList(starts, ends, [0, 0], [0, 0], [True, True])
|
class ActionWindowSoccerNetClipSampler(SoccerNetClipSampler):
    """Samples windows randomly positioned around annotated actions in SoccerNet videos.

    Args:
        data_source: SoccerNet dataset.
        window_duration: Duration of a window.
        offset_action: Minimum offset before and after the action.
        shuffle: Whether to shuffle indices.
    """
    def __init__(self, data_source: SoccerNet, window_duration: float=32.0, offset_action: float=0.0, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.window_duration = window_duration
        self.offset_action = offset_action
        # One sampled window per annotated action.
        self.num_actions = self.data_source._annotated_videos.num_actions
    def __iter__(self) -> List[Any]:
        # Deterministic per-epoch randomness: same seed+epoch -> same windows.
        g = torch.Generator()
        g.manual_seed((self.seed + self.epoch))
        indices = [None for i in range(self.num_actions)]
        global_idx = 0
        for idx in range(len(self.data_source)):
            video_metadata = self.data_source.get_video_metadata(idx)
            for half_idx in range(video_metadata['num_halves']):
                for position in video_metadata['annotations'][half_idx]['position']:
                    if (position <= self.offset_action):
                        # Action too close to the start of the half: pin the window at 0.
                        min_clip_start_sec = 0
                        max_clip_start_sec = 0
                    elif (position >= (video_metadata['duration'][half_idx] - self.offset_action)):
                        # Action too close to the end: pin the window at the latest valid start.
                        min_clip_start_sec = (video_metadata['duration'][half_idx] - self.window_duration)
                        max_clip_start_sec = (video_metadata['duration'][half_idx] - self.window_duration)
                    else:
                        # Start range keeping the action (with its offset) inside the window
                        # and the window inside the half.
                        min_clip_start_sec = max(floor(((position - self.window_duration) + self.offset_action)), 0)
                        max_clip_start_sec = max(min((position - self.offset_action), (video_metadata['duration'][half_idx] - self.window_duration)), 0)
                    # Uniform draw between the two bounds.
                    clip_start_sec = float((((min_clip_start_sec - max_clip_start_sec) * torch.rand(1, generator=g)) + max_clip_start_sec))
                    clip_end_sec = (clip_start_sec + self.window_duration)
                    indices[global_idx] = (idx, half_idx, clip_start_sec, clip_end_sec)
                    global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)
    def __len__(self) -> int:
        return self.num_actions
    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, window_duration={self.window_duration}, offset_action={self.offset_action}, shuffle={self._shuffle}, seed={self.seed})'
|
class _DatasetSamplerWrapper(Dataset):
    """Map-style dataset view over a ``SoccerNetClipSampler``.

    The wrapped sampler's indices are materialized lazily on first access and
    can be re-drawn with :meth:`reset`.
    """

    def __init__(self, sampler: SoccerNetClipSampler) -> None:
        self._sampler = sampler
        # Lazily-materialized snapshot of the sampler's indices.
        self._sampler_list: Optional[List[Any]] = None

    def __getitem__(self, index: int) -> Any:
        if self._sampler_list is None:
            # First access: freeze the current sampling into a list.
            self._sampler_list = list(self._sampler)
        return self._sampler_list[index]

    def __len__(self) -> int:
        return len(self._sampler)

    def set_epoch(self, epoch: int) -> None:
        # Forward the epoch to the wrapped sampler (drives its per-epoch seeding).
        self._sampler.set_epoch(epoch)

    def reset(self) -> None:
        'Reset the sampler list in order to get new sampling.'
        self._sampler_list = list(self._sampler)

    def __repr__(self) -> str:
        return str(self._sampler)
|
class SoccerNetClipSamplerDistributedSamplerWrapper(DistributedSampler):
    """Wrapper over ``Sampler`` for distributed training.

    Note:
        The purpose of this wrapper is to take care of sharding the sampler indices. It is up to the underlying
        sampler to handle randomness and shuffling. The ``shuffle`` and ``seed`` arguments on this wrapper won't
        have any effect.
    """
    def __init__(self, sampler: SoccerNetClipSampler, *args: Any, **kwargs: Any) -> None:
        # Capture the sampler's own shuffle/seed settings, then disable shuffling on
        # the sampler itself BEFORE wrapping it: sharding must see a stable order.
        shuffle = sampler.shuffle
        sampler.set_shuffle(False)
        super().__init__(_DatasetSamplerWrapper(sampler), *args, seed=sampler.seed, shuffle=shuffle, **kwargs)
    def __iter__(self) -> Iterator:
        # Re-draw the underlying sampling each epoch, then yield only the index
        # positions DistributedSampler assigns to this rank.
        self.dataset.reset()
        return (self.dataset[index] for index in super().__iter__())
    def set_epoch(self, epoch: int) -> None:
        # Keep both DistributedSampler and the wrapped sampler in sync.
        super().set_epoch(epoch)
        self.dataset.set_epoch(epoch)
        return
    def __repr__(self) -> str:
        return f'{__class__.__name__}(sampler={self.dataset}, shuffle={self.shuffle}, seed={self.seed})'
|
class FeatureExtractionSoccerNetClipSampler(SoccerNetClipSampler):
    """Sampler of windows that slide across the whole video to extract features at a specified fps.

    Args:
        data_source: SoccerNet dataset.
        window_duration: Duration of a window.
        fps: fps to extract features.
        shuffle: Whether to shuffle indices.
    """
    def __init__(self, data_source: SoccerNet, window_duration: float=2.56, fps: int=2, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.window_duration = window_duration
        self.fps = fps
        # NOTE(review): `shuffle` is already passed to the base class above;
        # presumably this re-assignment is redundant — confirm before removing.
        self._shuffle = shuffle
        # The window list depends only on the video metadata, so build it once.
        self.indices = self._precompute_indices()
    def _precompute_indices(self) -> List[Any]:
        """Build (video_idx, half_idx, start_sec, end_sec) windows covering every half at ``fps``."""
        indices = []
        frac_fps = Fraction(self.fps)
        over_frac_fps = Fraction(1, self.fps)
        for i in range(self.data_source.num_videos):
            video_metadata = self.data_source.get_video_metadata(i)
            for j in range(video_metadata['num_halves']):
                start_sec = 0
                # Truncate the half's duration to a whole number of frames at `fps`.
                end_sec = Fraction(Fraction(int((video_metadata['duration'][j] * self.fps))), frac_fps)
                # One timestamp per extracted feature, 1/fps apart.
                all_times = torch.arange(start_sec, float(end_sec), float(over_frac_fps))
                # Center a window on each timestamp, clamped to the half boundaries.
                start_times = torch.maximum((all_times - (self.window_duration / 2)), torch.tensor(0))
                end_times = torch.minimum((all_times + (self.window_duration / 2)), torch.tensor(float(end_sec)))
                indices.extend([(i, j, start, end) for (start, end) in zip(start_times, end_times)])
        return indices
    def __iter__(self) -> List[Any]:
        indices = self.indices
        if self._shuffle:
            # Deterministic per-epoch shuffling derived from seed+epoch.
            g = torch.Generator()
            g.manual_seed((self.seed + self.epoch))
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)
    def __len__(self) -> int:
        return len(self.indices)
    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, window_duration={self.window_duration}, fps={self.fps}, shuffle={self._shuffle}, seed={self.seed})'
|
class ImagesSoccerNetClipSampler(SoccerNetClipSampler):
    """Sampler of images in an ImageSoccerNet dataset.

    Args:
        data_source: SoccerNet dataset.
        images_per_video: Number of images per video to sample. ``None`` samples
            every frame of every video.
        shuffle: Whether to shuffle indices.

    Raises:
        AssertionError: If ``images_per_video`` is neither ``None`` nor even.
    """

    def __init__(self, data_source: ImageSoccerNet, images_per_video: (int | None)=None, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.images_per_video = images_per_video
        # BUGFIX: corrected typo in the assertion message ("divible" -> "divisible").
        assert ((self.images_per_video is None) or ((self.images_per_video % 2) == 0)), 'images_per_video should be divisible by 2 to correctly sample in the halves.'

    def __iter__(self) -> Iterator:
        """Yield ``(video_idx, half_idx, frame_idx)`` tuples."""
        g = torch.Generator()
        # Epoch-dependent seed so each epoch draws different random frames.
        g.manual_seed((self.seed + self.epoch))
        indices = [None for i in range(len(self))]
        global_idx = 0
        for idx in range(self.data_source.num_videos):
            video_metadata = self.data_source.get_video_metadata(idx)
            for half_idx in range(video_metadata['num_halves']):
                num_frames_half = video_metadata['num_frames_fps'][half_idx]
                if (self.images_per_video is None):
                    # Exhaustive pass: one index per frame of the half.
                    for i in range(num_frames_half):
                        indices[global_idx] = (idx, half_idx, i)
                        global_idx += 1
                else:
                    # Draw half the per-video budget from this half, kept in temporal order.
                    random_frames = torch.randperm(num_frames_half, generator=g)[:(self.images_per_video // 2)]
                    random_frames = torch.sort(random_frames)[0].tolist()
                    for i in random_frames:
                        indices[global_idx] = (idx, half_idx, i)
                        global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (len(self.data_source) if (self.images_per_video is None) else (self.images_per_video * self.data_source.num_videos))

    def __repr__(self) -> str:
        return f'{__class__.__name__}(images_per_video={self.images_per_video}, shuffle={self._shuffle}, seed={self.seed})'
|
class RandomWindowSoccerNetClipSampler(SoccerNetClipSampler):
    """Sampler of random windows in SoccerNet videos.

    Args:
        data_source: SoccerNet dataset.
        windows_per_video: Number of windows to sample per video.
        window_duration: Duration of a window.
        sample_edges: Whether to force the sample of edges in the videos. Useful for kick-offs or last second actions.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: SoccerNet, windows_per_video: int=50, window_duration: float=32.0, sample_edges: bool=False, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        # Windows are split evenly between the two halves of a match.
        assert ((windows_per_video % 2) == 0), 'Windows per video should be an even number.'
        self.windows_per_video = windows_per_video
        self.windows_per_half = (windows_per_video // 2)
        self.window_duration = window_duration
        self.sample_edges = sample_edges
        self._shuffle = shuffle

    def __iter__(self) -> List[Any]:
        g = torch.Generator()
        # Epoch-dependent seed: reproducible yet different windows each epoch.
        g.manual_seed((self.seed + self.epoch))
        indices = [None for i in range((len(self.data_source) * self.windows_per_video))]
        global_idx = 0
        for idx in range(len(self.data_source)):
            video_metadata = self.data_source.get_video_metadata(idx)
            for half_idx in range(video_metadata['num_halves']):
                # When fewer than windows_per_half disjoint windows fit in the half,
                # windows must overlap and are laid out deterministically instead.
                max_different_windows = (video_metadata['duration'][half_idx] / self.window_duration)
                has_overlap = (max_different_windows < self.windows_per_half)
                max_possible_clip_start = Fraction(max((video_metadata['duration'][half_idx] - self.window_duration), 0))
                if (self.sample_edges and (not has_overlap)):
                    # Force the first window at the very start of the half.
                    indices[global_idx] = (idx, half_idx, Fraction(0), Fraction(self.window_duration))
                    global_idx += 1
                    # Reserve room at both ends for the two forced edge windows.
                    max_possible_clip_start -= (self.window_duration * 2)
                    uniform_clip = (Fraction(max_possible_clip_start, max((self.windows_per_half - 3), 1)) if has_overlap else Fraction((video_metadata['duration'][half_idx] - (self.window_duration * 2)), (self.windows_per_half - 2)))
                else:
                    # Stride between consecutive window slots across the half.
                    uniform_clip = (Fraction(max_possible_clip_start, max((self.windows_per_half - 1), 1)) if has_overlap else Fraction(video_metadata['duration'][half_idx], self.windows_per_half))
                windows_per_half = ((self.windows_per_half - 2) if (self.sample_edges and (not has_overlap)) else self.windows_per_half)
                for i in range(windows_per_half):
                    if has_overlap:
                        # Deterministic evenly spaced (overlapping) windows.
                        clip_start_sec = (uniform_clip * i)
                    else:
                        # Random start inside the i-th stride-sized slot.
                        clip_start_sec = (((uniform_clip - self.window_duration) * (1 - torch.rand(1, generator=g).item())) + (i * uniform_clip))
                        # Shift past the forced first edge window when edges are sampled.
                        # NOTE(review): placement of this shift inside the non-overlap
                        # branch inferred from the surrounding logic — confirm.
                        if self.sample_edges:
                            clip_start_sec += self.window_duration
                    clip_end_sec = (clip_start_sec + self.window_duration)
                    indices[global_idx] = (idx, half_idx, Fraction(clip_start_sec), Fraction(clip_end_sec))
                    global_idx += 1
                if (self.sample_edges and (not has_overlap)):
                    # Force the last window to end exactly at the half boundary.
                    indices[global_idx] = (idx, half_idx, Fraction((video_metadata['duration'][half_idx] - self.window_duration)), Fraction(video_metadata['duration'][half_idx]))
                    global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (len(self.data_source) * self.windows_per_video)

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, windows_per_video={self.windows_per_video}, window_duration={self.window_duration}, sample_edges={self.sample_edges}, shuffle={self._shuffle}, seed={self.seed})'
|
class SlidingWindowSoccerNetClipSampler(SoccerNetClipSampler):
    """Sampler of windows that slide across the whole video, with optional overlap.

    The last window of a half is always (half_duration - window_duration, half_duration).

    Args:
        data_source: SoccerNet dataset.
        window_duration: Duration of a window.
        overlap_window: Overlap duration between two windows.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: SoccerNet, window_duration: float=32.0, overlap_window: float=1.0, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.window_duration = window_duration
        self.overlap_window = overlap_window
        self._shuffle = shuffle
        # Number of full stride-aligned windows fitting in each half.
        self.correct_window_per_half = ((self.data_source._annotated_videos._duration_per_half - overlap_window) // (window_duration - overlap_window)).to(dtype=torch.int)
        # One extra right-aligned window when the stride does not divide the half exactly.
        self.one_more_window_per_half = (((self.data_source._annotated_videos._duration_per_half - overlap_window) % (window_duration - overlap_window)) > 0).to(dtype=torch.int)
        self.total_windows_per_half = (self.correct_window_per_half + self.one_more_window_per_half)
        self.total_windows = self.total_windows_per_half.sum(0)
        self.window_to_sample = self.total_windows
        self._precompute_indices()

    def _precompute_indices(self) -> None:
        """Precompute all ``(video_idx, half_idx, start_sec, end_sec)`` windows into ``self._raw_indices``."""
        # FIX: annotated -> None (the original advertised List[Any] but stores the result).
        indices = [None for _ in range(self.total_windows)]
        video_idx = 0
        half_idx = 0
        global_idx = 0
        for i in range(len(self.total_windows_per_half)):
            for j in range(self.correct_window_per_half[i]):
                clip_start_sec = Fraction(float((j * (self.window_duration - self.overlap_window))))
                clip_end_sec = (clip_start_sec + self.window_duration)
                indices[global_idx] = (video_idx, (half_idx % 2), clip_start_sec, clip_end_sec)
                global_idx += 1
            if self.one_more_window_per_half[i]:
                # Right-align the final window on the half boundary.
                duration = self.data_source._annotated_videos._duration_per_half[i]
                clip_start_sec = Fraction((floor(duration) - self.window_duration))
                clip_end_sec = (clip_start_sec + self.window_duration)
                indices[global_idx] = (video_idx, (half_idx % 2), clip_start_sec, clip_end_sec)
                global_idx += 1
            half_idx += 1
            # Two halves per video.
            if ((half_idx % 2) == 0):
                video_idx += 1
        self._raw_indices = indices

    def __iter__(self) -> Iterator:
        indices = self._raw_indices
        # BUGFIX: collapsed the duplicated nested `if self._shuffle:` checks into one.
        if self._shuffle:
            g = torch.Generator()
            g.manual_seed((self.seed + self.epoch))
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return self.window_to_sample

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, window_duration={self.window_duration}, overlap_window={self.overlap_window}, shuffle={self._shuffle}, seed={self.seed})'
|
class SoccerNetClipSampler(Sampler, ABC):
    """Abstract base for samplers yielding clip indices over a SoccerNet dataset.

    Args:
        data_source: SoccerNet dataset.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: SoccerNet, shuffle: bool=False) -> None:
        super().__init__(data_source)
        self.data_source = data_source
        self.seed = get_default_seed()
        self.epoch = 0
        self._shuffle = shuffle

    @property
    def shuffle(self) -> bool:
        """Whether this sampler shuffles its indices."""
        return self._shuffle

    def set_shuffle(self, shuffle: bool) -> None:
        """Update the shuffle flag.

        Args:
            shuffle: New value for shuffle.
        """
        self._shuffle = shuffle

    def set_epoch(self, epoch: int) -> None:
        """Record the current epoch so subclasses can vary sampling between epochs.

        Args:
            epoch: Epoch number.
        """
        self.epoch = epoch
|
class UniformWindowSoccerNetClipSampler(SoccerNetClipSampler):
    """Sampler of uniformly random windows in SoccerNet videos.

    Args:
        data_source: SoccerNet dataset.
        windows_per_video: Number of windows to sample per video.
        window_duration: Duration of a window.
        sample_edges: Whether to force the sample of edges in the videos. Useful for kick-offs or last second actions.
        shuffle: Whether to shuffle indices.

    Raises:
        AssertionError: If ``windows_per_video`` is odd.
    """

    def __init__(self, data_source: SoccerNet, windows_per_video: int=50, window_duration: float=32.0, sample_edges: bool=False, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        assert ((windows_per_video % 2) == 0), 'Windows per video should be an even number.'
        self.windows_per_video = windows_per_video
        self.windows_per_half = (windows_per_video // 2)
        self.window_duration = window_duration
        self.sample_edges = sample_edges
        self._shuffle = shuffle

    def __iter__(self) -> Iterator:
        g = torch.Generator()
        g.manual_seed((self.seed + self.epoch))
        indices = [None for i in range((len(self.data_source) * self.windows_per_video))]
        global_idx = 0
        for idx in range(len(self.data_source)):
            video_metadata = self.data_source.get_video_metadata(idx)
            for half_idx in range(video_metadata['num_halves']):
                max_possible_clip_start = Fraction(max((video_metadata['duration'][half_idx] - self.window_duration), 0))
                if self.sample_edges:
                    # Force the first window at the very start of the half.
                    indices[global_idx] = (idx, half_idx, Fraction(0), Fraction(self.window_duration))
                    global_idx += 1
                windows_per_half = ((self.windows_per_half - 2) if self.sample_edges else self.windows_per_half)
                # BUGFIX: draw from the epoch-seeded generator `g`. Previously
                # torch.rand used the global RNG, so sampling ignored seed/epoch
                # and was not reproducible nor varied by set_epoch().
                windows_start = (float(max_possible_clip_start) * torch.rand(windows_per_half, generator=g).sort()[0])
                for window_start in windows_start.tolist():
                    indices[global_idx] = (idx, half_idx, Fraction(window_start), Fraction((window_start + self.window_duration)))
                    global_idx += 1
                if self.sample_edges:
                    # Force the last window to end exactly at the half boundary.
                    indices[global_idx] = (idx, half_idx, Fraction((video_metadata['duration'][half_idx] - self.window_duration)), Fraction(video_metadata['duration'][half_idx]))
                    global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (len(self.data_source) * self.windows_per_video)

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, windows_per_video={self.windows_per_video}, window_duration={self.window_duration}, sample_edges={self.sample_edges}, shuffle={self._shuffle}, seed={self.seed})'
|
def random_start_subsequences(clip_duration: float=32, video_duration: float=2700, num_clips: int=50, fps: int=25, sample_edges: bool=True, prevent_resample_edges: bool=True, generator: (torch.Generator | None)=None):
'Sammple starting point of clips inside a video uniformly. Prevent overlap.\n\n Args:\n clip_duration: Duration of a clip.\n video_duration: Duration of the video.\n num_clips: Number of clips to sample.\n fps: FPS of the clips.\n sample_edges: Whether to force the sample of edges in the videos. Useful for first or last second actions.\n prevent_resample_edges: Whether to prevent resample of edges. If True, prevent half of the window duration of edges to be sampled again.\n generator: Generator for generating random Pytorch tensors.\n\n Raises:\n AttributeError: _description_\n\n Returns:\n _type_: _description_\n '
possible_start_idx: np.ndarray = np.arange(0, int(((video_duration - clip_duration) * fps)))
subsequences = [None for _ in range(num_clips)]
if sample_edges:
subsequences[(- 1)] = Fraction(0)
subsequences[(- 2)] = Fraction((video_duration - clip_duration))
if prevent_resample_edges:
possible_start_idx = possible_start_idx[(ceil((clip_duration / 2)) * fps):]
possible_start_idx = possible_start_idx[:((- floor((clip_duration / 2))) * fps)]
num_clips -= 2
max_possible_start = possible_start_idx[(- 1)]
for i in range(num_clips):
if (possible_start_idx.shape[0] == 0):
raise AttributeError(f'Impossible to sample without overlap {num_clips} clips of {clip_duration} seconds in video of {video_duration} seconds, try changing the numbers.')
idx_tensor: int = torch.randint(0, possible_start_idx.shape[0], size=(1,), generator=generator).item()
start_idx = possible_start_idx[idx_tensor]
start_sec = Fraction(start_idx, fps)
min_remove = max(((start_idx - int((clip_duration * fps))) + 1), 0)
max_remove = min(((start_idx + int((clip_duration * fps))) + 1), max_possible_start)
possible_start_idx = possible_start_idx[np.logical_or((possible_start_idx < min_remove), (possible_start_idx > max_remove))]
subsequences[i] = start_sec
return subsequences
|
class UniformWindowWithoutOverlapSoccerNetClipSampler(SoccerNetClipSampler):
    """Sampler of uniformly random, non-overlapping windows in SoccerNet videos.

    Args:
        data_source: SoccerNet dataset.
        windows_per_video: Number of windows to sample per video.
        window_duration: Duration of a window.
        sample_edges: Whether to force the sample of edges in the videos. Useful for first or last second actions.
        prevent_resample_edges: Whether to prevent resample of edges. If True, prevent half of the window duration of edges to be sampled again.
        shuffle: Whether to shuffle indices.

    Raises:
        AssertionError: If ``windows_per_video`` is odd.
    """

    def __init__(self, data_source: SoccerNet, windows_per_video: int=50, window_duration: float=32.0, sample_edges: bool=False, prevent_resample_edges: bool=True, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        assert ((windows_per_video % 2) == 0), 'Windows per video should be an even number.'
        self.windows_per_video = windows_per_video
        self.windows_per_half = (windows_per_video // 2)
        self.window_duration = window_duration
        self.sample_edges = sample_edges
        self.prevent_resample_edges = prevent_resample_edges
        self._shuffle = shuffle

    def __iter__(self) -> Iterator:
        g = torch.Generator()
        # Epoch-dependent seed: reproducible yet different windows each epoch.
        g.manual_seed((self.seed + self.epoch))
        indices = [None for _ in range((len(self.data_source) * self.windows_per_video))]
        global_idx = 0
        for idx in range(len(self.data_source)):
            video_metadata = self.data_source.get_video_metadata(idx)
            for half_idx in range(video_metadata['num_halves']):
                # Delegate the non-overlapping draw to random_start_subsequences.
                half_random_starts = random_start_subsequences(clip_duration=self.window_duration, video_duration=video_metadata['duration'][half_idx], num_clips=self.windows_per_half, fps=self.data_source._annotated_videos.fps_videos, sample_edges=self.sample_edges, prevent_resample_edges=self.prevent_resample_edges, generator=g)
                for half_random_start in half_random_starts:
                    indices[global_idx] = (idx, half_idx, half_random_start, (half_random_start + self.window_duration))
                    global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (len(self.data_source) * self.windows_per_video)

    def __repr__(self) -> str:
        # BUGFIX: added the missing comma between prevent_resample_edges and shuffle.
        return f'{__class__.__name__}(len={self.__len__()}, windows_per_video={self.windows_per_video}, window_duration={self.window_duration}, sample_edges={self.sample_edges}, prevent_resample_edges={self.prevent_resample_edges}, shuffle={self._shuffle}, seed={self.seed})'
|
class _DatasetSamplerWrapper(Dataset):
    """Dataset view over a ``SpotClipSampler`` that materializes its indices lazily."""

    def __init__(self, sampler: SpotClipSampler) -> None:
        self._sampler = sampler
        # Cache of drawn indices; built on first access or via reset().
        self._sampler_list: Optional[List[Any]] = None

    def __getitem__(self, index: int) -> Any:
        if self._sampler_list is None:
            self.reset()
        return self._sampler_list[index]

    def __len__(self) -> int:
        return len(self._sampler)

    def set_epoch(self, epoch: int) -> None:
        """Forward the epoch to the wrapped sampler."""
        self._sampler.set_epoch(epoch)

    def reset(self) -> None:
        """Re-draw the wrapped sampler's indices to get a fresh sampling."""
        self._sampler_list = list(self._sampler)

    def __repr__(self) -> str:
        return str(self._sampler)
|
class SpotClipSamplerDistributedSamplerWrapper(DistributedSampler):
    """Wrapper over ``Sampler`` for distributed training.

    Note:
        The purpose of this wrapper is to take care of sharding the sampler indices. It is up to the underlying
        sampler to handle randomness and shuffling. The ``shuffle`` and ``seed`` arguments on this wrapper won't
        have any effect.
    """

    def __init__(self, sampler: SpotClipSampler, *args: Any, **kwargs: Any) -> None:
        # Shuffling is delegated to the DistributedSampler: remember the wrapped
        # sampler's setting, disable it there, and hand it to the parent instead.
        shuffle = sampler.shuffle
        sampler.set_shuffle(False)
        super().__init__(_DatasetSamplerWrapper(sampler), *args, seed=sampler.seed, shuffle=shuffle, **kwargs)

    def __iter__(self) -> Iterator:
        """Yield the wrapped sampler's entries for this process's shard."""
        # Re-draw the wrapped sampler's indices so every epoch gets a fresh sampling.
        self.dataset.reset()
        return (self.dataset[index] for index in super().__iter__())

    def set_epoch(self, epoch: int) -> None:
        """Propagate the epoch to both the distributed sampler and the wrapped dataset.

        Args:
            epoch: Epoch number.
        """
        super().set_epoch(epoch)
        self.dataset.set_epoch(epoch)
        return

    def __repr__(self) -> str:
        return f'{__class__.__name__}(sampler={self.dataset}, shuffle={self.shuffle}, seed={self.seed})'
|
class FeatureExtractionSpotClipSampler(SpotClipSampler):
    """Sampler of windows that slide across the whole video to extract features.

    Args:
        data_source: SoccerNet dataset.
        window_num_frames: Number of frames in a window.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: SoccerNet, window_num_frames: int=16, shuffle: bool=False) -> None:
        # Annotation fixed: window_num_frames is used as a frame count (int), not a float.
        super().__init__(data_source, shuffle=shuffle)
        self.window_num_frames = window_num_frames
        self._shuffle = shuffle
        # Windows are fixed for a given dataset, so compute them once up front.
        self.indices = self._precompute_indices()

    def _precompute_indices(self) -> List[Any]:
        """Build one ``(video_idx, start_frame, end_frame)`` window per frame, centered on it."""
        indices = []
        for i in range(self.data_source.num_videos):
            video_metadata = self.data_source.get_video_metadata(i)
            start_frame = 0
            end_frame = (int(video_metadata['num_frames']) - 1)
            all_frames = torch.arange(start_frame, (end_frame + 1)).long()
            # Center a window on each frame, clamped to the video boundaries.
            start_frames = torch.maximum((all_frames - (self.window_num_frames // 2)), torch.tensor(0))
            end_frames = torch.minimum(((all_frames + (self.window_num_frames // 2)) - 1), torch.tensor(end_frame))
            indices.extend([(i, start, end) for (start, end) in zip(start_frames, end_frames)])
        return indices

    def __iter__(self) -> Iterator:
        indices = self.indices
        if self._shuffle:
            g = torch.Generator()
            # Epoch-dependent seed: reproducible yet different order each epoch.
            g.manual_seed((self.seed + self.epoch))
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return len(self.indices)

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, window_num_frames={self.window_num_frames}, shuffle={self._shuffle}, seed={self.seed})'
|
class ImagesSpotClipSampler(SpotClipSampler):
    """Sampler of single images in a Spot dataset.

    Args:
        data_source: Spot dataset.
        images_per_video: Number of images per video to sample. ``None`` samples every frame.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: Spot, images_per_video: (int | None)=None, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.images_per_video = images_per_video

    def __iter__(self) -> List[Any]:
        rng = torch.Generator()
        rng.manual_seed(self.seed + self.epoch)
        out = [None for _ in range(len(self))]
        cursor = 0
        for video_idx in range(self.data_source.num_videos):
            metadata = self.data_source.get_video_metadata(video_idx)
            total_frames = metadata['num_frames']
            if self.images_per_video is None:
                # Exhaustive pass over every frame of the video.
                frame_ids = range(total_frames)
            else:
                # Random subset of frames, kept in temporal order.
                picked = torch.randperm(total_frames, generator=rng)[:self.images_per_video]
                frame_ids = torch.sort(picked)[0].tolist()
            for frame_id in frame_ids:
                out[cursor] = (video_idx, frame_id, frame_id)
                cursor += 1
        if self._shuffle:
            order = torch.randperm(len(out), generator=rng)
            out = [out[i] for i in order]
        return iter(out)

    def __len__(self) -> int:
        if self.images_per_video is None:
            return self.data_source._annotated_videos.cum_num_frames_per_video[(- 1)]
        return self.images_per_video * self.data_source.num_videos

    def __repr__(self) -> str:
        return f'{__class__.__name__}(images_per_video={self.images_per_video}, shuffle={self._shuffle}, seed={self.seed})'
|
class SlidingWindowSpotClipSampler(SpotClipSampler):
    """Sampler of windows that slide across the whole video, with optional overlap.

    The last window is always right-aligned on the video's final frame.

    Args:
        data_source: SoccerNet dataset.
        window_num_frames: Number of frames in a window.
        overlap_window: Overlap (in frames) between two windows.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: Spot, window_num_frames: int=32, overlap_window: int=1, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.window_num_frames = window_num_frames
        self.overlap_window = overlap_window
        self._shuffle = shuffle
        # Number of full stride-aligned windows fitting in each video.
        self.correct_window_per_video = ((self.data_source._annotated_videos.num_frames_per_video - overlap_window) // (window_num_frames - overlap_window)).to(dtype=torch.int)
        # One extra right-aligned window when the stride does not divide the video exactly.
        self.one_more_window_per_video = (((self.data_source._annotated_videos.num_frames_per_video - overlap_window) % (window_num_frames - overlap_window)) > 0).to(dtype=torch.int)
        self.total_windows_per_video = (self.correct_window_per_video + self.one_more_window_per_video)
        self.total_windows = self.total_windows_per_video.sum(0)
        self.window_to_sample = self.total_windows
        self._precompute_indices()

    def _precompute_indices(self) -> None:
        """Precompute all ``(video_idx, start_frame, end_frame)`` windows into ``self._raw_indices``."""
        # FIX: annotated -> None (the original advertised List[Any] but stores the result).
        indices = [None for _ in range(self.total_windows)]
        video_idx = 0
        global_idx = 0
        for i in range(len(self.total_windows_per_video)):
            for j in range(self.correct_window_per_video[i]):
                clip_start_frame = (j * (self.window_num_frames - self.overlap_window))
                clip_end_frame = ((clip_start_frame + self.window_num_frames) - 1)
                indices[global_idx] = (video_idx, clip_start_frame, clip_end_frame)
                global_idx += 1
            if self.one_more_window_per_video[i]:
                # Right-align the final window on the last frame.
                duration = self.data_source._annotated_videos.num_frames_per_video[i]
                clip_start_frame = (floor(duration) - self.window_num_frames)
                clip_end_frame = ((clip_start_frame + self.window_num_frames) - 1)
                indices[global_idx] = (video_idx, clip_start_frame, clip_end_frame)
                global_idx += 1
            video_idx += 1
        self._raw_indices = indices

    def __iter__(self) -> Iterator:
        indices = self._raw_indices
        # BUGFIX: collapsed the duplicated nested `if self._shuffle:` checks into one.
        if self._shuffle:
            g = torch.Generator()
            g.manual_seed((self.seed + self.epoch))
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return self.window_to_sample

    def __repr__(self) -> str:
        return f'{__class__.__name__}(len={self.__len__()}, window_num_frames={self.window_num_frames}, overlap_window={self.overlap_window}, shuffle={self._shuffle}, seed={self.seed})'
|
class SpotClipSampler(Sampler, ABC):
    """Abstract base for samplers yielding clip indices over a Spot dataset.

    Args:
        data_source: Spot dataset.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: Spot, shuffle: bool=False) -> None:
        super().__init__(data_source)
        self.data_source = data_source
        self.seed = get_default_seed()
        self.epoch = 0
        self._shuffle = shuffle

    @property
    def shuffle(self) -> bool:
        """Whether this sampler shuffles its indices."""
        return self._shuffle

    def set_shuffle(self, shuffle: bool) -> None:
        """Update the shuffle flag.

        Args:
            shuffle: New value for shuffle.
        """
        self._shuffle = shuffle

    def set_epoch(self, epoch: int) -> None:
        """Record the current epoch so subclasses can vary sampling between epochs.

        Args:
            epoch: Epoch number.
        """
        self.epoch = epoch
|
def random_start_subsequences(clip_duration: int=32, video_num_frames: int=2700, num_subsequences: int=50, sample_edges: bool=True, prevent_resample_edges: bool=True, generator: (torch.Generator | None)=None):
possible_start_idx: np.ndarray = np.arange(0, (video_num_frames - clip_duration))
subsequences = [None for _ in range(num_subsequences)]
if sample_edges:
subsequences[(- 1)] = 0
subsequences[(- 2)] = (video_num_frames - clip_duration)
if prevent_resample_edges:
possible_start_idx = possible_start_idx[ceil((clip_duration / 2)):]
possible_start_idx = possible_start_idx[:(- floor((clip_duration / 2)))]
num_subsequences -= 2
max_possible_start = possible_start_idx[(- 1)]
for i in range(num_subsequences):
if (possible_start_idx.shape[0] == 0):
raise AttributeError(f'Impossible to sample without overlap {num_subsequences} clips of {clip_duration} seconds in video of {video_num_frames} num frames, try changing the numbers.')
idx_tensor: int = torch.randint(0, possible_start_idx.shape[0], size=(1,), generator=generator).item()
start_idx = possible_start_idx[idx_tensor]
min_remove = max(((start_idx - clip_duration) + 1), 0)
max_remove = min(((start_idx + clip_duration) + 1), max_possible_start)
possible_start_idx = possible_start_idx[np.logical_or((possible_start_idx < min_remove), (possible_start_idx > max_remove))]
subsequences[i] = start_idx
return subsequences
|
class UniformWindowWithoutOverlapSpotClipSampler(SpotClipSampler):
    """Sampler of uniformly random, non-overlapping windows in Spot videos.

    Args:
        data_source: Spot dataset.
        windows_per_video: Number of windows to sample per video.
        window_num_frames: Number of frames in a window.
        sample_edges: Whether to force the sample of edges in the videos. Useful for first or last second actions.
        prevent_resample_edges: Whether to prevent resample of edges. If True, prevent half of the window duration of edges to be sampled again.
        shuffle: Whether to shuffle indices.
    """

    def __init__(self, data_source: Spot, windows_per_video: int=50, window_num_frames: int=32, sample_edges: bool=False, prevent_resample_edges: bool=True, shuffle: bool=False) -> None:
        super().__init__(data_source, shuffle=shuffle)
        self.windows_per_video = windows_per_video
        self.window_num_frames = window_num_frames
        self.sample_edges = sample_edges
        self.prevent_resample_edges = prevent_resample_edges
        self._shuffle = shuffle

    def __iter__(self) -> Iterator:
        g = torch.Generator()
        # Epoch-dependent seed: reproducible yet different windows each epoch.
        g.manual_seed((self.seed + self.epoch))
        indices = [None for _ in range((len(self.data_source) * self.windows_per_video))]
        global_idx = 0
        for idx in range(len(self.data_source)):
            video_metadata = self.data_source.get_video_metadata(idx)
            # Delegate the non-overlapping draw to random_start_subsequences.
            video_random_starts = random_start_subsequences(clip_duration=self.window_num_frames, video_num_frames=video_metadata['num_frames'], num_subsequences=self.windows_per_video, sample_edges=self.sample_edges, prevent_resample_edges=self.prevent_resample_edges, generator=g)
            for video_random_start in video_random_starts:
                indices[global_idx] = (idx, video_random_start, ((video_random_start + self.window_num_frames) - 1))
                global_idx += 1
        if self._shuffle:
            indices = [indices[idx] for idx in torch.randperm(len(indices), generator=g)]
        return iter(indices)

    def __len__(self) -> int:
        return (len(self.data_source) * self.windows_per_video)

    def __repr__(self) -> str:
        # BUGFIX: added the missing comma between prevent_resample_edges and shuffle.
        return f'{__class__.__name__}(len={self.__len__()}, windows_per_video={self.windows_per_video}, window_num_frames={self.window_num_frames}, sample_edges={self.sample_edges}, prevent_resample_edges={self.prevent_resample_edges}, shuffle={self._shuffle}, seed={self.seed})'
|
def get_collate_fn(name: str) -> Callable:
    """Look up a collate function by name in the ``_COLLATE_FUNCTIONS`` registry.

    Args:
        name: The collate function name.

    Raises:
        NotImplementedError: If the name is not registered.

    Returns:
        Callable: the collate function
    """
    if name not in _COLLATE_FUNCTIONS:
        raise NotImplementedError(f'{name} not supported: try a name in {_COLLATE_FUNCTIONS.keys()}')
    return _COLLATE_FUNCTIONS[name]
|
def multiple_samples_collate(batch: List[Dict[(str, List[Any])]]) -> Dict[(str, Any)]:
    """Collate function for repeated augmentation, where each batch element may hold several samples.

    Args:
        batch: Batch of data before collate; each element is either a sample
            dict or a list of sample dicts.

    Returns:
        The collated batch, one entry per key.
    """
    first = batch[0]
    keys = first.keys() if type(first) is dict else first[0].keys()
    collated: Dict[str, Any] = {}
    for key in keys:
        values: List[Any] = []
        for element in batch:
            # A bare dict counts as a one-sample group.
            group = [element] if type(element) is dict else element
            values.extend(sample[key] for sample in group)
        collated[key] = default_collate(values)
    return collated
|
class DecoderType(Enum):
    """Supported video decoder backends."""

    PYAV = 'pyav'
    TORCHVISION = 'torchvision'
    FRAME = 'frame'
    # Random-frame decoder used for testing (see DumbSoccerNetVideo below).
    DUMB = 'dumb'
|
class DumbSoccerNetVideo(Video):
'DumbSoccerNetVideo is an abstractions for accessing clips based on their start and end time for a video\n where each frame is randomly generated.\n\n Args:\n video_path: The path of the video.\n half_path: The path of the half.\n duration: The duration of the video in seconds.\n fps_video: The fps of the video.\n fps: The target fps for the video. This is needed to link the frames\n to a second timestamp in the video.\n num_frames: The number of frames of the video.\n min_clip_duration: The minimum duration of a clip.\n num_decode: Number of duplicate output clip.\n '
def __init__(self, video_path: (str | Path), half_path: (str | Path), duration: float, fps_video: int, fps: int, num_frames: int, min_clip_duration: float, num_decode: int, **kwargs) -> None:
self._duration = duration
self._fps_video = fps_video
self._fps = fps
self._different_fps = (self._fps_video != self._fps)
assert (self._fps_video >= self._fps)
self._num_frames = num_frames
self._video_path = video_path
self._half_path = half_path
self._name = (Path(Path(self._video_path).name) / Path(self._half_path.name))
self._min_clip_duration = min_clip_duration
self._num_decode = num_decode
@property
def name(self) -> str:
'The name of the video.'
return self._name
@property
def duration(self) -> float:
"The video's duration/end-time in seconds."
return self._duration
def _get_frame_index_for_time(self, time_sec: float, fps: int) -> int:
return round((fps * time_sec))
def get_timestamps_and_frame_indices(self, start_sec: float, end_sec: float) -> tuple[(torch.Tensor, torch.Tensor)]:
'Retrieves timestamps and frame indices from the stored video at the specified start and end times in\n seconds.\n\n Args:\n start_sec: The clip start time in seconds\n end_sec: The clip end time in seconds\n\n Returns:\n The timestamps and the frame indices.\n '
if ((start_sec < 0) or (start_sec > self._duration) or (end_sec > self._duration)):
logger.warning(f'No frames found within {start_sec} and {end_sec} seconds. Video startsat time 0 and ends at {self._duration}.')
return None
frac_fps = Fraction(self._fps)
frac_fps_video = Fraction(self._fps_video)
over_frac_fps_video = Fraction(1, frac_fps_video)
start_sec = Fraction(Fraction(int((start_sec * self._fps))), frac_fps)
end_sec = Fraction(Fraction(int((end_sec * self._fps))), frac_fps)
if self._different_fps:
if ((start_sec % self._fps_video) != 0):
start_sec = Fraction(Fraction(int((start_sec * self._fps_video))), frac_fps_video)
if ((end_sec % over_frac_fps_video) != 0):
end_sec = Fraction(Fraction(int((end_sec * self._fps_video))), frac_fps_video)
start_frame_index = self._get_frame_index_for_time(start_sec, self._fps_video)
end_frame_index = self._get_frame_index_for_time(end_sec, self._fps_video)
fps_video_frame_indices = torch.arange(start_frame_index, end_frame_index)
timestamps = (fps_video_frame_indices * float(over_frac_fps_video))
if self._different_fps:
keep_indices = torch.tensor([i for i in range(0, self._fps_video) for j in range(0, self._fps) if ((round((Fraction(frac_fps_video, frac_fps) * j)) - i) == 0)])
keep_timestamp = torch.isin((fps_video_frame_indices % self._fps_video), keep_indices)
fps_video_frame_indices = fps_video_frame_indices[keep_timestamp]
timestamps = timestamps[keep_timestamp]
timestamps = ((timestamps * self._fps).round() / self._fps)
frame_indices = torch.arange((timestamps[0] * self._fps), ((timestamps[(- 1)] * self._fps) + 1)).round().to(dtype=torch.long)
else:
frame_indices = fps_video_frame_indices
if ((self._min_clip_duration > 0) and (len(frame_indices) < (self._min_clip_duration * self._fps))):
num_lacking_frames = ((self._min_clip_duration * self._fps) - len(frame_indices))
if (start_frame_index == 0):
fps_video_frame_indices = torch.cat([torch.zeros(num_lacking_frames, dtype=fps_video_frame_indices.dtype), fps_video_frame_indices])
frame_indices = torch.cat([torch.zeros(num_lacking_frames, dtype=frame_indices.dtype), frame_indices])
timestamps = torch.cat([torch.zeros(num_lacking_frames, dtype=timestamps.dtype), timestamps])
else:
fps_video_frame_indices = torch.cat([fps_video_frame_indices, torch.tensor([fps_video_frame_indices[(- 1)] for _ in range(num_lacking_frames)], dtype=fps_video_frame_indices.dtype)])
frame_indices = torch.cat([frame_indices, torch.tensor([frame_indices[(- 1)] for _ in range(num_lacking_frames)], dtype=frame_indices.dtype)])
timestamps = torch.cat([timestamps, torch.tensor([timestamps[(- 1)] for _ in range(num_lacking_frames)], dtype=timestamps.dtype)])
return (timestamps, frame_indices, fps_video_frame_indices)
def get_clip(self, start_sec: float, end_sec: float) -> dict[(str, ((torch.Tensor | None) | list[torch.Tensor]))]:
    """Return a randomly generated clip covering [start_sec, end_sec).

    The video always starts at 0 seconds and ``end_sec`` is exclusive, so you may
    need ``get_clip(start_sec, duration + EPS)`` to include the last frame.

    Args:
        start_sec: The clip start time in seconds
        end_sec: The clip end time in seconds

    Returns:
        A dictionary containing the clip data and information.
    """
    timestamps, frame_indices, fps_video_frame_indices = self.get_timestamps_and_frame_indices(start_sec, end_sec)
    clip = torch.randn((3, timestamps.shape[0], 224, 224), dtype=torch.float32)
    # Duplicate the decoded clip when several decodes are requested.
    video_out = [clip] * self._num_decode if self._num_decode > 1 else clip
    return {
        'video': video_out,
        'clip_start': timestamps[0].item(),
        'clip_end': timestamps[-1].item(),
        'clip_duration': (timestamps[-1] - timestamps[0]).item(),
        'frame_indices': frame_indices,
        'fps_video_frame_indices': fps_video_frame_indices,
        'timestamps': timestamps,
    }
|
class DumbSpotVideo(Video):
    """DumbSpotVideo is an abstraction for accessing clips based on their start and end frame for a video
    where each frame is randomly generated.

    Args:
        video_path: The path of the video.
        fps: The target fps for the video. This is needed to link the frames
            to a second timestamp in the video.
        num_frames: The number of frames of the video.
        num_decode: Number of duplicate output clip.
        min_clip_duration: The minimum duration of a clip, in seconds.
    """

    def __init__(self, video_path: (str | Path), fps: int, num_frames: int, num_decode: int = 1, min_clip_duration: float = 0, **kwargs) -> None:
        self._fps = fps
        self._num_frames = num_frames
        self._video_path = video_path
        # Bug fix: the original read ``self._half_path``, which is never assigned and
        # raised AttributeError at construction time. Mirror FrameSpotVideo and build
        # the name from the video path (accepting both str and Path inputs).
        # NOTE(review): "<name>/<name>" matches FrameSpotVideo's behavior; confirm
        # whether "<parent>/<name>" was actually intended.
        path = Path(self._video_path)
        self._name = Path(path.name) / Path(path.name)
        self._num_decode = num_decode
        self._min_clip_duration = min_clip_duration

    @property
    def name(self) -> str:
        """The name of the video."""
        return self._name

    def get_frame_indices(self, start_frame: float, end_frame: float) -> (torch.Tensor | None):
        """Retrieves frame indices from the stored video at the specified start and end frames.

        Args:
            start_frame: The clip start frame
            end_frame: The clip end frame

        Returns:
            The frame indices, or ``None`` when the requested range is out of bounds.
        """
        if (start_frame < 0) or (start_frame >= self._num_frames) or (end_frame >= self._num_frames):
            logger.warning(f'No frames found within {start_frame} and {end_frame} frames. Video starts at frame 0 and ends at {self._num_frames}.')
            return None
        video_frame_indices = torch.arange(start_frame, (end_frame + 1))
        # Pad clips shorter than ``min_clip_duration`` seconds worth of frames.
        # Bug fix: the original compared len() against ``min_clip_duration`` (seconds)
        # but padded with ``min_clip_duration * fps`` (frames); use frames in both
        # places, and cast to int so torch.zeros receives an integer size.
        min_num_frames = int(self._min_clip_duration * self._fps)
        if (self._min_clip_duration > 0) and (len(video_frame_indices) < min_num_frames):
            num_lacking_frames = min_num_frames - len(video_frame_indices)
            if start_frame == 0:
                # Clip touches the beginning of the video: pad with frame 0.
                video_frame_indices = torch.cat([torch.zeros(num_lacking_frames, dtype=video_frame_indices.dtype), video_frame_indices])
            else:
                # Otherwise repeat the last frame index.
                video_frame_indices = torch.cat([video_frame_indices, torch.tensor([video_frame_indices[-1] for _ in range(num_lacking_frames)], dtype=video_frame_indices.dtype)])
        return video_frame_indices

    def get_clip(self, start_frame: int, end_frame: int) -> (dict[(str, ((torch.Tensor | None) | list[torch.Tensor]))] | None):
        """Retrieves frames from the stored video at the specified starting and ending frames.

        Args:
            start_frame: The clip start frame
            end_frame: The clip end frame

        Returns:
            A dictionary containing the clip data and information, or ``None`` if the
            requested frame range is invalid.
        """
        frame_indices = self.get_frame_indices(start_frame, end_frame)
        if frame_indices is None:
            # Robustness fix: the original crashed with AttributeError on None here.
            return None
        videos = torch.randn((3, frame_indices.shape[0], 224, 224), dtype=torch.float32)
        if self._num_decode > 1:
            videos = [videos for _ in range(self._num_decode)]
        return {'video': videos, 'clip_start': frame_indices[0].item(), 'clip_end': frame_indices[-1].item(), 'frame_indices': frame_indices}
|
class FrameSpotVideo(GeneralFrameVideo):
    """FrameSpotVideo is an abstraction for accessing clips based on their start and end frames for a video
    where each frame is stored as an image.

    Args:
        video_path: The path of the video.
        num_frames: The number of frames of the video.
        transform: The transform to apply to the frames.
        video_frame_to_path_fn: A function that maps from the video path and a frame
            index integer to the file path where the frame is located.
        num_threads_io: Controls whether parallelizable io operations are
            performed across multiple threads.
        num_threads_decode: Controls whether parallelizable decode operations are
            performed across multiple threads.
        num_decode: Number of decode to perform. If > 1, the videos decoded are stored in a list.
        mask_ratio: Masking ratio for the video.
        mask_tube: Sequence tube size for masking the video.
        min_clip_duration: The minimum duration of a clip, in frames.
        decode_float: Whether to decode the clip as float.
    """

    def __init__(self, video_path: (str | Path), num_frames: int, transform: (Callable | None) = None, video_frame_to_path_fn: Callable[([str, int], int)] = get_video_to_frame_path_fn(zeros=6, incr=0), num_threads_io: int = 0, num_threads_decode: int = 0, num_decode: int = 1, mask_ratio: float = 0.0, mask_tube: int = 2, min_clip_duration: float = 0, decode_float: bool = False) -> None:
        super().__init__(num_threads_io=num_threads_io, num_threads_decode=num_threads_decode, transform=transform)
        self._num_frames = num_frames
        self._decode_float = decode_float
        self._video_frame_to_path_fn = video_frame_to_path_fn
        self._video_path = video_path
        # Bug fix: the original accessed ``self._video_path.name`` directly, which
        # raises AttributeError when ``video_path`` is passed as a str (allowed by
        # the signature). Normalize to Path first; the resulting "<name>/<name>"
        # value matches the original behavior for Path inputs.
        path = Path(self._video_path)
        self._name: Path = Path(path.name) / Path(path.name)
        self._num_decode = num_decode
        self._mask_ratio = mask_ratio
        self._mask_tube = mask_tube
        self._min_clip_duration = min_clip_duration

    @property
    def name(self) -> str:
        """The name of the video."""
        return self._name

    @property
    def duration(self) -> int:
        """Duration of the video, expressed as its number of frames."""
        return self._num_frames

    def get_frame_indices(self, start_frame: int, end_frame: int) -> (torch.Tensor | None):
        """Retrieves frame indices from the stored video at the specified starting frame and end frame.

        Args:
            start_frame: The clip start frame
            end_frame: The clip end frame

        Returns:
            The frame indices, or ``None`` when the requested range is out of bounds.
        """
        if (start_frame < 0) or (start_frame >= self._num_frames) or (end_frame >= self._num_frames):
            # Message fix: the arguments are frames, not seconds.
            logger.warning(f'No frames found within {start_frame} and {end_frame} frames. Video starts at frame 0 and ends at {self._num_frames}.')
            return None
        video_frame_indices = torch.arange(start_frame, (end_frame + 1))
        if (self._min_clip_duration > 0) and (len(video_frame_indices) < self._min_clip_duration):
            # Cast to int: ``min_clip_duration`` is typed float and torch.zeros
            # requires an integer size.
            num_lacking_frames = int(self._min_clip_duration) - len(video_frame_indices)
            if start_frame == 0:
                # Clip touches the beginning of the video: pad with frame 0.
                video_frame_indices = torch.cat([torch.zeros(num_lacking_frames, dtype=video_frame_indices.dtype), video_frame_indices])
            else:
                # Otherwise repeat the last frame index.
                video_frame_indices = torch.cat([video_frame_indices, torch.tensor([video_frame_indices[-1] for _ in range(num_lacking_frames)], dtype=video_frame_indices.dtype)])
        return video_frame_indices

    def get_clip(self, start_frame: float, end_frame: float) -> (dict[(str, ((torch.Tensor | None) | list[torch.Tensor]))] | None):
        """Retrieves frames from the stored video at the specified starting and ending frames.

        Args:
            start_frame: The clip start frame
            end_frame: The clip end frame

        Returns:
            A dictionary containing the clip data and information, or ``None`` if the
            requested frame range is invalid.
        """
        frame_indices = self.get_frame_indices(start_frame, end_frame)
        if frame_indices is None:
            # Robustness fix: the original crashed with AttributeError on None here.
            return None
        if self._mask_ratio > 0:
            # Only decode the frames kept by the temporal tube mask.
            t = frame_indices.shape[0]
            (_, indices_kept, inversed_temporal_masked_indices, _) = mask_tube_in_sequence(self._mask_ratio, self._mask_tube, t, 'cpu')
            frame_indices_to_decode = frame_indices[indices_kept]
        else:
            frame_indices_to_decode = frame_indices
        clip_paths = [self._video_frame_to_path(i) for i in frame_indices_to_decode]
        clip_frames = self._load_images_with_retries(clip_paths)
        # (T, C, H, W) -> (C, T, H, W)
        clip_frames = clip_frames.permute(1, 0, 2, 3)
        if self._decode_float:
            clip_frames = clip_frames.to(torch.float32)
        if self._num_decode > 1:
            videos = [clip_frames for _ in range(self._num_decode)]
        else:
            videos = clip_frames
        out = {'video': videos, 'frame_start': frame_indices[0].item(), 'frame_end': frame_indices[-1].item(), 'frame_indices': frame_indices}
        if self._mask_ratio > 0.0:
            out['inversed_temporal_masked_indices'] = inversed_temporal_masked_indices
        return out

    def _video_frame_to_path(self, frame_index: int) -> str:
        """Map a frame index to the image file path for this video."""
        return self._video_frame_to_path_fn(self._video_path, frame_index)
|
class DictDataset(Dataset):
    """Wrapper around a Dataset to have a dictionary as input for models.

    Args:
        dataset: dataset to wrap around.
    """

    def __init__(self, dataset: Dataset) -> None:
        super().__init__()
        # The wrapped dataset; items are re-packaged as dicts in __getitem__.
        self.source_dataset = dataset

    def __getitem__(self, idx: int) -> Dict[(str, Any)]:
        """Return the wrapped item as a dict with an ``idx`` key added.

        Mappings pass through (with ``idx`` injected when absent); 1- and 2-element
        iterables become ``input``/``label`` entries; anything else is wrapped as
        ``input``.

        Raises:
            NotImplementedError: If the wrapped item is an iterable with more than
                two elements.
        """
        super_output = self.source_dataset[idx]
        if isinstance(super_output, Mapping):
            if 'idx' not in super_output:
                super_output['idx'] = idx
            return super_output
        if isinstance(super_output, Iterable):
            # Robustness fix: materialize first — ``len()`` on an arbitrary
            # Iterable (e.g. a generator) raises TypeError.
            items = list(super_output)
            if len(items) == 1:
                return {'input': items[0], 'idx': idx}
            if len(items) == 2:
                return {'input': items[0], 'label': items[1], 'idx': idx}
            # Typo fix in the original message ("super_ouput").
            raise NotImplementedError('Impossible to know what is in the list of super_output.')
        return {'input': super_output, 'idx': idx}

    def __len__(self):
        return len(self.source_dataset)
|
class DictCIFAR10(DictDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ dict dataset.

    Args:
        root: Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to ``True``.
        train: If ``True``, creates dataset from training set, otherwise
            creates from test set.
        transform: A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform: A function/transform that takes in the
            target and transforms it.
        download: If ``True``, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root: str, train: bool = True, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False) -> None:
        # Wrap the torchvision CIFAR10 dataset so samples come out as dicts.
        super().__init__(CIFAR10(root, train, transform, target_transform, download))
|
class DictCIFAR100(DictDataset):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ dict dataset.

    Args:
        root: Root directory of dataset where directory
            ``cifar-100-batches-py`` exists or will be saved to if download is set to ``True``.
        train: If ``True``, creates dataset from training set, otherwise
            creates from test set.
        transform: A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform: A function/transform that takes in the
            target and transforms it.
        download: If ``True``, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    def __init__(self, root: str, train: bool = True, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False) -> None:
        # Wrap the torchvision CIFAR100 dataset so samples come out as dicts.
        super().__init__(CIFAR100(root, train, transform, target_transform, download))
|
class DumbDataset(Dataset):
    """Dumb dataset that always provides random data. Useful for testing models or pipelines.

    Args:
        shape: shape of data to generate.
        len_dataset: length of the dataset. Used by dataloaders.
    """

    def __init__(self, shape: List[int], len_dataset: int) -> None:
        super().__init__()
        self.shape = list(shape)
        self.len_dataset = len_dataset

    def __getitem__(self, idx: int) -> Dict[(str, Any)]:
        """Return a freshly sampled random tensor with a constant label and the index."""
        return {'input': torch.randn(self.shape), 'label': 0, 'idx': idx}

    def __len__(self):
        return self.len_dataset
|
def has_file_allowed_extension(filename: str, extensions: Union[(str, Tuple[(str, ...)])]) -> bool:
    """Checks if a file is an allowed extension.

    Args:
        filename: Path to a file.
        extensions: Extensions to consider (lowercase).

    Returns:
        ``True`` if the filename ends with one of given extensions.
    """
    # str.endswith accepts either a single suffix or a tuple of suffixes.
    suffixes = extensions if isinstance(extensions, str) else tuple(extensions)
    return filename.lower().endswith(suffixes)
|
def is_image_file(filename: str) -> bool:
    """Checks whether ``filename`` ends with one of the known image extensions
    (``IMG_EXTENSIONS``).

    Args:
        filename: Path to a file.

    Returns:
        ``True`` if the filename ends with a known image extension.
    """
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
|
def find_classes(directory: str) -> Tuple[(List[str], Dict[(str, int)])]:
    """Finds the class folders in a dataset.

    See :class:`DatasetFolder` for details.
    """
    # Each immediate sub-directory is a class; files are ignored.
    with os.scandir(directory) as entries:
        classes = sorted(entry.name for entry in entries if entry.is_dir())
    if not classes:
        raise FileNotFoundError(f"Couldn't find any class folder in {directory}.")
    return classes, {name: index for index, name in enumerate(classes)}
|
def make_dataset(directory: str, class_to_idx: Optional[Dict[(str, int)]] = None, extensions: Optional[Union[(str, Tuple[(str, ...)])]] = None, is_valid_file: Optional[Callable[([str], bool)]] = None) -> List[Tuple[(str, int)]]:
    """Generates a list of samples of a form (path_to_sample, class).

    See :class:`DatasetFolder` for details.

    Note: The ``class_to_idx`` parameter is here optional and will use the logic of the ``find_classes`` function
    by default.
    """
    directory = os.path.expanduser(directory)
    if class_to_idx is None:
        _, class_to_idx = find_classes(directory)
    elif not class_to_idx:
        raise ValueError("'class_to_index' must have at least one entry to collect any samples.")
    # Exactly one of ``extensions`` / ``is_valid_file`` must be provided.
    if (extensions is None) == (is_valid_file is None):
        raise ValueError('Both extensions and is_valid_file cannot be None or not None at the same time')
    if extensions is not None:
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, extensions)
    is_valid_file = cast(Callable[([str], bool)], is_valid_file)
    samples: List[Tuple[(str, int)]] = []
    classes_with_samples = set()
    for class_name in sorted(class_to_idx):
        label = class_to_idx[class_name]
        class_dir = os.path.join(directory, class_name)
        if not os.path.isdir(class_dir):
            continue
        for root, _, filenames in sorted(os.walk(class_dir, followlinks=True)):
            for filename in sorted(filenames):
                full_path = os.path.join(root, filename)
                if is_valid_file(full_path):
                    samples.append((full_path, label))
                    classes_with_samples.add(class_name)
    missing = set(class_to_idx) - classes_with_samples
    if missing:
        message = f"Found no valid file for the classes {', '.join(sorted(missing))}. "
        if extensions is not None:
            message += f"Supported extensions are: {(extensions if isinstance(extensions, str) else ', '.join(extensions))}"
        raise FileNotFoundError(message)
    return samples
|
class DatasetFolder(VisionDataset):
    """A generic data loader.

    This default directory structure can be customized by overriding the
    :meth:`find_classes` method.

    Args:
        root: Root directory path.
        loader: A function to load a sample given its path.
        extensions: A list of allowed extensions.
            both extensions and is_valid_file should not be passed.
        transform: A function/transform that takes in
            a sample and returns a transformed version.
            E.g, ``transforms.RandomCrop`` for images.
        target_transform: A function/transform that takes
            in the target and transforms it.
        is_valid_file: A function that takes path of a file
            and check if the file is a valid file (used to check of corrupt files)
            both extensions and is_valid_file should not be passed.
        class_ratio: Ratio of classes to use if ``class_list`` is ``None``.
        sample_ratio: Ratio of samples to use.
        class_list: If not ``None``, list of classes to use.
        sample_list_path: If not ``None``, list of samples to use.
        seed: If not ``None``, seed used to randomly choose class and samples.
    """

    def __init__(self, root: str, loader: Callable[([str], Any)], extensions: Optional[Tuple[(str, ...)]] = None, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, is_valid_file: Optional[Callable[([str], bool)]] = None, class_ratio: float = 1.0, sample_ratio: float = 1.0, class_list: Optional[Iterable[str]] = None, sample_list_path: Optional[str] = None, seed: Optional[int] = None) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        assert (0 < class_ratio <= 1.0), 'class_ratio should be comprised between 0 (excluded) and 1. (included)'
        assert (0 < sample_ratio <= 1.0), 'sample_ratio should be comprised between 0 (excluded) and 1. (included)'
        classes, class_to_idx = self.find_classes(self.root)
        g = None
        if (class_ratio < 1.0) or (sample_ratio < 1.0):
            # Bug fix: the original called int(os.getenv('PL_GLOBAL_SEED')) directly,
            # which raises TypeError when the env var is unset and made the
            # ``global_seed is None`` check below unreachable.
            global_seed_env = os.getenv('PL_GLOBAL_SEED')
            global_seed = int(global_seed_env) if global_seed_env is not None else None
            if (global_seed is None) and (seed is None):
                rank_zero_warn('PL_GLOBAL_SEED environment variable is not defined as well as the seed argument, the default seed used is 0 for class_ratio and sample_ratio.')
            # Bug fix: ``seed or global_seed or 0`` silently discarded an explicit
            # seed of 0; prefer the argument, then the env var, then 0.
            if seed is None:
                seed = global_seed if global_seed is not None else 0
            g = torch.Generator()
            g.manual_seed(seed)
        if class_list is not None:
            for cls in class_list:
                if cls not in classes:
                    raise AttributeError(f'Class {cls} specified in class_list not found in folder.')
            classes = sorted(class_list)
            class_to_idx = {class_name: i for (i, class_name) in enumerate(classes)}
        elif class_ratio < 1.0:
            num_classes = round(len(classes) * class_ratio)
            # Bug fix: the seeded generator was created but never passed to
            # torch.randperm, so the class subset did not depend on the seed.
            indices = list(torch.randperm(len(classes), generator=g)[:num_classes])
            classes = sorted(classes[idx] for idx in indices)
            class_to_idx = {cls: class_to_idx[cls] for cls in classes}
        samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file)
        if sample_list_path is not None:
            with open(sample_list_path) as f:
                # splitlines() avoids the spurious empty entry that split('\n')
                # produces for files ending with a newline (KeyError below).
                sample_list = f.read().splitlines()
            fn_to_sample = {elt[0].split('/')[-1]: elt for elt in samples}
            samples = [fn_to_sample[file] for file in sample_list]
        if sample_ratio < 1.0:
            num_images = round(len(samples) * sample_ratio)
            # Same seeding fix as above for the sample subset.
            indices = list(torch.randperm(len(samples), generator=g)[:num_images])
            samples = [samples[idx] for idx in indices]
        self.loader = loader
        self.extensions = extensions
        self.classes = classes
        self.class_to_idx = class_to_idx
        self.samples = samples
        self.targets = [s[1] for s in samples]

    @staticmethod
    def make_dataset(directory: str, class_to_idx: Dict[(str, int)], extensions: Optional[Tuple[(str, ...)]] = None, is_valid_file: Optional[Callable[([str], bool)]] = None) -> List[Tuple[(str, int)]]:
        """Generates a list of samples of a form (path_to_sample, class).

        This can be overridden to e.g. read files from a compressed zip file instead of from the disk.

        Args:
            directory: Root dataset directory, corresponding to ``self.root``.
            class_to_idx: Dictionary mapping class name to class index.
            extensions: A list of allowed extensions.
                Either extensions or is_valid_file should be passed.
            is_valid_file: A function that takes path of a file
                and checks if the file is a valid file
                (used to check of corrupt files) both extensions and
                ``is_valid_file`` should not be passed.

        Raises:
            ValueError: In case ``class_to_idx`` is empty.
            ValueError: In case ``extensions`` and ``is_valid_file`` are ``None`` or both are not ``None``.
            FileNotFoundError: In case no valid file was found for any class.

        Returns:
            Samples of a form (path_to_sample, class)
        """
        if class_to_idx is None:
            # Unlike the module-level make_dataset, None is rejected here so the
            # class-level override contract stays explicit.
            raise ValueError('The class_to_idx parameter cannot be None.')
        return make_dataset(directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file)

    def find_classes(self, directory: str) -> Tuple[(List[str], Dict[(str, int)])]:
        """Find the class folders in a dataset where each immediate sub-directory of
        ``directory`` is one class.

        This method can be overridden to only consider
        a subset of classes, or to adapt to a different dataset directory structure.

        Args:
            directory: Root directory path, corresponding to ``self.root``

        Raises:
            FileNotFoundError: If ``dir`` has no class folders.

        Returns:
            List of all classes and dictionary mapping each class to an index.
        """
        return find_classes(directory)

    def __getitem__(self, index: int) -> Tuple[(Any, Any)]:
        """
        Args:
            index (int): Index

        Returns:
            Dict with the loaded (and transformed) sample, its class index as
            ``label`` and the dataset index as ``idx``.
        """
        path, target = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return {'input': sample, 'label': target, 'idx': index}

    def __len__(self) -> int:
        return len(self.samples)
|
def pil_loader(path: str) -> Image.Image:
    """Load the image at ``path`` with PIL and return it converted to RGB."""
    with open(path, 'rb') as file_obj:
        image = Image.open(file_obj)
        # Convert while the file is still open: PIL decodes lazily.
        return image.convert('RGB')
|
def accimage_loader(path: str) -> Any:
    """Load the image at ``path`` with accimage, falling back to PIL on failure."""
    import accimage
    try:
        return accimage.Image(path)
    except OSError:
        # accimage cannot decode some files; defer to the PIL loader instead.
        return pil_loader(path)
|
def default_loader(path: str) -> Any:
    """Load an image using the image backend configured in torchvision
    (``accimage`` when selected, PIL otherwise)."""
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
|
class ImageFolder(DatasetFolder):
    """A generic data loader where the images are arranged in this way by default: ::

        root/dog/xxx.png
        root/dog/xxy.png
        root/dog/[...]/xxz.png

        root/cat/123.png
        root/cat/nsdf3.png
        root/cat/[...]/asd932_.png

    This class inherits from :class:`~torchvision.datasets.DatasetFolder` so
    the same methods can be overridden to customize the dataset.

    Args:
        root: Root directory path.
        transform: A function/transform that takes in an PIL image
            and returns a transformed version. E.g, ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        loader: A function to load an image given its path.
        is_valid_file: A function that takes path of an Image file
            and check if the file is a valid file (used to check of corrupt files)
        class_ratio: Ratio of classes to use if ``class_list`` is ``None``. Defaults to :math:`1`.
        sample_ratio: Ratio of samples to use. Defaults to `math`:1:.
        class_list: If not ``None``, List of classes to use. Defaults to ``None``.
        seed: If not ``None``, seed used to randomly choose class and samples. Defaults to ``None``.

    Attributes:
        classes: List of the class names sorted alphabetically.
        class_to_idx: Dict with items (class_name, class_index).
        imgs: List of (image path, class_index) tuples
    """

    def __init__(self, root: str, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, loader: Callable[([str], Any)] = default_loader, is_valid_file: Optional[Callable[([str], bool)]] = None, class_ratio: float = 1.0, sample_ratio: float = 1.0, sample_list_path: Optional[str] = None, class_list: Optional[Iterable[str]] = None, seed: Optional[int] = None):
        # Use the default image extensions unless a custom validity check is given.
        extensions = None if is_valid_file is not None else IMG_EXTENSIONS
        super().__init__(root, loader, extensions, transform=transform, target_transform=target_transform, is_valid_file=is_valid_file, class_ratio=class_ratio, sample_ratio=sample_ratio, sample_list_path=sample_list_path, class_list=class_list, seed=seed)
        # Alias kept for torchvision ImageFolder compatibility.
        self.imgs = self.samples
|
def create_hmdb51_files_for_frames(folder_files: str, frames_folder: str, split_id: int):
    """Create the HMDB51 csv files for frame decoders.

    Args:
        folder_files: Path to the original hmdb51 split files.
        frames_folder: Path to the frame folders.
        split_id: The split id.

    Raises:
        ImportError: If pandas is not installed.
    """
    if not _HAS_PD:
        raise ImportError('pandas is required to use this function.')
    folder_files = Path(folder_files)
    frames_folder = Path(frames_folder)
    # Original split files are named "<action_name>_test_split<id>.txt".
    file_name_format = '_test_split' + str(int(split_id))
    files = sorted(f for f in folder_files.iterdir() if f.is_file() and (f.suffix == '.txt') and (file_name_format in f.stem))
    action_dict = {}
    per_action_frames = []
    for file in files:
        curr_data = pandas.read_csv(file, sep=' ', header=None, names=['video', 'split'], usecols=[0, 1])
        # Recover the action name by stripping the "_test_split<id>" suffix parts.
        action_name = '_'.join(file.stem.split('_')[:-2])
        if action_name not in action_dict:
            action_dict[action_name] = len(action_dict)
        curr_data['class'] = action_dict[action_name]
        curr_data.video = (action_name + '/') + curr_data.video
        curr_data.video = curr_data.video.map(remove_suffix)
        curr_data['duration'] = curr_data.video.map(partial(get_raw_video_duration, frames_folder))
        per_action_frames.append(curr_data)
    data = pandas.concat(per_action_frames, axis=0)
    split_to_num = {'train': 1, 'test': 2, 'unlabeled': 0}
    # Fix: the loop variable used to be named ``set``, shadowing the builtin.
    for split_name in ('train', 'test', 'unlabeled'):
        output_file = frames_folder / f'{split_name}list{split_id:02d}.txt'
        output_data = data.loc[(split_to_num[split_name] == data.split)]
        output_data = output_data.drop(columns=['split'])
        output_data.to_csv(output_file, sep=' ', header=None, index=None)
|
class Hmdb51LabeledVideoPaths():
'Pre-processor for Hmbd51 dataset mentioned here - https://serre-lab.clps.brown.edu/resource/hmdb-a-large-\n human-motion-database/\n\n This dataset consists of classwise folds with each class consisting of 3\n folds (splits).\n\n The videos directory is of the format,\n video_dir_path/class_x/<somevideo_name>.avi\n ...\n video_dir_path/class_y/<somevideo_name>.avi\n\n The splits/fold directory is of the format,\n folds_dir_path/class_x_test_split_1.txt\n folds_dir_path/class_x_test_split_2.txt\n folds_dir_path/class_x_test_split_3.txt\n ...\n folds_dir_path/class_y_test_split_1.txt\n folds_dir_path/class_y_test_split_2.txt\n folds_dir_path/class_y_test_split_3.txt\n\n And each text file in the splits directory class_x_test_split_<1 or 2 or 3>.txt\n <a video as in video_dir_path/class_x> <0 or 1 or 2>\n where 0,1,2 corresponds to unused, train split respectively.\n\n Each video has name of format\n <some_name>_<tag1>_<tag2>_<tag_3>_<tag4>_<tag5>_<some_id>.avi\n For more details on tags -\n https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/\n '
_allowed_splits = [1, 2, 3]
_split_type_dict = {'train': 1, 'test': 2, 'unused': 0}
@classmethod
def from_path(cls, data_path: str, split_id: int=1, split_type: str='train', frames: bool=False) -> Hmdb51LabeledVideoPaths:
"Factory function that creates a Hmdb51LabeledVideoPaths object depending on the path type.\n\n - If it is a directory path it uses the Hmdb51LabeledVideoPaths.from_directory function.\n - If it's a file it uses the Hmdb51LabeledVideoPaths.from_csv file.\n Args:\n file_path: The path to the file or directory to be read.\n split_id: Split id. Used if path is a directory.\n split_type: Split type. Used if path is a directory.\n frames: If ``True``, UCF101 is loaded as a frame dataset.\n Returns:\n The Hmdb51LabeledVideoPaths object.\n "
if g_pathmgr.isfile(data_path):
if (pathlib.Path(data_path).suffix == '.json'):
return Hmdb51LabeledVideoPaths.from_json(data_path)
return Hmdb51LabeledVideoPaths.from_csv(data_path)
elif g_pathmgr.isdir(data_path):
split_list = os.path.join(data_path, f'{split_type}list0{split_id}.txt')
if g_pathmgr.isfile(split_list):
return Hmdb51LabeledVideoPaths.from_csv(split_list)
return Hmdb51LabeledVideoPaths.from_directory(data_path, split_id, split_type, frames)
else:
raise FileNotFoundError(f'{data_path} not found.')
@classmethod
def from_csv(cls, file_path: str) -> Hmdb51LabeledVideoPaths:
'Factory function that creates a Hmdb51LabeledVideoPaths object by reading a file with the following\n format:\n\n <path> <integer_label>\n ...\n <path> <integer_label>\n\n Args:\n file_path: The path to the file to be read.\n\n Returns:\n The Hmdb51LabeledVideoPaths object.\n '
assert g_pathmgr.exists(file_path), f'{file_path} not found.'
video_paths_and_label = []
with g_pathmgr.open(file_path, 'r') as f:
for path_label in f.read().splitlines():
line_split = path_label.rsplit(None, 2)
if (len(line_split) == 1):
file_path = line_split[0]
label = (- 1)
num_frames = None
elif (len(line_split) == 2):
(file_path, label) = line_split
num_frames = None
else:
(file_path, label, num_frames) = line_split
num_frames = int(num_frames)
video_paths_and_label.append((file_path, int(label), num_frames))
assert (len(video_paths_and_label) > 0), f'Failed to load dataset from {file_path}.'
return cls(video_paths_and_label)
@classmethod
def from_json(cls, file_path: str) -> Hmdb51LabeledVideoPaths:
'Factory function that creates a Hmdb51LabeledVideoPaths object by reading a json file.\n\n Args:\n file_path: The path to the file to be read.\n\n Returns:\n The Hmdb51LabeledVideoPaths object.\n '
assert g_pathmgr.exists(file_path), f'{file_path} not found.'
video_paths_and_label = []
json_content = json.load(open(file_path))
annotation = json_content['annotation']
videos_id = sorted([x for x in annotation.keys()])
for video_id in videos_id:
label = annotation[video_id]['class']
num_frames = (annotation[video_id]['num_frames'] if ('num_frames' in annotation['video_id']) else None)
video_path = f'{video_id}'
video_paths_and_label.append((video_path, int(label), int(num_frames)))
assert (len(video_paths_and_label) > 0), f'Failed to load dataset from {file_path}.'
return cls(video_paths_and_label)
@classmethod
def from_directory(cls, data_path: str, split_id: int=1, split_type: str='train', frames: bool=False) -> Hmdb51LabeledVideoPaths:
'Factory function that creates Hmdb51LabeledVideoPaths object form a splits/folds directory.\n\n Args:\n data_path (str): The path to the splits/folds directory of HMDB51.\n split_id (int): Fold id to be loaded. Belongs to [1,2,3]\n split_type (str): Split/Fold type to be loaded. It belongs to one of the\n following,\n - "train"\n - "test"\n - "unused" (This is a small set of videos that are neither\n of part of test or train fold.)\n frames (bool): If True, UCF101 is loaded as a frame dataset.\n\n Returns:\n The Hmdb51LabeledVideoPaths object.\n '
data_path = pathlib.Path(data_path)
if (not data_path.is_dir()):
raise RuntimeError(f'{data_path} not found or is not a directory.')
if (not (int(split_id) in cls._allowed_splits)):
raise RuntimeError(f"{split_id} not found in allowed split id's {cls._allowed_splits}.")
file_name_format = ('_test_split' + str(int(split_id)))
file_paths = sorted((f for f in data_path.iterdir() if (f.is_file() and (f.suffix == '.txt') and (file_name_format in f.stem))))
return cls.from_csvs(file_paths, split_type, frames)
@classmethod
def from_csvs(cls, file_paths: list[(pathlib.Path | str)], split_type: str='train', frames: bool=False) -> Hmdb51LabeledVideoPaths:
'Factory function that creates Hmdb51LabeledVideoPaths object form a list of split files of .txt type.\n\n Args:\n file_paths (List[Union[pathlib.Path, str]]) : The path to the splits/folds\n directory of HMDB51.\n split_type (str): Split/Fold type to be loaded.\n - "train"\n - "test"\n - "unused"\n frames: If True, search for the number of frames for each videos.\n\n Returns:\n The LabeledVideoPaths object.\n '
action_name_to_class = {}
video_paths_and_label = []
for file_path in file_paths:
file_path = pathlib.Path(file_path)
assert g_pathmgr.exists(file_path), f'{file_path} not found.'
if (not ((file_path.suffix == '.txt') and ('_test_split' in file_path.stem))):
raise RuntimeError(f'Invalid file: {file_path}')
action_name = '_'
action_name = action_name.join(file_path.stem.split('_')[:(- 2)])
if (action_name not in action_name_to_class):
action_name_to_class[action_name] = len(action_name_to_class)
with g_pathmgr.open(file_path, 'r') as f:
for path_label in f.read().splitlines():
line_split = path_label.rsplit(None, 1)
if (not (int(line_split[1]) == cls._split_type_dict[split_type])):
continue
video_name = pathlib.Path(line_split[0])
if frames:
file_path = (pathlib.Path(action_name) / video_name.stem)
num_frames = len(list(file_path.iterdir()))
else:
file_path = (pathlib.Path(action_name) / video_name)
num_frames = None
video_paths_and_label.append((str(file_path), action_name_to_class[action_name], num_frames))
assert (len(video_paths_and_label) > 0), f'Failed to load dataset from {file_path}.'
return cls(video_paths_and_label)
def __init__(self, paths_and_labels: list[tuple[(str, (dict | None))]], path_prefix='') -> None:
    """Store the video paths with their labels.

    Args:
        paths_and_labels: List of ``(path, label, num_frames)`` tuples as
            built by the factory classmethods.
        path_prefix: Prefix prepended to every video path on access.
    """
    self._paths_and_labels = paths_and_labels
    self._path_prefix = path_prefix
@property
def path_prefix(self):
    """The prefix prepended to video paths."""
    # Fix: the property was previously write-only (``property(None, setter)``);
    # adding a getter matches LabeledVideoPaths and is backward-compatible.
    return self._path_prefix

@path_prefix.setter
def path_prefix(self, prefix):
    self._path_prefix = prefix
def __getitem__(self, index: int) -> tuple[(str, dict)]:
    """Return the prefixed path and info dict at ``index``.

    Args:
        index: The path and label index.

    Returns:
        Tuple of the full video path and a dict holding ``label`` and
        ``num_frames``.
    """
    video_path, class_label, frame_count = self._paths_and_labels[index]
    full_path = os.path.join(self._path_prefix, video_path)
    return full_path, {'label': class_label, 'num_frames': frame_count}
def __len__(self) -> int:
    """Return the number of video path / label pairs."""
    return len(self._paths_and_labels)
|
def Hmdb51(data_path: pathlib.Path, clip_sampler: ClipSampler, transform: (Callable[([dict], Any)] | None)=None, video_path_prefix: str='', split_id: int=1, split_type: str='train', decode_audio=True, decoder: str='pyav', decoder_args: (DictConfig | None)=None) -> LabeledVideoDataset:
    """A helper function to create ``LabeledVideoDataset`` object for HMDB51 dataset.

    Args:
        data_path: Path to the data. For a file path, the file is read and each
            line is parsed into a video path and label. For a directory, the
            directory structure defines the classes (i.e. each subdirectory is
            a class).
        clip_sampler: Defines how clips should be sampled from each video. See
            the clip sampling documentation for more information.
        transform: This callable is evaluated on the clip output before the
            clip is returned. It can be used for user defined preprocessing and
            augmentations to the clips. See the ``LabeledVideoDataset`` class
            for clip output format.
        video_path_prefix: Path to root directory with the videos that are
            loaded in LabeledVideoDataset. All the video paths before loading
            are prefixed with this path.
        split_id: Fold id to be loaded. Options are 1, 2 or 3.
        split_type: Split/Fold type to be loaded. Options are ("train", "test"
            or "unused").
        decode_audio: If True, also decode audio from video.
        decoder: Defines which backend should be used to decode videos.
        decoder_args: Arguments to configure the decoder.

    Returns:
        The dataset instantiated.
    """
    torch._C._log_api_usage_once('PYTORCHVIDEO.dataset.Hmdb51')
    # Fix: use a None sentinel instead of a shared mutable ``{}`` default.
    decoder_args = {} if decoder_args is None else decoder_args
    labeled_video_paths = Hmdb51LabeledVideoPaths.from_path(data_path, split_id=split_id, split_type=split_type, frames=(DecoderType(decoder) == DecoderType.FRAME))
    labeled_video_paths.path_prefix = video_path_prefix
    dataset = LabeledVideoDataset(labeled_video_paths, clip_sampler, transform, decode_audio=decode_audio, decoder=decoder, decoder_args=decoder_args)
    return dataset
|
def Kinetics(data_path: str, clip_sampler: ClipSampler, transform: Optional[Callable[([Dict[(str, Any)]], Dict[(str, Any)])]]=None, video_path_prefix: str='', decode_audio: bool=True, decoder: str='pyav', decoder_args: (DictConfig | None)=None) -> LabeledVideoDataset:
    """A helper function to create ``LabeledVideoDataset`` object for the Kinetics dataset.

    Args:
        data_path: Path to the data. For a file path, the file is read and each
            line is parsed into a video path and label. For a directory, the
            directory structure defines the classes (i.e. each subdirectory is
            a class).
        clip_sampler: Defines how clips should be sampled from each video. See
            the clip sampling documentation for more information.
        transform: This callable is evaluated on the clip output before the
            clip is returned. It can be used for user defined preprocessing and
            augmentations to the clips. See the ``LabeledVideoDataset`` class
            for clip output format.
        video_path_prefix: Path to root directory with the videos that are
            loaded in ``LabeledVideoDataset``. All the video paths before
            loading are prefixed with this path.
        decode_audio: If True, also decode audio from video.
        decoder: Defines what type of decoder used to decode a video.
        decoder_args: Arguments to configure the decoder.

    Returns:
        The dataset instantiated.
    """
    torch._C._log_api_usage_once('PYTORCHVIDEO.dataset.Kinetics')
    # Fix: use a None sentinel instead of a shared mutable ``{}`` default.
    decoder_args = {} if decoder_args is None else decoder_args
    return labeled_video_dataset(data_path, clip_sampler, transform, video_path_prefix, decode_audio, decoder, decoder_args)
|
def create_video_files_from_folder(folder: str, output_folder: str, output_filename: str = 'train.csv'):
    """Create the csv file listing the dataset videos and their class index.

    Args:
        folder: Path to the video folder (one sub-folder per class).
        output_folder: Folder where the csv file is written.
        output_filename: Name of the output csv file.

    Raises:
        ImportError: If pandas is not installed.
    """
    if not _HAS_PD:
        raise ImportError('pandas is required to use this function.')
    root = Path(folder)
    destination = Path(output_folder) / output_filename
    # Classes are the sub-folder names, indexed alphabetically from 0.
    class_names = sorted(entry.name for entry in root.iterdir() if entry.is_dir())
    class_to_idx = {name: index for index, name in enumerate(class_names)}
    rows = []
    for class_dir in sorted(root.iterdir()):
        if not class_dir.is_dir():
            continue
        for video_file in sorted(class_dir.iterdir()):
            if video_file.is_file():
                rows.append([f'{class_dir.name}/{video_file.name}', class_to_idx[class_dir.name]])
    table = pandas.DataFrame(rows, columns=['video', 'class'])
    table.to_csv(destination, sep=' ', header=None, index=None)
|
def create_frames_files_from_folder(folder: str, output_folder: str, output_filename: str = 'train.csv'):
    """Create the dataset csv file for frame decoders.

    Each row stores the video folder, its class index and its duration.

    Args:
        folder: Path to the frame folders (one sub-folder per class, one
            sub-folder per video inside each class).
        output_folder: Folder where the csv file is written.
        output_filename: Name of the output csv file.

    Raises:
        ImportError: If pandas is not installed.
    """
    if not _HAS_PD:
        raise ImportError('pandas is required to use this function.')
    root = Path(folder)
    destination = Path(output_folder) / output_filename
    # Classes are the sub-folder names, indexed alphabetically from 0.
    class_names = sorted(entry.name for entry in root.iterdir() if entry.is_dir())
    class_to_idx = {name: index for index, name in enumerate(class_names)}
    rows = []
    for class_dir in sorted(root.iterdir()):
        if not class_dir.is_dir():
            continue
        for video_dir in sorted(class_dir.iterdir()):
            if video_dir.is_dir():
                rows.append([f'{class_dir.name}/{video_dir.name}', class_to_idx[class_dir.name], get_raw_video_duration('', video_dir)])
    table = pandas.DataFrame(rows, columns=['video', 'class', 'duration'])
    table.to_csv(destination, sep=' ', header=None, index=None)
|
class LabeledVideoDataset(Dataset):
    """LabeledVideoDataset handles the storage, loading, decoding and clip sampling for a video dataset.

    It assumes each video is stored as either an encoded video (e.g. mp4, avi)
    or a frame video (e.g. a folder of jpg, or png).

    Args:
        labeled_video_paths: List containing video file paths and associated
            labels. If video paths are a folder it's interpreted as a frame
            video, otherwise it must be an encoded video.
        clip_sampler: Defines how clips should be sampled from each video.
        transform: This callable is evaluated on the clip output before the
            clip is returned. It can be used for user defined preprocessing
            and augmentations on the clips.
        decode_audio: If True, also decode audio from video.
        decoder: Defines what type of decoder used to decode a video.
        decoder_args: Arguments to configure the decoder.
    """

    # How many different videos are tried before giving up on an index.
    _MAX_CONSECUTIVE_FAILURES = 10

    def __init__(self, labeled_video_paths: list[tuple[(str, (dict | None))]], clip_sampler: ClipSampler, transform: (Callable[([dict], Any)] | None)=None, decode_audio: bool=True, decoder: str='pyav', decoder_args: (DictConfig | None)=None) -> None:
        self._decode_audio = decode_audio
        self._transform = transform
        self._clip_sampler = clip_sampler
        self._labeled_videos = labeled_video_paths
        self._decoder = decoder
        # Fix: use a None sentinel so instances never share a mutable
        # default ``{}``.
        self._decoder_args = {} if decoder_args is None else decoder_args
        self._database = None
        self._loaded_video_label = None
        self._loaded_clip = None
        self._next_clip_start_time = 0.0
        self.video_path_handler = VideoPathHandler()

    def __len__(self):
        """
        Returns:
            Number of videos in dataset.
        """
        return len(self._labeled_videos)

    def __getitem__(self, idx: int) -> dict:
        """Retrieve the next clip based on the clip sampling strategy and video sampler.

        On a decoding failure, a new random video is drawn, up to
        ``_MAX_CONSECUTIVE_FAILURES`` times.

        Returns:
            A dictionary with the following format.

            .. code-block:: text

                {
                    'input': <video_tensor>,
                    'label': <index_label>,
                    'video_label': <index_label>
                    'idx': <idx>,
                    'clip_index': <clip_index>,
                    'aug_index': <aug_index>,
                }
        """
        for i_try in range(self._MAX_CONSECUTIVE_FAILURES):
            try:
                (video_path, info_dict) = self._labeled_videos[idx]
                video = self.video_path_handler.video_from_path(video_path, decode_audio=self._decode_audio, decoder=self._decoder, num_frames=info_dict['num_frames'], **self._decoder_args)
                self._loaded_video_label = (video, info_dict)
            except Exception as e:
                # Decoding failed: retry with another randomly chosen video.
                old_idx = idx
                idx = random.randint(0, len(self._labeled_videos) - 1)
                warnings.warn('Failed to load video idx {} with error: {}; trial {}'.format(old_idx, e, i_try))
                continue
            sample_dicts = self._load_clips_recursively(video, info_dict, idx, i_try)
            # Release decoder resources and reset sampling state for the
            # next call.
            self._loaded_video_label[0].close()
            self._loaded_video_label = None
            self._next_clip_start_time = 0.0
            self._clip_sampler.reset()
            if sample_dicts is None:
                idx = random.randint(0, len(self._labeled_videos) - 1)
                continue
            return sample_dicts
        else:
            raise RuntimeError(f'Failed to load video after {self._MAX_CONSECUTIVE_FAILURES} retries.')

    def _load_clips_recursively(self, video: Any, info_dict: dict[(str, Any)], idx: int, i_try: int) -> ((Any | list[Any]) | None):
        """Sample and load clips until the sampler reports the last clip.

        Returns a single sample dict when only one clip was sampled, a list
        of sample dicts otherwise, or ``None`` when any clip failed to load.
        """
        is_last_clip = False
        is_first_clip = True
        sample_dicts = []
        while not is_last_clip:
            (clip_start, clip_end, clip_index, aug_index, is_last_clip) = self._clip_sampler(self._next_clip_start_time, video.duration, info_dict)
            sample_dict = self._load_clip(video, clip_start, clip_end, clip_index, aug_index, info_dict, idx, i_try)
            if sample_dict is None:
                return None
            # Multi-clip samplers return a list of flags; the last one decides.
            is_last_clip = is_last_clip[-1] if isinstance(is_last_clip, list) else is_last_clip
            if is_last_clip:
                if is_first_clip:
                    # Single clip overall: return it directly, not in a list.
                    return sample_dict
                if type(sample_dict) is list:
                    sample_dicts.extend(sample_dict)
                else:
                    sample_dicts.append(sample_dict)
                return sample_dicts
            elif type(sample_dict) is list:
                sample_dicts.extend(sample_dict)
            else:
                sample_dicts.append(sample_dict)
            is_first_clip = False

    def _load_clip(self, video: Any, clip_start: (float | list[float]), clip_end: (float | list[float]), clip_index: int, aug_index: int, info_dict: dict[(str, Any)], idx: int, i_try: int) -> (dict[(str, Any)] | None):
        """Decode one clip (or a list of clip windows) and build its sample dict.

        Returns ``None`` when decoding fails.
        """
        if isinstance(clip_start, list):
            # Multi-clip case: only decode on the first augmentation index;
            # later augmentations reuse ``self._loaded_clip``.
            if aug_index[0] == 0:
                self._loaded_clip = {}
                loaded_clip_list = []
                for i in range(len(clip_start)):
                    clip_dict = video.get_clip(clip_start[i], clip_end[i])
                    if (clip_dict is None) or (clip_dict['video'] is None):
                        self._loaded_clip = None
                        break
                    loaded_clip_list.append(clip_dict)
                if self._loaded_clip is not None:
                    # Transpose the list of clip dicts into a dict of lists.
                    for key in loaded_clip_list[0].keys():
                        self._loaded_clip[key] = [x[key] for x in loaded_clip_list]
        elif aug_index == 0:
            self._loaded_clip = video.get_clip(clip_start, clip_end)
        self._next_clip_start_time = clip_end
        # Fix: this condition was previously tested twice in a redundant
        # nested ``if``.
        if (self._loaded_clip is None) or (self._loaded_clip['video'] is None):
            warnings.warn('Failed to load clip {} idx {}; trial {}'.format(video.name, idx, i_try))
            return None
        frames = self._loaded_clip['video']
        audio_samples = self._loaded_clip['audio']
        sample_dict = {'input': frames, 'video_name': video.name, 'idx': idx, 'clip_index': clip_index, 'aug_index': aug_index, **info_dict, **({'audio': audio_samples} if (audio_samples is not None) else {})}
        if self._transform is not None:
            sample_dict = self._transform(sample_dict)
        return sample_dict
|
def labeled_video_dataset(data_path: str, clip_sampler: ClipSampler, transform: (Callable[([dict[(str, Any)]], dict[(str, Any)])] | None)=None, video_path_prefix: str='', decode_audio: bool=True, decoder: str='pyav', decoder_args: (DictConfig | None)=None) -> LabeledVideoDataset:
    """A helper function to create ``LabeledVideoDataset`` object for HMDB51, Ucf101 and Kinetics datasets.

    Args:
        data_path: Path to the data. For a file path, the file is read and each
            line is parsed into a video path and label. For a directory, the
            directory structure defines the classes (i.e. each subdirectory is
            a class).
        clip_sampler: Defines how clips should be sampled from each video. See
            the clip sampling documentation for more information.
        transform: This callable is evaluated on the clip output before the
            clip is returned. It can be used for user defined preprocessing and
            augmentations to the clips. See the ``LabeledVideoDataset`` class
            for clip output format.
        video_path_prefix: Path to root directory with the videos that are
            loaded in ``LabeledVideoDataset``. All the video paths before
            loading are prefixed with this path.
        decode_audio: If True, also decode audio from video.
        decoder: Defines what type of decoder used to decode a video.
        decoder_args: Arguments to configure the decoder.

    Returns:
        The dataset instantiated.
    """
    # Fix: use a None sentinel instead of a shared mutable ``{}`` default.
    decoder_args = {} if decoder_args is None else decoder_args
    labeled_video_paths = LabeledVideoPaths.from_path(data_path)
    labeled_video_paths.path_prefix = video_path_prefix
    dataset = LabeledVideoDataset(labeled_video_paths, clip_sampler, transform, decode_audio=decode_audio, decoder=decoder, decoder_args=decoder_args)
    return dataset
|
class LabeledVideoPaths():
    """LabeledVideoPaths contains pairs of video path and integer index label.

    Args:
        paths_and_labels: a list of tuples containing the video path and
            integer label.
        path_prefix: Prefix prepended to every video path on access.
    """

    @classmethod
    def from_path(cls, data_path: str) -> LabeledVideoPaths:
        """Factory function that creates a LabeledVideoPaths object depending on the path type.

        - Directory: :meth:`LabeledVideoPaths.from_directory`.
        - ``.json`` file: :meth:`LabeledVideoPaths.from_json`.
        - Any other file: :meth:`LabeledVideoPaths.from_csv`.

        Args:
            data_path: The path to the file to be read.

        Returns:
            The LabeledVideoPaths object.

        Raises:
            FileNotFoundError: If ``data_path`` does not exist.
        """
        if g_pathmgr.isfile(data_path):
            if pathlib.Path(data_path).suffix == '.json':
                return LabeledVideoPaths.from_json(data_path)
            return LabeledVideoPaths.from_csv(data_path)
        elif g_pathmgr.isdir(data_path):
            return LabeledVideoPaths.from_directory(data_path)
        else:
            raise FileNotFoundError(f'{data_path} not found.')

    @classmethod
    def from_csv(cls, file_path: str) -> LabeledVideoPaths:
        """Factory function that creates a LabeledVideoPaths object by reading a file with the following format:

            <path> <integer_label> [<num_frames>]
            ...
            <path> <integer_label> [<num_frames>]

        Lines with only a path get label ``-1``; ``num_frames`` is optional.

        Args:
            file_path: The path to the file to be read.

        Returns:
            The LabeledVideoPaths object.
        """
        assert g_pathmgr.exists(file_path), f'{file_path} not found.'
        video_paths_and_label = []
        with g_pathmgr.open(file_path, 'r') as f:
            for path_label in f.read().splitlines():
                line_split = path_label.rsplit(None, 2)
                # Fix: the parsed path no longer shadows ``file_path``, so the
                # failure message below reports the actual dataset file.
                if len(line_split) == 1:
                    video_path = line_split[0]
                    label = -1
                    num_frames = None
                elif len(line_split) == 2:
                    (video_path, label) = line_split
                    num_frames = None
                else:
                    (video_path, label, num_frames) = line_split
                    num_frames = int(num_frames)
                video_paths_and_label.append((video_path, int(label), num_frames))
        assert len(video_paths_and_label) > 0, f'Failed to load dataset from {file_path}.'
        return cls(video_paths_and_label)

    @classmethod
    def from_json(cls, file_path: str) -> LabeledVideoPaths:
        """Factory function that creates a LabeledVideoPaths object by reading a json file.

        Args:
            file_path: The path to the file to be read.

        Returns:
            The LabeledVideoPaths object.
        """
        assert g_pathmgr.exists(file_path), f'{file_path} not found.'
        # Fix: close the file deterministically instead of relying on GC.
        with open(file_path) as f:
            json_content = json.load(f)
        annotation = json_content['annotation']
        video_paths_and_label = []
        for video_id in sorted(annotation.keys()):
            label = annotation[video_id]['class']
            num_frames = annotation[video_id]['num_frames']
            video_path = f'{video_id}'
            video_paths_and_label.append((video_path, int(label), int(num_frames)))
        assert len(video_paths_and_label) > 0, f'Failed to load dataset from {file_path}.'
        return cls(video_paths_and_label)

    @classmethod
    def from_directory(cls, dir_path: str) -> LabeledVideoPaths:
        """Factory function that creates a LabeledVideoPaths object by parsing the structure of the given
        directory's subdirectories into the classification labels. It expects the directory format to be the
        following: dir_path/<class_name>/<video_name>.mp4.

        Classes are indexed from 0 to the number of classes, alphabetically.

        E.g.
            dir_path/class_x/xxx.ext
            dir_path/class_x/xxy.ext
            dir_path/class_y/123.ext
            dir_path/class_y/asd932_.ext

        Would produce two classes labeled 0 and 1 with video paths associated
        with each.

        Args:
            dir_path: Root directory to the video class directories.

        Returns:
            The LabeledVideoPaths object.
        """
        assert g_pathmgr.exists(dir_path), f'{dir_path} not found.'
        classes = sorted(f.name for f in pathlib.Path(dir_path).iterdir() if f.is_dir())
        class_to_idx = {classes[i]: i for i in range(len(classes))}
        video_paths_and_label = make_dataset(dir_path, class_to_idx, extensions=('mp4', 'avi'))
        # Normalize to 3-tuples; frame counts are unknown for encoded videos.
        video_paths_and_label = [(video, label, None) for (video, label) in video_paths_and_label]
        assert len(video_paths_and_label) > 0, f'Failed to load dataset from {dir_path}.'
        return cls(video_paths_and_label)

    def __init__(self, paths_and_labels: list[tuple[(str, (int | None))]], path_prefix='') -> None:
        self._paths_and_labels = paths_and_labels
        self._path_prefix = path_prefix

    @property
    def path_prefix(self):
        """The prefix to add to video paths."""
        return self._path_prefix

    @path_prefix.setter
    def path_prefix(self, value):
        self._path_prefix = value

    def __getitem__(self, index: int) -> tuple[(str, int)]:
        """
        Args:
            index: the path and label index.

        Returns:
            The path and label tuple for the given index.
        """
        (path, label, num_frames) = self._paths_and_labels[index]
        return (os.path.join(self._path_prefix, path), {'label': label, 'num_frames': num_frames})

    def __len__(self) -> int:
        """
        Returns:
            The number of video paths and label pairs.
        """
        return len(self._paths_and_labels)
|
def load_features(features_dir: (str | Path), video_paths: List[(str | Path)], filename: str, video_zip_prefix: str='', as_tensor: bool=False) -> dict[(int, dict[(int, (np.ndarray | Tensor))])]:
    """Load SoccerNet features.

    Args:
        features_dir: Directory or zip where the features are stored.
        video_paths: Path to the videos in directory.
        filename: Stem of the files containing the features; half ``h`` of a
            video is read from ``<video_path>/<h>_<filename>.npy``.
        video_zip_prefix: Video zip prefix if features stored in Zip.
        as_tensor: Whether to decode the features as tensor or numpy.

    Returns:
        The loaded features for each video and half.
    """
    features_dir = Path(features_dir)
    video_zip_prefix = Path(video_zip_prefix)
    video_features = {video_index: {} for video_index in range(len(video_paths))}
    from_zip = zipfile.is_zipfile(features_dir)
    for (video_index, video_path) in enumerate(video_paths):
        # SoccerNet matches have exactly two halves, numbered 1 and 2.
        for half_index in range(1, 3):
            # Fix: the ``filename`` parameter was previously ignored and the
            # file stem was hard-coded.
            feature_name = f'{half_index}_{filename}.npy'
            if from_zip:
                with zipfile.ZipFile(features_dir, 'r') as z:
                    with z.open(str(video_zip_prefix / video_path / feature_name)) as f:
                        half_features = np.load(f)
            else:
                half_features = np.load(features_dir / video_path / feature_name)
            if as_tensor:
                half_features = torch.from_numpy(half_features)
            video_features[video_index][half_index] = half_features
    return video_features
|
def save_features(dataset: Dataset, saving_path: (str | Path), features: dict[(int, dict[(int, Tensor)])], filename: str, make_zip: bool) -> None:
    """Save the features, one ``.npy`` file per half per match.

    Args:
        dataset: Dataset to save the features from; only
            ``get_video_metadata(video_index)['url_local']`` is used.
        saving_path: Path to save the features.
        features: The features to save, indexed by video then half.
        filename: Stem of each stored file; half ``h`` of a video is written
            to ``<url_local>/<h>_<filename>.npy``.
        make_zip: Store the features as a Zip file (the directory is removed
            after zipping).
    """
    saving_path = Path(saving_path)
    for video_index in features:
        video_dir = saving_path / str(dataset.get_video_metadata(video_index)['url_local'])
        video_dir.mkdir(exist_ok=True, parents=True)
        for half_index in features[video_index]:
            # Fix: the ``filename`` parameter was previously ignored and the
            # file stem was hard-coded.
            np.save(video_dir / f'{half_index}_{filename}.npy', features[video_index][half_index])
    if make_zip:
        shutil.make_archive(str(saving_path), 'zip', saving_path)
        shutil.rmtree(saving_path)
|
def pca_features(features: dict[(int, dict[(int, np.ndarray)])], dim: int, standardize: bool=True, **kwargs):
    """Apply PCA on the given SoccerNet features.

    Args:
        features: The features to apply PCA on, indexed by video then half.
        dim: The output dimension of the PCA.
        standardize: Whether to standardize features before PCA.
        **kwargs: Extra arguments forwarded to ``PCA``.

    Returns:
        The dimensionally reduced features, with the same video/half layout.
    """
    # Stack every half of every video into one array for a single global fit.
    stacked = np.concatenate([features[vid][half] for vid in features for half in features[vid]])
    if standardize:
        mean = np.mean(stacked, 0, keepdims=True)
        std = np.std(stacked, 0, keepdims=True)
        stacked = (stacked - mean) / std
    reducer = PCA(n_components=dim, **kwargs)
    reduced = reducer.fit_transform(stacked)
    # Slice the reduced matrix back into the original per-half segments.
    output = {vid: {} for vid in features}
    offset: int = 0
    for vid in features:
        for half in features[vid]:
            segment_length = features[vid][half].shape[0]
            output[vid][half] = reduced[offset:offset + segment_length]
            offset += segment_length
    return output
|
class SoccerNetPathHandler():
    """Utility class that handles all deciphering and caching of video paths for encoded and frame videos."""

    def __init__(self) -> None:
        pass

    def video_from_path(self, decoder: DecoderType, video_path: str, half_path: str, duration: float, fps_video: int, fps: int, num_frames: int, **kwargs) -> Video:
        """Retrieve a video from the specified path.

        Args:
            decoder: The decoder for the video.
            video_path: The path to the video.
            half_path: The path to the half.
            duration: The duration of the video.
            fps_video: The fps of the video.
            fps: The fps to extract frames.
            num_frames: The number of frames of the video.

        Returns:
            The video to decode.

        Raises:
            NotImplementedError: If the decoder type is unsupported.
        """
        decoder_type = DecoderType(decoder)
        if decoder_type == DecoderType.FRAME:
            from eztorch.datasets.decoders.frame_soccernet_video import FrameSoccerNetVideo
            video_cls = FrameSoccerNetVideo
        elif decoder_type == DecoderType.DUMB:
            from eztorch.datasets.decoders.dumb_soccernet_video import DumbSoccerNetVideo
            video_cls = DumbSoccerNetVideo
        else:
            raise NotImplementedError
        return video_cls(video_path=video_path, half_path=half_path, duration=duration, fps_video=fps_video, fps=fps, num_frames=num_frames, **kwargs)
|
class SoccerNetPaths():
'SoccerNetPaths contains dictionaries describing videos from SoccerNet.\n\n Args:\n annotations: A list of dictionaries describing the videos.\n path_prefix: Path prefix to add to video paths.\n task: The SoccerNet task.\n '
@classmethod
def from_path(cls, data_path: str, path_prefix: str='', task: SoccerNetTask=SoccerNetTask.ACTION) -> SoccerNetPaths:
    """Factory function that creates a SoccerNetPaths object depending on the path type.

    Only json files are supported for now.

    Args:
        data_path: The path to the file to be read.
        path_prefix: Path prefix to add to video paths.
        task: The SoccerNet task.

    Returns:
        The SoccerNetPaths object.

    Raises:
        NotImplementedError: If ``data_path`` is a non-json file or a directory.
        FileNotFoundError: If ``data_path`` does not exist.
    """
    if g_pathmgr.isfile(data_path):
        if Path(data_path).suffix == '.json':
            return SoccerNetPaths.from_json(data_path, path_prefix, task)
        raise NotImplementedError
    elif g_pathmgr.isdir(data_path):
        # Fix: the exception was previously instantiated but never raised,
        # so directory inputs silently fell through and returned None.
        raise NotImplementedError
    else:
        raise FileNotFoundError(f'{data_path} not found.')
@classmethod
def from_json(cls, json_file: str, path_prefix: str='', task: SoccerNetTask=SoccerNetTask.ACTION) -> SoccerNetPaths:
    """Factory function that creates a SoccerNetPaths object by parsing the structure of the given json file.

    It expects the json to be created from soccernet_utils jsons.

    Args:
        json_file: Root directory to the SoccerNet json.
        path_prefix: Path prefix to add to video paths.
        task: The SoccerNet task.

    Returns:
        The SoccerNetPaths object.
    """
    assert g_pathmgr.exists(json_file), f'{json_file} not found.'
    # Fix: close the file deterministically instead of relying on GC.
    with open(json_file) as f:
        json_content = json.load(f)
    paths_and_annotations = []
    for match in json_content:
        new_content = {'halves': {}}
        for (key, content) in json_content[match].items():
            if key == 'halves':
                for (half, half_content) in content.items():
                    new_half_content = copy.copy(half_content)
                    # Normalize the annotations for the task and coerce the
                    # numeric fields to their expected types.
                    new_half_content['annotations'] = [process_annotation(annotation, task) for annotation in new_half_content['annotations']]
                    new_half_content['duration'] = float(new_half_content['duration'])
                    new_half_content['fps'] = int(new_half_content['fps'])
                    new_half_content['num_frames'] = int(new_half_content['num_frames'])
                    new_content['halves'][half] = new_half_content
            else:
                new_content[key] = content
        paths_and_annotations.append(new_content)
    return cls(paths_and_annotations, path_prefix)
def __init__(self, annotations: list[dict[(str, Any)]], path_prefix: (str | Path)='') -> None:
    """Store the per-match annotations and precompute the serialized tensors.

    Args:
        annotations: A list of dictionaries describing the videos.
        path_prefix: Path prefix to add to video paths.
    """
    self._annotations = annotations
    self._path_prefix = Path(path_prefix)
    self._serialize_annotations()
def _serialize_annotations(self):
    """Flatten the nested match/half/annotation dicts into arrays and tensors.

    Builds per-half and per-annotation flat tensors plus the cumulative-sum
    indices that ``__getitem__`` uses to slice the annotations of one half.
    """
    # Per-match video paths, stored as fixed-width byte strings.
    # NOTE(review): ``np.string_`` is a deprecated alias (removed in NumPy 2.0)
    # — consider ``np.bytes_`` when upgrading.
    self._video_paths = np.array([match_content['UrlLocal'] for match_content in self._annotations]).astype(np.string_)
    # Number of halves of each match.
    self._halves_per_video = torch.tensor([len(match_content['halves']) for match_content in self._annotations], dtype=torch.uint8)
    # Per-half paths and ids, flattened over all matches.
    self._half_paths = np.array([half_content['UrlLocal'] for match_content in self._annotations for (_, half_content) in match_content['halves'].items()]).astype(np.string_)
    self._half_ids = torch.tensor([int(half_id) for match_content in self._annotations for (half_id, _) in match_content['halves'].items()], dtype=torch.uint8)
    self._num_frames_per_half = torch.tensor([half_content['num_frames'] for match_content in self._annotations for (_, half_content) in match_content['halves'].items()], dtype=torch.int32)
    # Durations are truncated to whole seconds here.
    self._duration_per_half = torch.tensor([int(half_content['duration']) for match_content in self._annotations for (_, half_content) in match_content['halves'].items()])
    self._fps_per_half = torch.tensor([int(half_content['fps']) for match_content in self._annotations for (_, half_content) in match_content['halves'].items()], dtype=torch.uint8)
    if (not torch.all((self._fps_per_half == self._fps_per_half[0]))):
        raise AttributeError('All videos should have same fps for now')
    # Start/end half index of each video within the flat per-half tensors.
    self._end_video_idx = self._halves_per_video.to(dtype=torch.int32).cumsum(dim=0)
    self._start_video_idx = torch.roll(self._end_video_idx, 1)
    self._start_video_idx[0] = 0
    # Start/end annotation index of each half within the flat annotation tensors.
    self._num_annotations_per_half = torch.tensor([len(half_content['annotations']) for match_content in self._annotations for (_, half_content) in match_content['halves'].items()], dtype=torch.int16)
    self._end_annotation_half_idx = self._num_annotations_per_half.to(dtype=torch.int32).cumsum(dim=0)
    self._start_annotation_half_idx = torch.roll(self._end_annotation_half_idx, 1)
    self._start_annotation_half_idx[0] = 0
    # NOTE(review): this flat half id assumes exactly 2 halves per video
    # (video_idx * 2 + half_idx) — confirm against the data format.
    self._annotations_half = torch.tensor([((video_idx * 2) + half_idx) for (video_idx, match_content) in enumerate(self._annotations) for (half_idx, (_, half_content)) in enumerate(match_content['halves'].items()) for _ in range(len(half_content['annotations']))], dtype=torch.int32)
    # Per-annotation fields, flattened over all matches and halves.
    self._label_annotation_per_half = torch.tensor([annotation_content['label'] for match_content in self._annotations for (_, half_content) in match_content['halves'].items() for annotation_content in half_content['annotations']], dtype=torch.uint8)
    self._position_annotation_per_half = torch.tensor([annotation_content['position'] for match_content in self._annotations for (_, half_content) in match_content['halves'].items() for annotation_content in half_content['annotations']])
    self._team_per_half = torch.tensor([annotation_content['team'] for match_content in self._annotations for (_, half_content) in match_content['halves'].items() for annotation_content in half_content['annotations']], dtype=torch.uint8)
    self._visibility_per_half = torch.tensor([annotation_content['visibility'] for match_content in self._annotations for (_, half_content) in match_content['halves'].items() for annotation_content in half_content['annotations']], dtype=torch.uint8)
    # Back-references from each annotation to its video and half indices.
    self._video_idx_annotation_per_half = torch.tensor([video_idx for (video_idx, match_content) in enumerate(self._annotations) for (_, half_content) in match_content['halves'].items() for annotation_content in half_content['annotations']], dtype=torch.int32)
    self._half_idx_annotation_per_half = torch.tensor([half_idx for match_content in self._annotations for (half_idx, (_, half_content)) in enumerate(match_content['halves'].items()) for annotation_content in half_content['annotations']], dtype=torch.int32)
    # Cumulative frame counts (and the right-shifted variant) per half.
    self._cumsum_num_frames_per_half = self._num_frames_per_half.to(dtype=torch.int32).cumsum(dim=0)
    self._prev_cumsum_num_frames_per_half = torch.roll(self._cumsum_num_frames_per_half, 1)
    self._prev_cumsum_num_frames_per_half[0] = 0
    # Initialize the decode-fps-dependent tensors at the native video fps.
    self.set_fps(self._fps_per_half[0])
def get_half_metadata(self, video_index: int, half_index: int):
    """Return the metadata of the specified half.

    Args:
        video_index: The video index.
        half_index: The half index.

    Returns:
        The metadata of the half.
    """
    return self[(video_index, half_index)]
def get_video_metadata(self, video_index):
    """Return the metadata of the specified video, merging its halves.

    Args:
        video_index: The video index.

    Returns:
        The metadata of the video; half-level fields are gathered into lists,
        one entry per half.
    """
    num_halves = self._halves_per_video[video_index]
    per_half = [self.get_half_metadata(video_index, half) for half in range(num_halves)]
    url_local = self._video_paths[video_index].decode()
    metadata = {
        'video_path': self._path_prefix / url_local,
        'url_local': url_local,
        'num_halves': num_halves,
    }
    # Every half-level field not already set becomes a list over the halves.
    for field in per_half[0]:
        if field not in metadata:
            metadata[field] = [half_meta[field] for half_meta in per_half]
    return metadata
def __getitem__(self, index: tuple[int]) -> dict[(str, Any)]:
    """
    Args:
        index: Tuple containing the video index and the half index.

    Returns:
        The half annotation dict for the given index.
    """
    (video_index, half_index) = index
    start_video_idx = self._start_video_idx[video_index]
    # Fix: ``start_video_idx`` was computed twice; factor the flat half index
    # used everywhere below.
    half_idx = start_video_idx + half_index
    half_annotation_start_idx = self._start_annotation_half_idx[half_idx]
    half_annotation_end_idx = self._end_annotation_half_idx[half_idx]
    return {
        'video_path': self._path_prefix / self._video_paths[video_index].decode(),
        'url_local': self._video_paths[video_index].decode(),
        'half_path': self._path_prefix / self._half_paths[half_idx].decode(),
        'half_id': self._half_ids[half_idx].item(),
        'duration': self._duration_per_half[half_idx].item(),
        'num_frames': self._num_frames_per_half[half_idx].item(),
        'num_frames_fps': self._num_frames_per_half_fps[half_idx].item(),
        'annotations': {
            'label': self._label_annotation_per_half[half_annotation_start_idx:half_annotation_end_idx],
            'position': self._position_annotation_per_half[half_annotation_start_idx:half_annotation_end_idx],
            'team': self._team_per_half[half_annotation_start_idx:half_annotation_end_idx],
            'visibility': self._visibility_per_half[half_annotation_start_idx:half_annotation_end_idx],
        },
        'start_video_idx': start_video_idx,
        'half_idx': half_idx,
    }
def set_fps(self, fps: int) -> None:
self._fps = fps
if (fps == self.fps_videos):
self._num_frames_per_half_fps = self._num_frames_per_half
self._cumsum_num_frames_per_half_fps = self._cumsum_num_frames_per_half
self._number_of_frames_fps = self.number_of_frames
self._prev_cumsum_num_frames_per_half_fps = self._prev_cumsum_num_frames_per_half
else:
self._num_frames_per_half_fps = (self._duration_per_half * fps)
self._cumsum_num_frames_per_half_fps = self._num_frames_per_half_fps.cumsum(dim=0)
self._number_of_frames_fps = int(self._cumsum_num_frames_per_half_fps.sum())
self._prev_cumsum_num_frames_per_half_fps = torch.roll(self._cumsum_num_frames_per_half_fps, 1)
self._prev_cumsum_num_frames_per_half_fps[0] = 0
    @property
    def fps_videos(self) -> int:
        """Native FPS of the stored videos (taken from the first half; assumes all halves share the same FPS — TODO confirm)."""
        return int(self._fps_per_half[0])
    @property
    def fps(self) -> int:
        """Current decoding FPS (changed via ``set_fps``)."""
        return self._fps
    @property
    def num_frames_per_half_fps(self) -> torch.Tensor:
        """Number of frames per half at the current decoding FPS."""
        return self._num_frames_per_half_fps
    @property
    def cumsum_num_frames_per_half_fps(self) -> torch.Tensor:
        """Cumulative number of frames per half at the current decoding FPS."""
        return self._cumsum_num_frames_per_half_fps
    @property
    def prev_cumsum_num_frames_per_half_fps(self) -> torch.Tensor:
        """Cumulative number of frames per half at the current decoding FPS, shifted right by one (first entry is 0)."""
        return self._prev_cumsum_num_frames_per_half_fps
    @property
    def number_of_frames_fps(self) -> int:
        """Total number of frames in the dataset at the current decoding FPS."""
        return self._number_of_frames_fps
    @property
    def cumsum_num_frames_per_half(self) -> torch.Tensor:
        """Cumulative number of frames per half at the native FPS."""
        return self._cumsum_num_frames_per_half
    @property
    def prev_cumsum_num_frames_per_half(self) -> torch.Tensor:
        """Cumulative number of frames per half at the native FPS, shifted right by one."""
        return self._prev_cumsum_num_frames_per_half
    @property
    def number_of_frames(self) -> int:
        """Total number of frames in the dataset at the native FPS."""
        return int(self._num_frames_per_half.sum())
    @property
    def num_videos(self) -> int:
        """Number of videos in the dataset."""
        return len(self._video_paths)
    @property
    def num_halves(self) -> int:
        """Total number of halves across all videos."""
        return len(self._half_paths)
    @property
    def num_actions(self) -> int:
        """Total number of annotated actions across all halves."""
        return len(self._position_annotation_per_half)
    @property
    def path_prefix(self) -> Path:
        """The prefix prepended to video paths."""
        return self._path_prefix
    @path_prefix.setter
    def path_prefix(self, value: (str | Path)):
        # Normalize to a Path so later `/` joins work uniformly.
        self._path_prefix = Path(value)
    @property
    def global_rank(self):
        """Global rank of the current process (distributed training)."""
        return get_global_rank()
@property
def worlf_size(self):
'World size, number of the processes.'
return get_world_size()
    def __len__(self) -> int:
        """
        Returns:
            The number of videos.
        """
        return len(self._annotations)
|
def load_json(fpath: (str | Path)):
    """Read and parse a JSON file.

    Args:
        fpath: Path to the JSON file.

    Returns:
        The parsed JSON content.
    """
    with open(fpath) as json_file:
        content = json.load(json_file)
    return content
|
def parse_ground_truth(truth: dict) -> dict:
    """Index the ground-truth events by label, then by video.

    Args:
        truth: The JSON dataset content (a list of per-video dictionaries,
            each with a ``'video'`` name and an ``'events'`` list).

    Returns:
        Mapping ``label -> video -> list of event frames``.
    """
    by_label = defaultdict(lambda: defaultdict(list))
    for video_entry in truth:
        video_name = video_entry['video']
        for event in video_entry['events']:
            by_label[event['label']][video_name].append(event['frame'])
    return by_label
|
def get_predictions(pred: dict, label: (str | None)=None) -> list:
    """Flatten the predictions, optionally keeping a single label.

    Args:
        pred: All the predictions (list of per-video dictionaries).
        label: If given, only keep events with this label.

    Returns:
        ``(video, frame, score)`` tuples sorted by decreasing score.
    """
    flat = [
        (video_entry['video'], event['frame'], event['score'])
        for video_entry in pred
        for event in video_entry['events']
        if label is None or event['label'] == label
    ]
    flat.sort(key=lambda item: item[2], reverse=True)
    return flat
|
def compute_average_precision(pred: list, truth: dict, tolerance: int=0, min_precision: float=0) -> float:
    """Compute the interpolated average precision for one label.

    Args:
        pred: Predictions as ``(video, frame, score)`` tuples, sorted by
            decreasing score (asserted below).
        truth: Mapping ``video -> list of ground-truth frames``.
            (Fix: previously mis-annotated as ``np.array``.)
        tolerance: Maximum frame distance for a prediction to match a
            ground-truth frame.
        min_precision: Stop the evaluation early once precision drops below
            this value (the interpolated curve is monotonically decreasing).

    Returns:
        The average precision.
    """
    total = sum(len(frames) for frames in truth.values())
    recalled = set()
    pc = []
    _prev_score = 1
    for i, (video, frame, score) in enumerate(pred, 1):
        # Predictions must come sorted by decreasing score.
        assert score <= _prev_score
        _prev_score = score
        # Find the closest not-yet-recalled ground-truth frame in this video.
        gt_closest = None
        for gt_frame in truth.get(video, []):
            if (video, gt_frame) in recalled:
                continue
            if gt_closest is None or abs(frame - gt_closest) > abs(frame - gt_frame):
                gt_closest = gt_frame
        # Record precision each time a true positive is encountered.
        if gt_closest is not None and abs(frame - gt_closest) <= tolerance:
            recalled.add((video, gt_closest))
            p = len(recalled) / i
            pc.append(p)
            if p < min_precision:
                break
    # Interpolate the precision curve (right-to-left running maximum).
    interp_pc = []
    max_p = 0
    for p in pc[::-1]:
        max_p = max(p, max_p)
        interp_pc.append(max_p)
    interp_pc.reverse()
    return sum(interp_pc) / total
|
def compute_mAPs(truth: dict, pred: dict, tolerances: list[int]=(0, 1, 2, 4)):
    """Compute the mAPs at different frame tolerances.

    Args:
        truth: The truth labels (list of per-video dictionaries).
        pred: The label predictions (list of per-video dictionaries).
        tolerances: The tolerances at which to compute the mAPs.
            (Fix: the default is now an immutable tuple.)

    Returns:
        Tuple ``(mAPs, tolerances, header, rows)`` where ``rows`` holds the
        per-class APs (in percent) for each tolerance.
    """
    assert ({v['video'] for v in truth} == {v['video'] for v in pred}), 'Video set mismatch!'
    truth_by_label = parse_ground_truth(truth)
    class_aps_for_tol = []
    mAPs = []
    for tol in tolerances:
        class_aps = []
        for label, truth_for_label in sorted(truth_by_label.items()):
            # Fix: the previous code forwarded a `plot_ax` keyword that
            # `compute_average_precision` does not accept (TypeError); the
            # axes were always None here anyway.
            ap = compute_average_precision(get_predictions(pred, label=label), truth_for_label, tolerance=tol)
            class_aps.append((label, ap))
        mAP = np.mean([x[1] for x in class_aps])
        mAPs.append(mAP)
        class_aps.append(('mAP', mAP))
        class_aps_for_tol.append(class_aps)
    header = ['AP @ tol'] + list(tolerances)
    rows = []
    for c, _ in class_aps_for_tol[0]:
        row = [c]
        for class_aps in class_aps_for_tol:
            for c2, val in class_aps:
                if c2 == c:
                    row.append(val * 100)
        rows.append(row)
    return (mAPs, tolerances, header, rows)
|
def load_features(features_dir: (str | Path), video_paths: List[(str | Path)], filename: str, video_zip_prefix: str='', as_tensor: bool=False) -> dict[(int, (np.ndarray | Tensor))]:
    """Load spot features.

    Args:
        features_dir: Directory or zip archive where the features are stored.
        video_paths: Paths of the videos inside the directory/archive.
        filename: Name (without extension) of the per-video feature files.
        video_zip_prefix: Prefix of the video paths inside the zip archive.
        as_tensor: Whether to return the features as torch tensors instead of
            numpy arrays.

    Returns:
        The loaded features for each video index.
    """
    features_dir = Path(features_dir)
    video_zip_prefix = Path(video_zip_prefix)
    video_features = {}
    from_zip = zipfile.is_zipfile(features_dir)
    for video_index, video_path in enumerate(video_paths):
        # Fix: the feature file name now actually uses the `filename`
        # parameter (it was previously hard-coded and the parameter ignored).
        if from_zip:
            with zipfile.ZipFile(features_dir, 'r') as z:
                with z.open(str(video_zip_prefix / video_path / f'{filename}.npy')) as f:
                    features = np.load(f)
        else:
            features = np.load(features_dir / video_path / f'{filename}.npy')
        if as_tensor:
            features = torch.from_numpy(features)
        video_features[video_index] = features
    return video_features
|
def save_features(dataset: Dataset, saving_path: (str | Path), features: dict[(int, dict[(int, Tensor)])], filename: str, make_zip: bool) -> None:
    """Save the features, one ``.npy`` file per video.

    Args:
        dataset: Dataset from which to retrieve per-video metadata.
        saving_path: Directory in which to save the features.
        features: The features to save, keyed by video index.
        filename: The filename (without extension) of each stored file.
        make_zip: Whether to replace the directory with a zip archive.
    """
    saving_path = Path(saving_path)
    for video_index in features:
        video_path = saving_path / str(dataset.get_video_metadata(video_index)['video_name'])
        video_path.mkdir(exist_ok=True, parents=True)
        # Fix: use the `filename` parameter (it was previously ignored and the
        # file name hard-coded).
        np.save(video_path / f'{filename}.npy', features[video_index])
    if make_zip:
        # Archive the directory, then remove the uncompressed copy.
        shutil.make_archive(str(saving_path), 'zip', saving_path)
        shutil.rmtree(saving_path)
|
def pca_features(features: dict[(int, dict[(int, np.ndarray)])], dim: int, standardize: bool=True, **kwargs):
    """Apply PCA to the given Spot features.

    Args:
        features: Per-video features to reduce.
        dim: Output dimension of the PCA.
        standardize: Whether to standardize the features before PCA.
        **kwargs: Extra arguments forwarded to ``PCA``.

    Returns:
        The dimensionally reduced features, split back per video.
    """
    stacked = np.concatenate([video_features for video_features in features.values()])
    if standardize:
        mean = np.mean(stacked, 0, keepdims=True)
        std = np.std(stacked, 0, keepdims=True)
        stacked = (stacked - mean) / (std + 1e-06)
    reduced = PCA(n_components=dim, **kwargs).fit_transform(stacked)
    # Split the reduced matrix back into the per-video slices.
    features_pca = {}
    offset = 0
    for video_index, video_features in features.items():
        end = offset + video_features.shape[0]
        features_pca[video_index] = reduced[offset:end]
        offset = end
    return features_pca
|
class SpotDatasets(Enum):
    """Spot datasets supported by the pipeline (values are their config names)."""

    TENNIS = 'tennis'
    FS_COMP = 'fs_comp'
    FS_PERF = 'fs_perf'
|
def initialize_predictions(dataset: Dataset, max_video_index: int, min_video_index: int, device: str='cpu') -> Dict[(int, Dict[(int, Tensor)])]:
    """Initialize zeroed predictions for videos with indexes in ``[min_video_index, max_video_index]``.

    Args:
        dataset: The dataset that contains the videos.
        max_video_index: Max video index to keep (inclusive).
        min_video_index: Min video index to keep (inclusive).
        device: The device on which to store the predictions.

    Returns:
        Mapping ``video_index -> (num_frames, num_classes)`` zero tensor.
    """
    predictions = {}
    for video_idx in range(min_video_index, max_video_index + 1):
        frame_count = dataset._annotated_videos[video_idx]['num_frames']
        predictions[video_idx] = torch.zeros((frame_count, dataset.num_classes), device=device)
    return predictions
|
def aggregate_and_filter_clips(class_preds: Tensor, frames: Tensor, num_frames: Tensor, video_indexes: Tensor, max_video_index: Tensor, min_video_index: Tensor) -> (Tuple[Tensor] | None):
    """Gather clip predictions across processes and keep only the clips whose
    video index lies in ``[min_video_index, max_video_index]``. If none are
    kept, returns ``None``.

    Args:
        class_preds: Predictions to add.
        frames: Frames of the predictions.
        num_frames: Number of frames for each video.
        video_indexes: Indexes of the videos.
        max_video_index: Max video index to keep (inclusive).
        min_video_index: Min video index to keep (inclusive).

    Returns:
        The filtered ``(class_preds, frames, num_frames, video_indexes)``
        tensors, or ``None``.
    """
    # Gather the per-process tensors so every rank sees all clips.
    class_preds = concat_all_gather_without_backprop(class_preds)
    frames = concat_all_gather_without_backprop(frames.contiguous())
    num_frames = concat_all_gather_without_backprop(num_frames.contiguous())
    video_indexes = concat_all_gather_without_backprop(video_indexes.contiguous())
    # Boolean mask of the clips belonging to the requested video range.
    shard_preds = torch.logical_and((video_indexes <= max_video_index), (video_indexes >= min_video_index))
    if (not torch.any(shard_preds)):
        return
    shard_preds = torch.nonzero(shard_preds, as_tuple=True)
    class_preds = class_preds[shard_preds]
    frames = frames[shard_preds]
    num_frames = num_frames[shard_preds]
    video_indexes = video_indexes[shard_preds]
    # Bookkeeping tensors are consumed on CPU by the callers.
    num_frames = num_frames.cpu()
    video_indexes = video_indexes.cpu()
    return (class_preds, frames, num_frames, video_indexes)
|
def add_clip_prediction(predictions: Dict[(int, Dict[(int, Tensor)])], class_preds: Tensor, frames: Tensor, video_index: int, merge_predictions_type: str='max') -> None:
    """Merge per-frame class predictions for one video into the stored predictions.

    Args:
        predictions: Stored predictions, keyed by video index.
        class_preds: New predictions for the given frames.
        frames: Frame indexes to update.
        video_index: Index of the video.
        merge_predictions_type: ``'max'`` or ``'average'`` merging strategy;
            any other value leaves the stored predictions untouched.
    """
    stored = predictions[video_index][frames]
    new_preds = class_preds.to(dtype=stored.dtype)
    if merge_predictions_type == 'max':
        # Keep the element-wise maximum of stored and new predictions.
        predictions[video_index][frames] = torch.where(new_preds > stored, new_preds, stored)
    elif merge_predictions_type == 'average':
        # Average with the previous predictions where some already exist (> 0),
        # otherwise take the new predictions directly.
        already_filled = stored > 0
        averaged = torch.mean(torch.stack((new_preds, stored)), 0)
        predictions[video_index][frames] = torch.where(already_filled, averaged, new_preds)
|
def add_clips_predictions(predictions: Dict[(int, Dict[(int, Tensor)])], class_preds: Tensor, frames: Tensor, num_frames: Tensor, video_indexes: Tensor, remove_frames_predictions: (int | Tensor)=0, merge_predictions_type: str='max') -> None:
    """Add a batch of clip predictions to the stored per-video predictions.

    Args:
        predictions: Current predictions of the videos stored in a dictionary.
        class_preds: Predictions to add, shape ``(batch, time, classes)``.
        frames: Frames of the predictions, one row per clip.
        num_frames: Number of frames of each clip's video.
        video_indexes: Indexes of the videos.
        remove_frames_predictions: Drop this many frames at the start and end
            of each clip's predictions — except when the clip touches the very
            beginning or end of the video.
        merge_predictions_type: Strategy to merge predictions at the same frame.
    """
    (b, t, c) = class_preds.shape
    for i in range(b):
        video_class_preds = class_preds[i]
        video_frames = frames[i]
        video_num_frames = int(num_frames[i])
        video_index = int(video_indexes[i])
        if ((remove_frames_predictions > 0) and (video_frames.shape[0] > 1)):
            # Drop frames near the clip start, unless the clip itself starts at
            # the beginning of the video (symmetrically for the clip end).
            remove_before = torch.logical_and(((video_frames - video_frames[0]) < remove_frames_predictions), (video_frames[0] > remove_frames_predictions))
            remove_after = torch.logical_and(((video_frames[(- 1)] - video_frames) < remove_frames_predictions), ((video_num_frames - video_frames) > remove_frames_predictions))
            keep_predictions = torch.logical_not(torch.logical_or(remove_before, remove_after))
            keep_predictions = keep_predictions.nonzero(as_tuple=True)
            video_class_preds = video_class_preds[keep_predictions]
            video_frames: Tensor = video_frames[keep_predictions]
        add_clip_prediction(predictions, video_class_preds, video_frames, video_index, merge_predictions_type)
|
def postprocess_spotting_video_predictions(predictions: Tensor, NMS_args: Dict[(Any, Any)], dataset: SpotDatasets=SpotDatasets.TENNIS) -> List[Dict[(str, Any)]]:
    """Postprocess the per-frame predictions of one video for action spotting.

    Args:
        predictions: The per-frame class predictions of the video.
        NMS_args: Arguments to configure the ``perform_all_classes_NMS`` function.
        dataset: The spot dataset, used to map class ids back to label names.

    Returns:
        The predictions of the video as a list of
        ``{'label', 'frame', 'score'}`` dictionaries.
    """
    (kept_predictions_per_class, kept_frames_per_class) = perform_all_classes_NMS(predictions, 1, **NMS_args)
    # Move everything to CPU before building the Python result list.
    kept_predictions_per_class = [t.cpu() for t in kept_predictions_per_class]
    kept_frames_per_class = [t.cpu() for t in kept_frames_per_class]
    reverse_labels = REVERSE_LABELS_SPOT_DATASETS[dataset]
    video_predictions = [{'label': reverse_labels[c], 'frame': int(frame_c), 'score': float(prediction_c)} for (c, (kept_predictions_c, kept_frames_c)) in enumerate(zip(kept_predictions_per_class, kept_frames_per_class)) for (prediction_c, frame_c) in zip(kept_predictions_c, kept_frames_c)]
    return video_predictions
|
def save_spotting_predictions(predictions: Dict[(str, Any)], saving_path: (str | Path), dataset: Dataset, NMS_args: Dict[(Any, Any)]) -> None:
    """Postprocess and save the spotting predictions to ``predictions.json``.

    Args:
        predictions: Mapping ``video_index -> per-frame prediction tensor``.
        saving_path: Path to the saving directory.
        dataset: Dataset from which to retrieve video metadata.
        NMS_args: Arguments to configure the ``perform_all_classes_NMS`` function.
    """
    saving_path = Path(saving_path)
    json_output = []
    for video_index in predictions:
        video_metadata = dataset.get_video_metadata(video_index)
        predictions_video = postprocess_spotting_video_predictions(predictions[video_index], NMS_args, dataset.dataset)
        # Sort the events chronologically for readability of the output file.
        predictions_video.sort(key=lambda x: int(x['frame']))
        json_output.append({'video': video_metadata['video_name'], 'events': predictions_video})
    saving_path.mkdir(exist_ok=True, parents=True)
    # Fix: removed a dead `json.dumps(json_output, indent=2)` call whose
    # result was discarded.
    with open(saving_path / 'predictions.json', 'w') as out:
        json.dump(json_output, out)
|
def save_raw_spotting_predictions(predictions: Dict[(str, Any)], saving_path: (str | Path), make_zip: bool=True) -> None:
    """Save the raw spotting predictions, one ``.pth`` file per video.

    Args:
        predictions: Mapping ``video_index -> prediction tensor``.
        saving_path: Directory in which to store the files.
        make_zip: Whether to replace the directory with a zip archive.
    """
    out_dir = Path(saving_path)
    out_dir.mkdir(exist_ok=True, parents=True)
    for video_index, video_predictions in predictions.items():
        torch.save(video_predictions.cpu(), out_dir / f'preds_video{video_index}.pth')
    if make_zip:
        # Archive the directory, then remove the uncompressed copy.
        shutil.make_archive(str(out_dir), 'zip', out_dir)
        shutil.rmtree(out_dir)
|
def load_raw_spotting_predictions(saved_path: (Path | str), video_indexes: List[int], device: Any='cpu') -> Dict[(int, Dict[(int, Tensor)])]:
    """Load raw spotting predictions saved by :func:`save_raw_spotting_predictions`.

    Args:
        saved_path: Directory or zip archive containing the saved predictions.
        video_indexes: Indexes of the videos to load.
        device: Device onto which the predictions are moved.

    Returns:
        Mapping ``video_index -> prediction tensor``.
    """
    source = Path(saved_path)
    predictions = {}
    if zipfile.is_zipfile(source):
        with zipfile.ZipFile(source, 'r') as archive:
            for video_index in video_indexes:
                with archive.open(f'preds_video{video_index}.pth') as stream:
                    predictions[video_index] = torch.load(stream, map_location='cpu').to(device=device)
    else:
        for video_index in video_indexes:
            file_path = source / f'preds_video{video_index}.pth'
            predictions[video_index] = torch.load(str(file_path), map_location='cpu').to(device=device)
    return predictions
|
def merge_predictions(saving_path: (str | Path), saved_paths: List[(str | Path)], video_indexes: List[int], kind_merge: str='average', device: Any='cpu', make_zip: bool=True):
    """Merge several saved spotting predictions into one and save the result.

    Args:
        saving_path: Where to save the merged predictions.
        saved_paths: Paths of the saved predictions to merge.
        video_indexes: Indexes of the predictions' videos.
        kind_merge: ``'average'``, ``'max'`` or ``'min'``.
        device: Device on which to merge the predictions.
        make_zip: Whether to zip the merged predictions.

    Raises:
        NotImplementedError: If ``kind_merge`` is unknown.
    """
    all_predictions = [load_raw_spotting_predictions(Path(path), video_indexes, device) for path in saved_paths]
    if kind_merge == 'average':
        fn_merge, idx_keep = torch.mean, None
    elif kind_merge == 'max':
        fn_merge, idx_keep = torch.max, 0
    elif kind_merge == 'min':
        fn_merge, idx_keep = torch.min, 0
    else:
        raise NotImplementedError(f'{kind_merge} not defined.')
    merged = {}
    for video_idx in all_predictions[0]:
        stacked = torch.stack([preds[video_idx] for preds in all_predictions])
        combined = fn_merge(stacked, dim=0)
        if idx_keep is not None:
            # torch.max/torch.min return (values, indices); keep the values.
            combined = combined[idx_keep]
        merged[video_idx] = combined
    save_raw_spotting_predictions(merged, saving_path, make_zip)
|
class SpotPathHandler():
    """Utility class that handles all deciphering and caching of video paths for encoded and frame videos."""

    def __init__(self) -> None:
        # Stateless handler: nothing to initialize.
        return

    def video_from_path(self, decoder: DecoderType, video_path: str, num_frames: int, **kwargs) -> Video:
        """Retrieve a video object for the specified path.

        Args:
            decoder: The decoder type for the video.
            video_path: The path to the video.
            num_frames: The number of frames of the video.
            **kwargs: Extra arguments forwarded to the video constructor.

        Returns:
            The video to decode.

        Raises:
            NotImplementedError: If the decoder type is not supported.
        """
        if (DecoderType(decoder) == DecoderType.FRAME):
            # Imported lazily to avoid a hard dependency at module import time.
            from eztorch.datasets.decoders.frame_spot_video import FrameSpotVideo
            return FrameSpotVideo(video_path=video_path, num_frames=num_frames, **kwargs)
        elif (DecoderType(decoder) == DecoderType.DUMB):
            from eztorch.datasets.decoders.dumb_spot_video import DumbSpotVideo
            return DumbSpotVideo(video_path=video_path, num_frames=num_frames, **kwargs)
        else:
            raise NotImplementedError
|
def process_event(events: dict[(str, Any)], labels_dictionary: dict[(str, int)]):
    """Process a raw event dictionary from a spot dataset.

    Args:
        events: The raw event to process.
        labels_dictionary: Mapping from action label names to integer ids.

    Returns:
        The processed event, with the label mapped to its integer id and the
        frame cast to ``int``; all other fields are kept as-is.
    """
    new_annotation = {}
    for element, value in events.items():
        if element == 'label':
            new_annotation[element] = labels_dictionary[value]
        elif element == 'frame':
            new_annotation[element] = int(value)
        else:
            # Fix: collapsed the 'comment' branch, which was byte-identical
            # to this default branch.
            new_annotation[element] = value
    return new_annotation
|
class SpotPaths():
    """SpotPaths contains dictionaries describing videos from spot datasets.

    Args:
        annotations: A list of dictionaries describing the videos.
        path_prefix: Path prefix to add to video paths.
    """

    @classmethod
    def from_path(cls, data_path: str, path_prefix: str='', dataset: SpotDatasets=SpotDatasets.TENNIS) -> SpotPaths:
        """Factory function that creates a SpotPaths object depending on the path type.

        Only supports json for now.

        Args:
            data_path: The path to the file to be read.
            path_prefix: Path prefix to add to video paths.
            dataset: The spot dataset the file belongs to.

        Returns:
            The SpotPaths object.

        Raises:
            NotImplementedError: For unsupported file types or directories.
            FileNotFoundError: If ``data_path`` does not exist.
        """
        if g_pathmgr.isfile(data_path):
            if Path(data_path).suffix == '.json':
                # Fix: forward the `dataset` argument (it was previously dropped).
                return SpotPaths.from_json(data_path, path_prefix, dataset)
            raise NotImplementedError
        elif g_pathmgr.isdir(data_path):
            # Fix: the exception was previously instantiated but never raised.
            raise NotImplementedError
        else:
            raise FileNotFoundError(f'{data_path} not found.')

    @classmethod
    def from_json(cls, json_file: str, path_prefix: str='', dataset: SpotDatasets=SpotDatasets.TENNIS) -> SpotPaths:
        """Factory function that creates a SpotPaths object by parsing the given json file.

        Args:
            json_file: Path to the spot dataset json.
            path_prefix: Path prefix to add to video paths.
            dataset: The spot dataset the file belongs to.

        Returns:
            The SpotPaths object.
        """
        assert g_pathmgr.exists(json_file), f'{json_file} not found.'
        # Fix: close the file handle (it was previously leaked).
        with open(json_file) as fp:
            json_content = json.load(fp)
        labels_dictionary = LABELS_SPOT_DATASETS[SpotDatasets(dataset)]
        paths_and_annotations = []
        for content in json_content:
            new_content = copy.copy(content)
            new_content['events'] = [process_event(event, labels_dictionary) for event in new_content['events']]
            new_content['num_frames'] = int(new_content['num_frames'])
            new_content['height'] = int(new_content['height'])
            new_content['width'] = int(new_content['width'])
            new_content['num_events'] = int(new_content['num_events'])
            paths_and_annotations.append(new_content)
        return cls(paths_and_annotations, path_prefix)

    def __init__(self, annotations: list[dict[(str, Any)]], path_prefix: (str | Path)='') -> None:
        self._annotations = annotations
        self._path_prefix = Path(path_prefix)
        self._serialize_annotations()

    def _serialize_annotations(self):
        """Serialize the annotations into numpy/torch arrays for fast access."""
        self._video_paths = np.array([video_content['video'] for video_content in self._annotations]).astype(np.string_)
        self._num_frames_per_video = torch.tensor([video_content['num_frames'] for video_content in self._annotations], dtype=torch.int32)
        self._cum_num_frames_per_video = self._num_frames_per_video.cumsum(0)
        self._fps_per_video = torch.tensor([int(video_content['fps']) for video_content in self._annotations], dtype=torch.uint8)
        self._height_per_video = torch.tensor([int(video_content['height']) for video_content in self._annotations], dtype=torch.int32)
        self._width_per_video = torch.tensor([int(video_content['width']) for video_content in self._annotations], dtype=torch.int32)
        # Fix: this tensor was previously assigned twice in a row (int32 then
        # int16); keep the single, final int16 assignment.
        self._num_events_per_video = torch.tensor([int(video_content['num_events']) for video_content in self._annotations], dtype=torch.int16)
        # Per-video [start, end) slices into the flattened event arrays below.
        self._end_event_video_idx = self._num_events_per_video.to(dtype=torch.int32).cumsum(dim=0)
        self._start_event_video_idx = torch.roll(self._end_event_video_idx, 1)
        self._start_event_video_idx[0] = 0
        self._events_video = torch.tensor([video_idx for (video_idx, video_content) in enumerate(self._annotations) for _ in range(len(video_content['events']))], dtype=torch.int32)
        self._label_event_per_video = torch.tensor([event_content['label'] for video_content in self._annotations for event_content in video_content['events']], dtype=torch.uint8)
        self._frame_event_per_video = torch.tensor([event_content['frame'] for video_content in self._annotations for event_content in video_content['events']])
        self._comment_event_per_video = np.array([event_content['comment'] for video_content in self._annotations for event_content in video_content['events']]).astype(np.string_)

    def __getitem__(self, index: int) -> dict[(str, Any)]:
        """
        Args:
            index: The video index.

        Returns:
            The video annotation for the given index.
        """
        video_event_start_idx = self._start_event_video_idx[index]
        video_event_end_idx = self._end_event_video_idx[index]
        return {
            'video_path': self._path_prefix / self._video_paths[index].decode(),
            'video_name': self._video_paths[index].decode(),
            'num_frames': self._num_frames_per_video[index].item(),
            'events': {
                'label': self._label_event_per_video[video_event_start_idx:video_event_end_idx],
                'frame': self._frame_event_per_video[video_event_start_idx:video_event_end_idx],
                'comment': self._comment_event_per_video[video_event_start_idx:video_event_end_idx],
            },
        }

    @property
    def num_frames_per_video(self) -> torch.Tensor:
        """Number of frames per video."""
        return self._num_frames_per_video

    @property
    def cum_num_frames_per_video(self) -> torch.Tensor:
        """Cumulative number of frames per video."""
        return self._cum_num_frames_per_video

    @property
    def number_of_frames(self) -> int:
        """Total number of frames in the dataset."""
        return int(self._num_frames_per_video.sum())

    @property
    def num_videos(self) -> int:
        """Number of videos."""
        return len(self._video_paths)

    @property
    def path_prefix(self) -> Path:
        """The prefix added to video paths."""
        return self._path_prefix

    @path_prefix.setter
    def path_prefix(self, value: (str | Path)):
        # Normalize to a Path so later `/` joins work uniformly.
        self._path_prefix = Path(value)

    @property
    def global_rank(self):
        """Global rank of the current process."""
        return get_global_rank()

    @property
    def world_size(self):
        """World size, i.e. the number of distributed processes."""
        return get_world_size()

    @property
    def worlf_size(self):
        """Deprecated misspelled alias of :attr:`world_size`, kept for backward compatibility."""
        return self.world_size

    def __len__(self) -> int:
        """
        Returns:
            The number of videos.
        """
        return len(self._annotations)
|
def create_ucf101_files_for_frames(folder_files: str, frames_folder: str):
    """Create the UCF101 csv files for frame decoders.

    Args:
        folder_files: Path to the original ucf101 split files.
        frames_folder: Path to the frame folders.

    Raises:
        ImportError: If pandas is not installed.
    """
    if not _HAS_PD:
        raise ImportError('pandas is required to use this function.')
    # Fix: normalize `frames_folder` once, instead of re-wrapping it in
    # Path on every loop iteration.
    frames_folder = Path(frames_folder)
    classes = {}

    def get_video_class_index(video: str):
        # Assign class ids in order of first appearance of each class folder.
        video = Path(video)
        if video.parent.name not in classes:
            classes[video.parent.name] = len(classes)
        return classes[video.parent.name]

    for file in _UCF101_FILES:
        file = Path(folder_files) / file
        data = pandas.read_csv(file, sep=' ', header=None, names=['video', 'label'])
        # The split files may not carry labels; derive them from the class folder name.
        data['label'] = data.video.map(get_video_class_index)
        data.video = data.video.map(remove_suffix)
        data['duration'] = data.video.map(partial(get_raw_video_duration, frames_folder))
        data.to_csv(frames_folder / file.name, sep=' ', header=None, index=None)
|
class Ucf101LabeledVideoPaths():
'Pre-processor for Ucf101 dataset mentioned here - https://www.crcv.ucf.edu/data/UCF101.php.\n\n This dataset consists of classwise folds with each class consisting of 3\n folds (splits).\n\n The videos directory is of the format,\n video_dir_path/class_x/<somevideo_name>.avi\n ...\n video_dir_path/class_y/<somevideo_name>.avi\n\n The splits/fold directory is of the format,\n folds_dir_path/classInd.txt\n folds_dir_path/testlist01.txt\n folds_dir_path/testlist02.txt\n folds_dir_path/testlist03.txt\n folds_dir_path/trainlist01.txt\n folds_dir_path/trainlist02.txt\n folds_dir_path/trainlist03.txt\n\n Args:\n paths_and_labels: a list of tuples containing the video\n path and integer label.\n '
_allowed_splits = [1, 2, 3]
@classmethod
def from_path(cls, data_path: str, split_id: int=1, split_type: str='train', frames: bool=False) -> Ucf101LabeledVideoPaths:
"Factory function that creates a LabeledVideoPaths object depending on the path type.\n\n - If it is a directory path it uses the LabeledVideoPaths.from_directory function.\n - If it's a file it uses the LabeledVideoPaths.from_csv file.\n Args:\n file_path: The path to the file or directory to be read.\n split_id: Split id. Used if path is a directory.\n split_type: Split type. Used if path is a directory.\n frames: If ``True``, UCF101 is loaded as a frame dataset.\n Returns:\n The Ucf101LabeledVideoPaths object.\n "
if g_pathmgr.isfile(data_path):
if (pathlib.Path(data_path).suffix == '.json'):
return Ucf101LabeledVideoPaths.from_json(data_path)
return Ucf101LabeledVideoPaths.from_csv(data_path)
elif g_pathmgr.isdir(data_path):
return Ucf101LabeledVideoPaths.from_directory(data_path, split_id, split_type, frames)
else:
raise FileNotFoundError(f'{data_path} not found.')
@classmethod
def from_csv(cls, file_path: str) -> Ucf101LabeledVideoPaths:
'Factory function that creates a LabeledVideoPaths object by reading a file with the following format:\n\n <path> <integer_label>\n ...\n <path> <integer_label>\n\n Args:\n file_path: The path to the file to be read.\n Returns:\n The Ucf101LabeledVideoPaths object.\n '
assert g_pathmgr.exists(file_path), f'{file_path} not found.'
video_paths_and_label = []
with g_pathmgr.open(file_path, 'r') as f:
for path_label in f.read().splitlines():
line_split = path_label.rsplit(None, 2)
if (len(line_split) == 1):
file_path = line_split[0]
label = (- 1)
num_frames = None
elif (len(line_split) == 2):
(file_path, label) = line_split
num_frames = None
else:
(file_path, label, num_frames) = line_split
num_frames = int(num_frames)
video_paths_and_label.append((file_path, int(label), num_frames))
assert (len(video_paths_and_label) > 0), f'Failed to load dataset from {file_path}.'
return cls(video_paths_and_label)
@classmethod
def from_json(cls, file_path: str) -> Ucf101LabeledVideoPaths:
'Factory function that creates a LabeledVideoPaths object by reading a json file.\n\n Args:\n file_path: The path to the file to be read.\n Returns:\n The Ucf101LabeledVideoPaths object.\n '
assert g_pathmgr.exists(file_path), f'{file_path} not found.'
video_paths_and_label = []
json_content = json.load(open(file_path))
annotation = json_content['annotation']
videos_id = sorted([x for x in annotation.keys()])
for video_id in videos_id:
label = annotation[video_id]['class']
num_frames = (annotation[video_id]['num_frames'] if ('num_frames' in annotation['video_id']) else None)
video_path = f'{video_id}'
video_paths_and_label.append((video_path, int(label), int(num_frames)))
assert (len(video_paths_and_label) > 0), f'Failed to load dataset from {file_path}.'
return cls(video_paths_and_label)
@classmethod
def from_directory(cls, data_path: str, split_id: int=1, split_type: str='train', frames: bool=False) -> Ucf101LabeledVideoPaths:
"Factory function that creates Ucf101LabeledVideoPaths object form a splits/folds directory.\n\n Args:\n data_path: The path to the splits/folds directory of UCF-101.\n split_id: Fold id to be loaded. Options are: :math:`1`, :math:`2`, :math:`3`.\n split_type: Split/Fold type to be loaded. Options are: ``'train'``, ``'test'``.\n frames: If ``True``, UCF101 is loaded as a frame dataset.\n\n Returns:\n The Ucf101LabeledVideoPaths object.\n "
data_path = pathlib.Path(data_path)
if (not data_path.is_dir()):
raise RuntimeError(f'{data_path} not found or is not a directory.')
if (not (int(split_id) in cls._allowed_splits)):
raise RuntimeError(f"{split_id} not found in allowed split id's {cls._allowed_splits}.")
file_name = (data_path / f'{split_type}list0{split_id}.txt')
label_file_name = (data_path / 'classInd.txt')
return cls.from_csvs(file_name, label_file_name, frames)
@classmethod
def from_csvs(cls, file_path: (pathlib.Path | str), label_file_name: (pathlib.Path | str), frames: bool=False) -> Ucf101LabeledVideoPaths:
    """Factory function that creates a Ucf101LabeledVideoPaths object from UCF-101 split files.

    Args:
        file_path: Path to a split file (``{train,test}list0{k}.txt``).
        label_file_name: Path to the ``classInd.txt`` class-index file.
        frames: If ``True``, each entry is treated as a directory of frame
            images and the frames are counted.

    Returns:
        The Ucf101LabeledVideoPaths object.

    Raises:
        RuntimeError: If either file does not look like the expected
            UCF-101 split/label file.
    """
    label_file_path = pathlib.Path(label_file_name)
    assert g_pathmgr.exists(label_file_path), f'{label_file_path} not found.'
    if (not ((label_file_path.suffix == '.txt') and (label_file_path.stem == 'classInd'))):
        # BUG FIX: the original message interpolated file_path (the split
        # file) here, pointing users at the wrong file when the *label*
        # file is invalid.
        raise RuntimeError(f'Invalid file: {label_file_path}')
    # classInd.txt lines look like "1 ApplyEyeMakeup"; labels are shifted
    # to be 0-based.
    class_dict = {}
    with g_pathmgr.open(label_file_path, 'r') as f:
        for line in f.read().splitlines():
            line_split = line.rsplit(None, 1)
            class_dict[line_split[1]] = (int(line_split[0]) - 1)
    video_paths_and_label = []
    file_path = pathlib.Path(file_path)
    assert g_pathmgr.exists(file_path), f'{file_path} not found.'
    if (not (file_path.suffix == '.txt')):
        raise RuntimeError(f'Invalid file: {file_path}')
    with g_pathmgr.open(file_path, 'r') as f:
        for line in f.read().splitlines():
            # Split-file lines are "<ClassName>/<video>.avi [label]".
            line_split = line.rsplit(None, 2)
            video_name = pathlib.Path(line_split[0])
            if frames:
                # Frame datasets store each video as a directory of images;
                # drop the extension and count the directory entries.
                video_path = ((pathlib.Path(file_path.parent) / video_name.parent) / video_name.stem)
                num_frames = len(list(video_path.iterdir()))
            else:
                video_path = (pathlib.Path(file_path.parent) / video_name)
                num_frames = None
            # The class name is the parent directory of the video entry.
            video_label = class_dict[str(video_path.parent.stem)]
            video_paths_and_label.append((video_path, video_label, num_frames))
    assert (len(video_paths_and_label) > 0), f'Failed to load dataset from {file_path}.'
    return cls(video_paths_and_label)
def __init__(self, paths_and_labels: list[tuple[str | pathlib.Path, int, int | None]], path_prefix: str='') -> None:
    """Store (video path, label, num_frames) triples plus an optional path prefix.

    Args:
        paths_and_labels: Triples of (video path, integer class label,
            number of frames or None), as built by the factory methods
            above. NOTE(review): the original annotation declared
            ``list[tuple[str, dict | None]]``, which does not match the
            3-tuples the factories append — corrected here; confirm
            against callers.
        path_prefix: Prefix joined onto each stored path in ``__getitem__``.
    """
    self._paths_and_labels = paths_and_labels
    self._path_prefix = path_prefix
def path_prefix(self, prefix):
    # Setter body for the write-only property defined just below.
    self._path_prefix = prefix
# Rebind ``path_prefix`` as a write-only property: property(fget=None, fset=...).
# Assigning ``obj.path_prefix = p`` updates the prefix used by __getitem__;
# reading ``obj.path_prefix`` raises AttributeError because no getter is set.
path_prefix = property(None, path_prefix)
def __getitem__(self, index: int) -> tuple[(str, dict)]:
    """Return the (prefixed path, info dict) pair at ``index``.

    Args:
        index: The path and label index.

    Returns:
        A tuple of the path (joined with the stored path prefix) and a
        dict carrying the ``label`` and ``num_frames`` entries.
    """
    video_path, video_label, frame_count = self._paths_and_labels[index]
    full_path = os.path.join(self._path_prefix, video_path)
    return full_path, {'label': video_label, 'num_frames': frame_count}
def __len__(self) -> int:
    """Return the number of stored (path, label, num_frames) entries."""
    entry_count = len(self._paths_and_labels)
    return entry_count
# NOTE(review): the lines below were non-code residue from a web page
# ("Subsets and Splits / No community queries yet / The top public SQL
# queries from the community will appear here once available.") that was
# captured during extraction; commented out so the module stays importable.