| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| """EPIC-KITCHENS-100 is a large-scale dataset in first-person (egocentric) vision; multi-faceted, audio-visual, |
| non-scripted recordings in native environments - i.e. the wearers' homes, capturing all daily activities |
| in the kitchen over multiple days. Annotations are collected using a novel 'Pause-and-Talk' narration interface. |
| |
| EPIC-KITCHENS-100 is an extension of the EPIC-KITCHENS dataset released in 2018, to 100 hours of footage.""" |
|
|
import ast
import csv
import os

import datasets
|
|
|
|
| _CITATION = """ |
| @ARTICLE{Damen2021RESCALING, |
| title={Rescaling Egocentric Vision: Collection, Pipeline and Challenges for EPIC-KITCHENS-100}, |
| author={Damen, Dima and Doughty, Hazel and Farinella, Giovanni Maria and and Furnari, Antonino |
| and Ma, Jian and Kazakos, Evangelos and Moltisanti, Davide and Munro, Jonathan |
| and Perrett, Toby and Price, Will and Wray, Michael}, |
| journal = {International Journal of Computer Vision (IJCV)}, |
| year = {2021}, |
| Url = {https://doi.org/10.1007/s11263-021-01531-2} |
| } |
| @INPROCEEDINGS{Damen2018EPICKITCHENS, |
| title={Scaling Egocentric Vision: The EPIC-KITCHENS Dataset}, |
| author={Damen, Dima and Doughty, Hazel and Farinella, Giovanni Maria and Fidler, Sanja and |
| Furnari, Antonino and Kazakos, Evangelos and Moltisanti, Davide and Munro, Jonathan |
| and Perrett, Toby and Price, Will and Wray, Michael}, |
| booktitle={European Conference on Computer Vision (ECCV)}, |
| year={2018} |
| } |
| """ |
|
|
# Human-readable summary surfaced through `DatasetInfo.description`.
_DESCRIPTION = """\
EPIC-KITCHENS-100 is a large-scale dataset in first-person (egocentric) vision; multi-faceted, audio-visual,
non-scripted recordings in native environments - i.e. the wearers' homes, capturing all daily activities
in the kitchen over multiple days. Annotations are collected using a novel 'Pause-and-Talk' narration interface.

EPIC-KITCHENS-100 is an extension of the EPIC-KITCHENS dataset released in 2018, to 100 hours of footage.
"""


# Official project page.
_HOMEPAGE = "https://epic-kitchens.github.io/2022"


# License string for the annotations.
_LICENSE = "CC BY-NC 4.0"


# Raw-file root of the official annotation repository (note the trailing slash);
# split CSV names are appended to this base to form download URLs.
_URL_BASE = "https://raw.githubusercontent.com/epic-kitchens/epic-kitchens-100-annotations/master/"


# Builder-config names, one per official challenge task.
_VARIANTS = [
    "action_recognition",
    "multi_instance_retrieval",
    "unsupervised_domain_adaptation",
]
class EpicKitchens100(datasets.GeneratorBasedBuilder):
    """EPIC-KITCHENS-100: egocentric kitchen-activity narration annotations.

    Each builder config mirrors one official challenge task:

    * ``action_recognition``             -- train / validation / test
    * ``multi_instance_retrieval``       -- train / test
    * ``unsupervised_domain_adaptation`` -- source/target x train/val/test

    Every example carries the narration-segment metadata (ids, timestamps and
    the relative video path); labelled splits additionally carry the
    narration text and verb/noun annotations, while timestamp-only splits get
    empty strings and ``-1`` class ids as sentinels.
    """

    BUILDER_CONFIGS = [datasets.BuilderConfig(name) for name in _VARIANTS]
    DEFAULT_CONFIG_NAME = "action_recognition"

    def _info(self):
        """Return the `DatasetInfo` with the feature schema shared by all configs."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # True for footage added in the 100-hour extension (three-digit
                    # video suffix, e.g. "P01_101"); False for original 2018 videos.
                    "extended": datasets.Value("bool"),
                    "narration_id": datasets.Value("string"),
                    "participant_id": datasets.Value("string"),
                    "video_id": datasets.Value("string"),
                    # Relative path of the source video in the official storage layout.
                    "path": datasets.Value("string"),
                    "narration_timestamp": datasets.Value("string"),
                    "start_timestamp": datasets.Value("string"),
                    "stop_timestamp": datasets.Value("string"),
                    "narration": datasets.Value("string"),
                    "verb": datasets.Value("string"),
                    "verb_class": datasets.Value("int32"),
                    "noun": datasets.Value("string"),
                    # int32 for consistency with verb_class/all_noun_classes and
                    # with the -1 sentinel used on unlabelled splits (was
                    # mistakenly declared "string").
                    "noun_class": datasets.Value("int32"),
                    "all_nouns": datasets.features.Sequence(datasets.Value("string")),
                    "all_noun_classes": datasets.features.Sequence(datasets.Value("int32")),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the annotation CSVs for the active config and declare its splits."""
        # URLs are built by plain concatenation (_URL_BASE ends with "/"):
        # os.path.join would insert backslashes on Windows and corrupt the URLs.
        urls = {
            "action_recognition": {
                "train": _URL_BASE + "EPIC_100_train.csv",
                "validation": _URL_BASE + "EPIC_100_validation.csv",
                "test": _URL_BASE + "EPIC_100_test_timestamps.csv",
            },
            "multi_instance_retrieval": {
                "train": _URL_BASE + "retrieval_annotations/EPIC_100_retrieval_train.csv",
                "test": _URL_BASE + "retrieval_annotations/EPIC_100_retrieval_test.csv",
            },
            "unsupervised_domain_adaptation": {
                "source_train": _URL_BASE + "UDA_annotations/EPIC_100_uda_source_train.csv",
                "target_train": _URL_BASE + "UDA_annotations/EPIC_100_uda_target_train_timestamps.csv",
                "source_test": _URL_BASE + "UDA_annotations/EPIC_100_uda_source_test_timestamps.csv",
                "target_test": _URL_BASE + "UDA_annotations/EPIC_100_uda_target_test_timestamps.csv",
                "source_val": _URL_BASE + "UDA_annotations/EPIC_100_uda_source_val.csv",
                "target_val": _URL_BASE + "UDA_annotations/EPIC_100_uda_target_val.csv",
            },
        }
        files_path = dl_manager.download_and_extract(urls)

        def _gen(split_name):
            # One SplitGenerator per annotation file of the active config.
            return datasets.SplitGenerator(
                name=datasets.Split(split_name),
                gen_kwargs={
                    "annotations": files_path[self.config.name][split_name],
                    "split": split_name,
                },
            )

        if self.config.name == "unsupervised_domain_adaptation":
            return [
                _gen(n)
                for n in (
                    "source_train",
                    "target_train",
                    "source_test",
                    "target_test",
                    "source_val",
                    "target_val",
                )
            ]
        splits = [_gen("train"), _gen("test")]
        if self.config.name == "action_recognition":
            splits.append(_gen("validation"))
        return splits

    def _generate_examples(self, annotations, split):
        """Yield one example per data row of the split's annotation CSV.

        Args:
            annotations: local path of the downloaded CSV file.
            split: split name, also used to build the 2018 video path.
        """
        # Only these (config, split) pairs ship narration/verb/noun labels;
        # the remaining files are timestamp-only challenge splits.
        labelled = (
            self.config.name in ("action_recognition", "multi_instance_retrieval")
            and split in ("train", "validation")
        ) or (
            self.config.name == "unsupervised_domain_adaptation"
            and split in ("source_train", "source_val", "target_val")
        )
        with open(annotations, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=",")
            next(csv_reader)  # skip the header row
            for idx, row in enumerate(csv_reader):
                (
                    narration_id,
                    participant_id,
                    video_id,
                    narration_timestamp,
                    start_timestamp,
                    stop_timestamp,
                ) = row[:6]
                if labelled:
                    # Columns 6-7 (start/stop frame) are intentionally skipped.
                    narration, verb, verb_class, noun, noun_class, all_nouns, all_noun_classes = row[8:15]
                    # The list columns are Python-literal strings such as
                    # "['pan', 'lid']"; literal_eval parses them without the
                    # arbitrary-code-execution risk of eval() on downloaded data.
                    all_nouns = ast.literal_eval(all_nouns)
                    all_noun_classes = ast.literal_eval(all_noun_classes)
                    verb_class = int(verb_class)
                    noun_class = int(noun_class)
                else:
                    narration = verb = noun = ""
                    verb_class = noun_class = -1
                    # Two distinct lists (the original aliased one object).
                    all_nouns = []
                    all_noun_classes = []
                # Extension footage uses a three-digit video suffix ("P01_101")
                # and lives in a different directory layout than 2018 footage.
                extended = len(narration_id.split("_")[1]) == 3
                if extended:
                    path = f"EPIC-KITCHENS/{participant_id}/videos/{video_id}.MP4"
                else:
                    path = f"EPIC_KITCHENS_2018/videos/{split}/{participant_id}/{video_id}.MP4"

                yield idx, {
                    "extended": extended,
                    "narration_id": narration_id,
                    "participant_id": participant_id,
                    "video_id": video_id,
                    "path": path,
                    "narration_timestamp": narration_timestamp,
                    "start_timestamp": start_timestamp,
                    "stop_timestamp": stop_timestamp,
                    "narration": narration,
                    "verb": verb,
                    "verb_class": verb_class,
                    "noun": noun,
                    "noun_class": noun_class,
                    "all_nouns": all_nouns,
                    "all_noun_classes": all_noun_classes,
                }
|
|