| |
|
|
| |
| |
|
|
| |
|
|
|
|
| |
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
|
|
|
|
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| """EMT dataset.""" |
|
|
| import os |
| import json |
| import pandas as pd |
| import datasets |
|
|
| _HOMEPAGE = "https://github.com/AV-Lab/emt-dataset" |
| _LICENSE = "CC-BY-SA 4.0" |
|
|
| _CITATION = """ |
| @article{EMTdataset2025, |
| title={EMT: A Visual Multi-Task Benchmark Dataset for Autonomous Driving in the Arab Gulf Region}, |
| author={Nadya Abdel Madjid and Murad Mebrahtu and Abdelmoamen Nasser and Bilal Hassan and Naoufel Werghi and Jorge Dias and Majid Khonji}, |
| year={2025}, |
| eprint={2502.19260}, |
| archivePrefix={arXiv}, |
| primaryClass={cs.CV}, |
| url={https://arxiv.org/abs/2502.19260} |
| } |
| """ |
|
|
| _DESCRIPTION = """\ |
| A multi-task dataset for detection, tracking, prediction, and intention prediction. |
| This dataset includes 34,386 annotated frames collected over 57 minutes of driving, with annotations for detection + tracking.", |
| """ |
|
|
| _REPO = "https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations" |
|
|
class EMTConfig(datasets.BuilderConfig):
    """BuilderConfig for the EMT dataset."""

    def __init__(self, data_url, annotation_url, **kwargs):
        """BuilderConfig for EMT.

        Args:
            data_url: `string`, URL to download the image archive (.tar file).
            annotation_url: `string`, base URL of the annotation Parquet files.
            **kwargs: keyword arguments forwarded to super
                (e.g. ``name``, ``description``).
        """
        # Modern zero-argument super(); the version is pinned here so every
        # config of this builder reports the same dataset version.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.annotation_url = annotation_url
|
|
|
|
class EMT(datasets.GeneratorBasedBuilder):
    """EMT dataset builder: pairs frames from a tar archive with
    detection/tracking annotations stored in per-split Parquet files."""

    BUILDER_CONFIGS = [
        EMTConfig(
            name="full_size",
            description="All images are in their original size.",
            # Fixed to a ``resolve/`` URL: the previous ``blob/`` URL serves
            # the Hub's HTML file viewer, not the raw .tar.gz bytes
            # (``annotation_url`` below already used ``resolve/`` correctly).
            data_url="https://huggingface.co/datasets/KuAvLab/EMT/resolve/main/emt_images.tar.gz",
            annotation_url="https://huggingface.co/datasets/Murdism/EMT/resolve/main/annotations/",
        )
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage, license, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "objects": datasets.Sequence(
                        {
                            # ``datasets.Float32()`` is not a public feature
                            # type; the scalar feature API is Value("float32"),
                            # matching the int32/string fields below.
                            "bbox": datasets.Sequence(datasets.Value("float32")),
                            "class_id": datasets.Value("int32"),
                            "track_id": datasets.Value("int32"),
                            "class_name": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and the per-split annotation tables."""
        archive_path = dl_manager.download(self.config.data_url)
        # Parquet files are plain files, not archives, so a bare ``download``
        # is sufficient (extraction would return them unchanged anyway).
        annotation_paths = {
            "train": dl_manager.download(self.config.annotation_url + "train_annotations.parquet"),
            "test": dl_manager.download(self.config.annotation_url + "test_annotations.parquet"),
        }

        # Both splits stream the same archive; frames are filtered against the
        # split's own annotation table inside _generate_examples.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotation_path": annotation_paths["test"],
                },
            ),
        ]

    def _generate_examples(self, images, annotation_path):
        """Yield (index, example) pairs from Parquet annotations + image archive.

        Args:
            images: iterator of ``(path, file_obj)`` pairs from ``iter_archive``.
            annotation_path: local path of this split's Parquet table.
        """
        df = pd.read_parquet(annotation_path)

        # Group annotations by "<parent_dir>/<file_name>" so they can be
        # matched against member names yielded by iter_archive.
        # NOTE(review): assumes archive member paths are exactly two levels
        # deep (video_dir/frame.jpg) — confirm against the tarball layout.
        annotation_dict = {}
        for _, row in df.iterrows():
            parts = row["file_path"].split("/")
            img_key = "/".join(parts[-2:])
            annotation_dict.setdefault(img_key, []).append(
                {
                    "bbox": row["bbox"],
                    "class_id": row["class_id"],
                    "track_id": row["track_id"],
                    "class_name": row["class_name"],
                }
            )

        idx = 0
        for file_path, file_obj in images:
            # Skip frames that carry no annotations in this split.
            if file_path in annotation_dict:
                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "objects": annotation_dict[file_path],
                }
                idx += 1
|
|