JonasLoos commited on
Commit
fc8aba4
·
verified ·
1 Parent(s): 0859a5f

initial commit

Browse files
Files changed (2) hide show
  1. NYUv2.py +106 -0
  2. README.md +109 -0
NYUv2.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """NYU Depth Dataset V2"""
2
+
3
+ import numpy as np
4
+ import h5py
5
+ import datasets
6
+ from datasets import BuilderConfig, Features, Value, SplitGenerator, Split, Array2D, Image
7
+ import hashlib
8
+
9
+
10
+ _CITATION = """\
11
+ @inproceedings{Silberman:ECCV12,
12
+ author = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
13
+ title = {Indoor Segmentation and Support Inference from RGBD Images},
14
+ booktitle = {Proceedings of the European Conference on Computer Vision},
15
+ year = {2012}
16
+ }
17
+ """
18
+
19
+ _DESCRIPTION = """\
20
+ The NYU-Depth V2 data set is comprised of video sequences from a variety of indoor scenes as recorded by both the RGB and Depth cameras from the Microsoft Kinect. It features:
21
+
22
+ * 1449 densely labeled pairs of aligned RGB and depth images
23
+ * 464 new scenes taken from 3 cities
24
+ * 407,024 new unlabeled frames
25
+ * Each object is labeled with a class and an instance number (cup1, cup2, cup3, etc)
26
+
27
+ This dataset is useful for various computer vision tasks, including depth estimation, semantic segmentation, and instance segmentation.
28
+
29
+ This Hugging Face version of the dataset is unofficial. It downloads the data from the original source and converts it to the Hugging Face format.
30
+ """
31
+
32
+ _HOMEPAGE = "https://cs.nyu.edu/~fergus/datasets/nyu_depth_v2.html"
33
+
34
+ _LICENSE = "Unknown"
35
+
36
+ _URL = "http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat"
37
+ _FILE_HASH = "520609c519fba3ba5ac58c8fefcc3530"
38
+
39
+ class NYUv2(datasets.GeneratorBasedBuilder):
40
+ """NYU Depth Dataset V2"""
41
+
42
+ VERSION = datasets.Version("1.0.0")
43
+
44
+ BUILDER_CONFIGS = [
45
+ BuilderConfig(name="default", version=VERSION, description="Default configuration for NYUv2 dataset"),
46
+ ]
47
+
48
+ DEFAULT_CONFIG_NAME = "default"
49
+
50
+ def _info(self):
51
+ features = Features({
52
+ "image": Image(decode=True),
53
+ "depth": Array2D(dtype="float32", shape=(640, 480)),
54
+ "label": Array2D(dtype="int32", shape=(640, 480)),
55
+ "scene": Value("string"),
56
+ "scene_type": Value("string"),
57
+ })
58
+
59
+ return datasets.DatasetInfo(
60
+ description=_DESCRIPTION,
61
+ features=features,
62
+ homepage=_HOMEPAGE,
63
+ license=_LICENSE,
64
+ citation=_CITATION,
65
+ )
66
+
67
+ def _split_generators(self, dl_manager):
68
+ data_path = dl_manager.download(_URL)
69
+
70
+ # Verify file hash
71
+ with open(data_path, 'rb') as f:
72
+ file_hash = hashlib.md5(f.read()).hexdigest()
73
+ if file_hash != _FILE_HASH:
74
+ raise ValueError(
75
+ f"Downloaded file hash '{file_hash}' does not match expected hash '{_FILE_HASH}'. "
76
+ "The downloaded dataset file might be corrupted or modified."
77
+ )
78
+
79
+ return [
80
+ SplitGenerator(
81
+ name="train",
82
+ gen_kwargs={
83
+ "filepath": data_path,
84
+ "split": "train",
85
+ },
86
+ ),
87
+ ]
88
+
89
+ def _generate_examples(self, filepath, split):
90
+ with h5py.File(filepath, 'r') as f:
91
+ images = np.array(f['images'])
92
+ depths = np.array(f['depths'])
93
+ instances = np.array(f['instances'])
94
+ labels = np.array(f['labels'])
95
+ scenes = [''.join(chr(int(x)) for x in f[y]) for y in f.get('scenes')[0]]
96
+ scene_types = [''.join(chr(int(x)) for x in f[y]) for y in f.get('sceneTypes')[0]]
97
+
98
+ for idx in range(images.shape[0]):
99
+ yield idx, {
100
+ "image": images[idx].transpose(1, 2, 0),
101
+ "depth": depths[idx],
102
+ "instance": instances[idx],
103
+ "label": labels[idx],
104
+ "scene": scenes[idx],
105
+ "scene_type": scene_types[idx],
106
+ }
README.md ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ size_categories:
3
+ - 1k<n<10k
4
+ paperswithcode_id: nyuv2
5
+ tags:
6
+ - depth-estimation
7
+ - semantic-segmentation
8
+ dataset_info:
9
+ features:
10
+ - name: image
11
+ dtype: image
12
+ - name: depth
13
+ dtype:
14
+ array2_d:
15
+ shape:
16
+ - 640
17
+ - 480
18
+ dtype: float32
19
+ - name: label
20
+ dtype:
21
+ array2_d:
22
+ shape:
23
+ - 640
24
+ - 480
25
+ dtype: int32
26
+ - name: scene
27
+ dtype: string
28
+ - name: scene_type
29
+ dtype: string
30
+ splits:
31
+ - name: train
32
+ num_bytes: 4096466619
33
+ num_examples: 1449
34
+ download_size: 2972037809
35
+ dataset_size: 4096466619
36
+ ---
37
+
38
+ # NYU Depth Dataset V2
39
+
40
+ This is an unofficial Hugging Face downloading script of the [NYU Depth Dataset V2](https://cs.nyu.edu/~fergus/datasets/nyu_depth_v2.html). It downloads the data from the original source and converts it to the Hugging Face format.
41
+
42
+ This dataset contains the 1449 densely labeled pairs of aligned RGB and depth images.
43
+
44
+ - **Homepage:** [NYU Depth Dataset V2](https://cs.nyu.edu/~fergus/datasets/nyu_depth_v2.html)
45
+ - **Paper:** [Indoor Segmentation and Support Inference from RGBD Images](https://cs.nyu.edu/~fergus/datasets/indoor_seg_support.pdf)
46
+
47
+
48
+ ## Official Description
49
+
50
+ The NYU-Depth V2 data set is comprised of video sequences from a variety of indoor scenes as recorded by both the RGB and Depth cameras from the Microsoft Kinect. It features:
51
+
52
+ * 1449 densely labeled pairs of aligned RGB and depth images
53
+ * 464 new scenes taken from 3 cities
54
+ * 407,024 new unlabeled frames
55
+ * Each object is labeled with a class and an instance number (cup1, cup2, cup3, etc)
56
+
57
+ This dataset is useful for various computer vision tasks, including depth estimation, semantic segmentation, and instance segmentation.
58
+
59
+
60
+ ## Usage
61
+
62
+ ```python
63
+ from datasets import load_dataset
64
+
65
+ # Load the dataset
66
+ dataset = load_dataset("0jl/NYUv2", trust_remote_code=True, split="train")
67
+ ```
68
+
69
+
70
+ ### Common Errors
71
+
72
+ #### `fsspec.exceptions.FSTimeoutError`
73
+
74
+ This can occur with `datasets==3.0` when the download takes more than 5 minutes.
75
+
76
+ ```python
77
+ import datasets, aiohttp
78
+ dataset = datasets.load_dataset(
79
+ "0jl/NYUv2",
80
+ trust_remote_code=True,
81
+ split="train",
82
+ storage_options={'client_kwargs': {'timeout': aiohttp.ClientTimeout(total=3600)}}
83
+ )
84
+ ```
85
+
86
+
87
+ ## Dataset Structure
88
+
89
+ The dataset contains only one training split with the following features:
90
+
91
+ - `image`: RGB image (PIL.Image.Image, shape: (640, 480, 3))
92
+ - `depth`: Depth map (numpy.ndarray, shape: (640, 480), dtype: float32)
93
+ - `label`: Semantic segmentation labels (numpy.ndarray, shape: (640, 480), dtype: int32)
94
+ - `scene`: Scene name (string)
95
+ - `scene_type`: Scene type (string)
96
+
97
+
98
+ ### Citation Information
99
+
100
+ If you use this dataset, please cite the original paper:
101
+
102
+ ```bibtex
103
+ @inproceedings{Silberman:ECCV12,
104
+ author = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
105
+ title = {Indoor Segmentation and Support Inference from RGBD Images},
106
+ booktitle = {Proceedings of the European Conference on Computer Vision},
107
+ year = {2012}
108
+ }
109
+ ```