sjyhne committed
Commit e28f83d · 1 Parent(s): 6225d41

Delete mapai_dataset.py

Files changed (1)
  1. mapai_dataset.py +0 -161
mapai_dataset.py DELETED
@@ -1,161 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """
- Aerial image dataset for building segmentation.
- The dataset was used in the MapAI: Precision in Building Segmentation competition, with the exact same data split.
- The training and validation data come from Denmark; the test data comes from Norway.
- """
-
- import datasets
- from pyarrow import parquet as pq
-
-
- _CITATION = """
- @article{Jyhne2022,
-     author = {Sander Jyhne and Morten Goodwin and Per-Arne Andersen and Ivar Oveland and Alexander Salveson Nossum and Karianne Ormseth and Mathilde Orstavik and Andrew C Flatman},
-     doi = {10.5617/NMI.9849},
-     issn = {2703-9196},
-     issue = {3},
-     journal = {Nordic Machine Intelligence},
-     keywords = {Aerial Images,Deep Learning,Image segmentation,machine learning,remote sensing,semantic segmentation},
-     month = {9},
-     pages = {1-3},
-     title = {MapAI: Precision in Building Segmentation},
-     volume = {2},
-     url = {https://journals.uio.no/NMI/article/view/9849},
-     year = {2022},
- }
- """
-
- _DESCRIPTION = """
- The dataset is released to advance research on building segmentation using aerial images.
- """
-
- # TODO: Add a link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- # TODO: Add the license for the dataset here if you can find it
- _LICENSE = ""
-
- # One parquet file per split, hosted in this dataset repository.
- _URLS = {
-     "train": "https://huggingface.co/datasets/sjyhne/mapai_dataset/resolve/main/train.parquet",
-     "validation": "https://huggingface.co/datasets/sjyhne/mapai_dataset/resolve/main/validation.parquet",
-     "task1_test": "https://huggingface.co/datasets/sjyhne/mapai_dataset/resolve/main/task1_test.parquet",
-     "task2_test": "https://huggingface.co/datasets/sjyhne/mapai_dataset/resolve/main/task2_test.parquet",
- }
-
-
- class MapaiDataset(datasets.GeneratorBasedBuilder):
-     """Building segmentation dataset with aerial images and lidar."""
-
-     VERSION = datasets.Version("1.1.0")
-
-     # Each split is exposed as its own configuration, mirroring the keys of
-     # _URLS, so `_split_generators` can look up the right parquet file from
-     # `self.config.name`.
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="train", version=VERSION),
-         datasets.BuilderConfig(name="validation", version=VERSION),
-         datasets.BuilderConfig(name="task1_test", version=VERSION),
-         datasets.BuilderConfig(name="task2_test", version=VERSION),
-     ]
-
-     DEFAULT_CONFIG_NAME = "train"
-
-     def _info(self):
-         # Images, lidar and masks are stored as flat uint8 sequences together
-         # with their original shape, so they can be reshaped after loading.
-         features = datasets.Features(
-             {
-                 "filename": datasets.Value("string"),
-                 "image": datasets.Sequence(datasets.Value("uint8")),
-                 "lidar": datasets.Sequence(datasets.Value("uint8")),
-                 "mask": datasets.Sequence(datasets.Value("uint8")),
-                 # int32 rather than uint8: image dimensions exceed 255.
-                 "shape": datasets.Sequence(datasets.Value("int32")),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         # The selected configuration (`self.config.name`) determines which
-         # split's parquet file is downloaded; `dl_manager` caches it locally.
-         url = _URLS[self.config.name]
-         filepath = dl_manager.download(url)
-         return [
-             datasets.SplitGenerator(
-                 name=self.config.name,
-                 gen_kwargs={"filepath": filepath},
-             )
-         ]
-
-     def _generate_examples(self, filepath):
-         # `filepath` is unpacked from `gen_kwargs` as given in `_split_generators`.
-         # Retrieving examples from parquet files is slow, so the file is streamed
-         # in record batches instead of being read into memory at once. This
-         # assumes the parquet columns match the feature names declared in `_info`.
-         pqfile = pq.ParquetFile(filepath)
-         id_ = 0
-         for batch in pqfile.iter_batches():
-             for row in batch.to_pylist():
-                 # The key only needs to be unique for each example.
-                 yield id_, {
-                     "filename": row["filename"],
-                     "image": row["image"],
-                     "lidar": row["lidar"],
-                     "mask": row["mask"],
-                     "shape": row["shape"],
-                 }
-                 id_ += 1
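
With the loading script gone, the split parquet files are still addressed directly by the URLs in _URLS above. Below is a minimal sketch of reading one split with pyarrow, assuming the files remain hosted at those paths; the use of hf_hub_download and the batch_size value are illustrative choices on my part, not something this commit prescribes.

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# Fetch one split's parquet file from the dataset repo (cached locally).
path = hf_hub_download(
    repo_id="sjyhne/mapai_dataset",
    filename="train.parquet",
    repo_type="dataset",
)

# Stream the file in small record batches rather than loading it whole.
pqfile = pq.ParquetFile(path)
for batch in pqfile.iter_batches(batch_size=8):
    for row in batch.to_pylist():
        print(row["filename"])  # columns as declared in the script's features

Depending on the library version, datasets.load_dataset("sjyhne/mapai_dataset") may also pick the files up through the Hub's automatic parquet support, but that is an assumption rather than something this commit establishes.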