jeffliu-LL committed (verified) · Commit 8617d56 · 1 Parent(s): 24a5a1a

Create ladi_classify_dataset.py

Files changed (1):
  1. ladi_classify_dataset.py +278 -0
ladi_classify_dataset.py ADDED
import cv2
import datasets
import pandas as pd
from datasets.data_files import DataFilesDict, sanitize_patterns
from pathlib import Path
from PIL import ImageFile

from typing import Optional

# allow PIL to decode images whose files were truncated in transit
ImageFile.LOAD_TRUNCATED_IMAGES = True

# maps the dataset names to names for the image sets they rely on
DATA_NAME_MAP = {
    'v1_damage': 'v1',
    'v1_infrastructure': 'v1',
    'v2': 'v2',
    'v2_resized': 'v2_resized',
    'v2a': 'v2',
    'v2a_resized': 'v2_resized'
}

DATA_URLS = {
    'v1': 'https://ladi.s3.amazonaws.com/ladi_v1.tar.gz',
    'v2': 'https://ladi.s3.amazonaws.com/ladi_v2.tar.gz',
    'v2_resized': 'https://ladi.s3.amazonaws.com/ladi_v2_resized.tar.gz'
}

SPLIT_REL_PATHS = {
    # note: the v1 datasets don't have separate 'test' and 'val' splits
    'v1_damage': {'train': 'v1/damage_dataset/damage_df_train.csv',
                  'val': 'v1/damage_dataset/damage_df_test.csv',
                  'test': 'v1/damage_dataset/damage_df_test.csv',
                  'all': 'v1/damage_dataset/damage_df.csv'},
    'v1_infrastructure': {'train': 'v1/infra_dataset/infra_df_train.csv',
                          'val': 'v1/infra_dataset/infra_df_test.csv',
                          'test': 'v1/infra_dataset/infra_df_test.csv',
                          'all': 'v1/infra_dataset/infra_df.csv'},
    'v2': {'train': 'v2/ladi_v2_labels_train.csv',
           'val': 'v2/ladi_v2_labels_val.csv',
           'test': 'v2/ladi_v2_labels_test.csv',
           'all': 'v2/ladi_v2_labels_train_full.csv'},
    'v2_resized': {'train': 'v2/ladi_v2_labels_train_resized.csv',
                   'val': 'v2/ladi_v2_labels_val_resized.csv',
                   'test': 'v2/ladi_v2_labels_test_resized.csv',
                   'all': 'v2/ladi_v2_labels_train_full_resized.csv'},
    'v2a': {'train': 'v2/ladi_v2a_labels_train.csv',
            'val': 'v2/ladi_v2a_labels_val.csv',
            'test': 'v2/ladi_v2a_labels_test.csv',
            'all': 'v2/ladi_v2a_labels_train_full.csv'},
    'v2a_resized': {'train': 'v2/ladi_v2a_labels_train_resized.csv',
                    'val': 'v2/ladi_v2a_labels_val_resized.csv',
                    'test': 'v2/ladi_v2a_labels_test_resized.csv',
                    'all': 'v2/ladi_v2a_labels_train_full_resized.csv'}
}
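
# Worked example of how the maps above compose (illustrative only; the loader
# performs this lookup internally): for the 'v2a_resized' config,
# DATA_NAME_MAP['v2a_resized'] == 'v2_resized', so the images come from
# DATA_URLS['v2_resized'] (ladi_v2_resized.tar.gz), while the labels come from
# SPLIT_REL_PATHS['v2a_resized'], e.g. 'v2/ladi_v2a_labels_train_resized.csv'
# for the train split.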


class LadiClassifyDatasetConfig(datasets.BuilderConfig):
    def __init__(self,
                 name: str = 'v2a_resized',
                 base_dir: Optional[str] = None,
                 split_csvs=None,
                 download_ladi=False,
                 data_name: Optional[str] = None,
                 label_name: Optional[str] = None,
                 **kwargs):
        """
        split_csvs: a dictionary mapping split names to existing csv files containing
            annotations. If this arg is set, you MUST already have the dataset.
        base_dir: the base directory of the label CSVs and data files.
        data_name: the version of the data you're using, used to determine which files
            to download if you don't specify split_csvs. Must be in DATA_URLS.keys().

        If split_csvs is None, the requested data will be downloaded from the hub.
        Please do NOT use this feature with streaming=True, or you will perform a
        large download every time.
        """
        self.download_ladi = download_ladi
        self.data_name = DATA_NAME_MAP[name] if data_name is None else data_name
        self.label_name = name if label_name is None else label_name
        self.base_dir = None if base_dir is None else Path(base_dir)
        self.split_csvs = split_csvs

        if self.data_name not in DATA_URLS.keys():
            raise ValueError(f"Expected data_name to be one of {DATA_URLS.keys()}, got {self.data_name}")

        if split_csvs is None and not download_ladi:
            self.split_csvs = SPLIT_REL_PATHS[self.label_name]

        super(LadiClassifyDatasetConfig, self).__init__(name=name, **kwargs)
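
# A hedged usage sketch for the split_csvs path (the directory below is
# hypothetical): if the images and annotation CSVs are already on disk, point
# split_csvs at them relative to base_dir and nothing is downloaded, e.g.
#
#     config_kwargs = dict(
#         name='v2a_resized',
#         base_dir='/data/ladi',
#         split_csvs={'train': 'v2/ladi_v2a_labels_train_resized.csv',
#                     'val': 'v2/ladi_v2a_labels_val_resized.csv'},
#     )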


class LADIClassifyDataset(datasets.GeneratorBasedBuilder):
    """
    Dataset for the LADI classification task
    """

    VERSION = datasets.Version("0.2.1")
    BUILDER_CONFIG_CLASS = LadiClassifyDatasetConfig
    DEFAULT_CONFIG_NAME = 'v2a_resized'

    BUILDER_CONFIGS = [
        LadiClassifyDatasetConfig(
            name='v1_damage',
            version=VERSION,
            description="Dataset for recognizing damage (flood, rubble, misc) from LADI"
        ),
        LadiClassifyDatasetConfig(
            name="v1_infrastructure",
            version=VERSION,
            description="Dataset for recognizing infrastructure (buildings, roads) from LADI"
        ),
        LadiClassifyDatasetConfig(
            name="v2",
            version=VERSION,
            description="Dataset using the v2 labels for LADI"
        ),
        LadiClassifyDatasetConfig(
            name="v2_resized",
            version=VERSION,
            description="Dataset using the v2 labels for LADI, pointing to the lower-resolution source images for speed"
        ),
        LadiClassifyDatasetConfig(
            name="v2a",
            version=VERSION,
            description="Dataset using the v2a labels for LADI"
        ),
        LadiClassifyDatasetConfig(
            name="v2a_resized",
            version=VERSION,
            description="Dataset using the v2a labels for LADI, pointing to the lower-resolution source images for speed"
        ),
    ]

    def _info(self):
        if self.config.label_name == "v1_damage":
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "flood": datasets.Value("bool"),
                    "rubble": datasets.Value("bool"),
                    "misc_damage": datasets.Value("bool")
                }
            )
        elif self.config.label_name == "v1_infrastructure":
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "building": datasets.Value("bool"),
                    "road": datasets.Value("bool")
                }
            )
        elif self.config.label_name in ["v2", "v2_resized"]:
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "bridges_any": datasets.Value("bool"),
                    "bridges_damage": datasets.Value("bool"),
                    "buildings_affected": datasets.Value("bool"),
                    "buildings_any": datasets.Value("bool"),
                    "buildings_destroyed": datasets.Value("bool"),
                    "buildings_major": datasets.Value("bool"),
                    "buildings_minor": datasets.Value("bool"),
                    "debris_any": datasets.Value("bool"),
                    "flooding_any": datasets.Value("bool"),
                    "flooding_structures": datasets.Value("bool"),
                    "roads_any": datasets.Value("bool"),
                    "roads_damage": datasets.Value("bool"),
                    "trees_any": datasets.Value("bool"),
                    "trees_damage": datasets.Value("bool"),
                    "water_any": datasets.Value("bool"),
                }
            )
        elif self.config.label_name in ["v2a", "v2a_resized"]:
            features = datasets.Features(
                {
                    "image": datasets.Image(),
                    "bridges_any": datasets.Value("bool"),
                    "buildings_any": datasets.Value("bool"),
                    "buildings_affected_or_greater": datasets.Value("bool"),
                    "buildings_minor_or_greater": datasets.Value("bool"),
                    "debris_any": datasets.Value("bool"),
                    "flooding_any": datasets.Value("bool"),
                    "flooding_structures": datasets.Value("bool"),
                    "roads_any": datasets.Value("bool"),
                    "roads_damage": datasets.Value("bool"),
                    "trees_any": datasets.Value("bool"),
                    "trees_damage": datasets.Value("bool"),
                    "water_any": datasets.Value("bool"),
                }
            )
        else:
            raise NotImplementedError
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=f"LADI Dataset for {self.config.label_name} category",
            # This defines the different columns of the dataset and their types.
            # The features are defined above because they differ between configurations.
            features=features,
            # If there's a common (input, target) tuple from the features, uncomment the
            # supervised_keys line below and specify them. They'll be used if
            # as_supervised=True in builder.as_dataset.
            # supervised_keys=("image", "label"),
        )

    def read_ann_csv(self, fpath):
        # the v1 annotation files are tab-separated despite the .csv extension
        if self.config.data_name == 'v1':
            return pd.read_csv(fpath, sep='\t', index_col=False)
        return pd.read_csv(fpath, sep=',', index_col=False)
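
    # Hypothetical annotation-row shape, inferred from _split_generators and
    # _generate_examples below: each row carries a 'local_path' column (and
    # possibly 'url') plus one boolean column per label, e.g.
    #     local_path,flooding_any,roads_damage,...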

    def _split_generators(self, dl_manager):
        generators = []
        data_files = self.config.split_csvs

        if self.config.download_ladi:
            # download the archive and extract its files into config.base_dir
            dl_url = dl_manager.download(DATA_URLS[self.config.data_name])
            base_dir = Path(self.config.base_dir)
            tar_iterator = dl_manager.iter_archive(dl_url)
            base_dir.mkdir(exist_ok=True)
            for filename, file in tar_iterator:
                file_path: Path = base_dir / filename
                file_path.parent.mkdir(parents=True, exist_ok=True)
                with open(file_path, 'wb') as f:
                    f.write(file.read())

        # resolve the split-name -> csv-path mapping against base_dir
        data_files = DataFilesDict.from_local_or_remote(
            sanitize_patterns(data_files),
            base_path=self.config.base_dir
        )

        if 'train' in data_files.keys():
            train_df = self.read_ann_csv(data_files['train'][0])
            label_cols = tuple(label for label in train_df.columns if label not in ['url', 'local_path'])
            train_examples = [x._asdict() for x in train_df.itertuples()]
            generators.append(datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"examples": train_examples,
                            "label_cols": label_cols}
            ))
        if 'val' in data_files.keys():
            val_df = self.read_ann_csv(data_files['val'][0])
            label_cols = tuple(label for label in val_df.columns if label not in ['url', 'local_path'])
            val_examples = [x._asdict() for x in val_df.itertuples()]
            generators.append(datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"examples": val_examples,
                            "label_cols": label_cols}
            ))
        if 'test' in data_files.keys():
            test_df = self.read_ann_csv(data_files['test'][0])
            label_cols = tuple(label for label in test_df.columns if label not in ['url', 'local_path'])
            test_examples = [x._asdict() for x in test_df.itertuples()]
            generators.append(datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"examples": test_examples,
                            "label_cols": label_cols}
            ))
        if 'all' in data_files.keys():
            all_df = self.read_ann_csv(data_files['all'][0])
            label_cols = tuple(label for label in all_df.columns if label not in ['url', 'local_path'])
            all_examples = [x._asdict() for x in all_df.itertuples()]
            generators.append(datasets.SplitGenerator(
                name=datasets.Split.ALL,
                gen_kwargs={"examples": all_examples,
                            "label_cols": label_cols}
            ))

        return generators

    def _generate_examples(self, examples, label_cols, from_url_list=False):
        for ex in examples:
            try:
                image_path = Path(ex['local_path'])
                if not image_path.is_absolute():
                    image_path = self.config.base_dir / image_path
            except Exception:
                print(ex)
                raise

            # cv2.imread wants a str path and returns a BGR array; convert to
            # RGB before handing it to the datasets.Image feature
            image = cv2.imread(str(image_path))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            labels = {k: ex[k] for k in label_cols}
            labels |= {"image": image}
            yield str(image_path), labels
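
# A quick smoke test, as a hedged sketch: assumes this file is used as a local
# dataset script (extra load_dataset kwargs are forwarded to the config) and
# that '/data/ladi' (hypothetical) has room for the download. Newer versions of
# the datasets library may also require trust_remote_code=True here.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        __file__,
        name='v2a_resized',
        base_dir='/data/ladi',   # hypothetical extraction directory
        download_ladi=True,      # fetch ladi_v2_resized.tar.gz and extract it
    )
    # print the feature schema and one example's label columns
    print(ds['train'].features)
    print({k: v for k, v in ds['train'][0].items() if k != 'image'})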