SaulLu committed on
Commit
5d3670e
·
1 Parent(s): d1c44d2

add loading script

Browse files
Files changed (1) hide show
  1. Oxford-IIIT-Pet.py +201 -0
Oxford-IIIT-Pet.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
 + """Oxford-IIIT Pet loading script."""
15
+
16
+
17
+ import xml.etree.ElementTree as ET
18
+ from pathlib import Path
19
+
20
+ import datasets
21
+
22
+ _CITATION = """\
23
+ @InProceedings{parkhi12a,
24
+ author = "Parkhi, O. M. and Vedaldi, A. and Zisserman, A. and Jawahar, C.~V.",
25
+ title = "Cats and Dogs",
26
+ booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
27
+ year = "2012",
28
+ }
29
+
30
+ """
31
+
32
+ _DESCRIPTION = """\
33
+ 37 category pet dataset with roughly 200 images for each class. The images have a large variations in scale, pose and lighting. All images have an associated ground truth annotation of breed, head ROI, and pixel level trimap segmentation.
34
+ """
35
+
36
+ _HOMEPAGE = "https://www.robots.ox.ac.uk/~vgg/data/pets/"
37
+
38
+ _LICENSE = "CC BY-SA 4.0"
39
+
40
+ # TODO: Add link to the official dataset URLs here
41
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
42
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
43
+ _URLS = {
44
+ "images": "https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz",
45
+ "annotations": "https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz",
46
+ }
47
+
48
+
49
+ _LABEL_CLASSES = [
50
+ "Abyssinian",
51
+ "american_bulldog",
52
+ "american_pit_bull_terrier",
53
+ "basset_hound",
54
+ "beagle",
55
+ "Bengal",
56
+ "Birman",
57
+ "Bombay",
58
+ "boxer",
59
+ "British_Shorthair",
60
+ "chihuahua",
61
+ "Egyptian_Mau",
62
+ "english_cocker_spaniel",
63
+ "english_setter",
64
+ "german_shorthaired",
65
+ "great_pyrenees",
66
+ "havanese",
67
+ "japanese_chin",
68
+ "keeshond",
69
+ "leonberger",
70
+ "Maine_Coon",
71
+ "miniature_pinscher",
72
+ "newfoundland",
73
+ "Persian",
74
+ "pomeranian",
75
+ "pug",
76
+ "Ragdoll",
77
+ "Russian_Blue",
78
+ "saint_bernard",
79
+ "samoyed",
80
+ "scottish_terrier",
81
+ "shiba_inu",
82
+ "Siamese",
83
+ "Sphynx",
84
+ "staffordshire_bull_terrier",
85
+ "wheaten_terrier",
86
+ "yorkshire_terrier",
87
+ ]
88
+ _SPECIES_CLASSES = ["Cat", "Dog"]
89
+
90
+ # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
91
+ class NewDataset(datasets.GeneratorBasedBuilder):
92
+ """TODO: Short description of my dataset."""
93
+
94
+ VERSION = datasets.Version("1.0.0")
95
+
96
+ def _info(self):
97
+ features = datasets.Features(
98
+ {
99
+ "image": datasets.Image(),
100
+ "label": datasets.features.ClassLabel(names=_LABEL_CLASSES),
101
+ "species": datasets.features.ClassLabel(names=_SPECIES_CLASSES),
102
+ "segmentation_mask": datasets.Image(),
103
+ # "bbox": datasets.features.Array2D(shape=(1, 4), dtype="int64"),
104
+ }
105
+ )
106
+ return datasets.DatasetInfo(
107
+ description=_DESCRIPTION,
108
+ features=features,
109
+ homepage=_HOMEPAGE,
110
+ license=_LICENSE,
111
+ citation=_CITATION,
112
+ )
113
+
114
+ def _split_generators(self, dl_manager):
115
+ urls = _URLS
116
+ data_dir = dl_manager.download_and_extract(urls)
117
+ return [
118
+ datasets.SplitGenerator(
119
+ name=datasets.Split.TRAIN,
120
+ gen_kwargs={
121
+ "images_dir": Path(data_dir["images"]) / "images",
122
+ "annotations_dir": Path(data_dir["annotations"]) / "annotations",
123
+ "images_list": Path(data_dir["annotations"])
124
+ / "annotations"
125
+ / "trainval.txt",
126
+ },
127
+ ),
128
+ datasets.SplitGenerator(
129
+ name=datasets.Split.TEST,
130
+ gen_kwargs={
131
+ "images_dir": Path(data_dir["images"]) / "images",
132
+ "annotations_dir": Path(data_dir["annotations"]) / "annotations",
133
+ "images_list": Path(data_dir["annotations"])
134
+ / "annotations"
135
+ / "test.txt",
136
+ },
137
+ ),
138
+ ]
139
+
140
+ # Not used because missing xml file for Abyssinian_104
141
+ # def _get_data_from_xml_file(self, xlm_annotation_file):
142
+ # # From https://huggingface.co/datasets/fuliucansheng/pascal_voc/blob/main/pascal_voc.py
143
+ # anno_tree = ET.parse(xlm_annotation_file)
144
+ # objects = []
145
+ # for obj in anno_tree.findall("./object"):
146
+ # info = {
147
+ # "class": obj.findall("./name")[0].text,
148
+ # "bbox": [[
149
+ # int(float(obj.findall("./bndbox/xmin")[0].text)),
150
+ # int(float(obj.findall("./bndbox/ymin")[0].text)),
151
+ # int(float(obj.findall("./bndbox/xmax")[0].text)),
152
+ # int(float(obj.findall("./bndbox/ymax")[0].text)),]
153
+ # ],
154
+ # }
155
+
156
+ # if obj.findall("./pose"):
157
+ # info["pose"] = obj.findall("./pose")[0].text
158
+ # if obj.findall("./truncated"):
159
+ # info["truncated"] = int(obj.findall("./truncated")[0].text)
160
+ # if obj.findall("./difficult"):
161
+ # info["difficult"] = int(obj.findall("./difficult")[0].text)
162
+ # else:
163
+ # info["difficult"] = 0
164
+ # if obj.findall("./occluded"):
165
+ # info["occluded"] = int(obj.findall("./occluded")[0].text)
166
+
167
+ # if obj.findall("./actions"):
168
+ # info["action"] = [
169
+ # action.tag
170
+ # for action in obj.findall("./actions/")
171
+ # if int(action.text) == 1
172
+ # ][0]
173
+
174
+ # objects.append(info)
175
+ # print(len(objects))
176
+ # return objects
177
+
178
+ def _generate_examples(self, images_dir, annotations_dir, images_list):
179
+ bounding_box_dir = annotations_dir / "xmls"
180
+ trimaps_dir = annotations_dir / "trimaps"
181
+
182
+ with open(images_list, encoding="utf-8") as f:
183
+ for row in f:
184
+ image_name, label, species, _ = row.strip().split(" ")
185
+
186
+ trimap_name = f"{image_name}.png"
187
+ # bbox_name = f"{image_name}.xml"
188
+ image_name = f"{image_name}.jpg"
189
+
190
+ label = _LABEL_CLASSES[int(label) - 1]
191
+ species = _SPECIES_CLASSES[int(species) - 1]
192
+ # objects = self._get_data_from_xml_file(str(bounding_box_dir / bbox_name))
193
+
194
+ record = {
195
+ "image": str(images_dir / image_name),
196
+ "label": label,
197
+ "species": species,
198
+ "segmentation_mask": str(trimaps_dir / trimap_name),
199
+ # "bbox": [[ymin, ymax, xmin, xmax]]
200
+ }
201
+ yield image_name, record