davanstrien HF Staff commited on
Commit
b065fc4
·
1 Parent(s): 4513943
Files changed (1) hide show
  1. MAMe.py +198 -0
MAMe.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hugging Face loading script for the MAMe dataset (museum artworks labelled by medium/material)."""

from pathlib import Path
import datasets
import csv
import os

import zipfile
import io

# NOTE(review): this citation is for "Dating Historical Color Images"
# (Palermo et al., ECCV 2012) and does not match the MAMe download URLs
# below or the art-medium class labels in this script — it looks
# copy-pasted from another loading script. Confirm and replace with the
# MAMe paper's citation.
_CITATION = """@inproceedings{10.1007/978-3-642-33783-3_36,
author = {Palermo, Frank and Hays, James and Efros, Alexei A.},
title = {Dating Historical Color Images},
year = {2012},
isbn = {9783642337826},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
url = {https://doi.org/10.1007/978-3-642-33783-3_36},
doi = {10.1007/978-3-642-33783-3_36},
abstract = {We introduce the task of automatically estimating the age of historical color photographs. We suggest features which attempt to capture temporally discriminative information based on the evolution of color imaging processes over time and evaluate the performance of both these novel features and existing features commonly utilized in other problem domains on a novel historical image data set. For the challenging classification task of sorting historical color images into the decade during which they were photographed, we demonstrate significantly greater accuracy than that shown by untrained humans on the same data set. Additionally, we apply the concept of data-driven camera response function estimation to historical color imagery, demonstrating its relevance to both the age estimation task and the popular application of imitating the appearance of vintage color photography.},
booktitle = {Proceedings of the 12th European Conference on Computer Vision - Volume Part VI},
pages = {499–512},
numpages = {14},
location = {Florence, Italy},
series = {ECCV'12}
}
"""


# NOTE(review): this description also describes the historical-photo-dating
# dataset, not MAMe — likely the same copy-paste; confirm and rewrite.
_DESCRIPTION = """\
This dataset contains color photographs taken between the 1930s and 1970s.
The goal of the dataset is to develop methods for dating historical color photographs
"""

# NOTE(review): homepage points to the CMU "historicalColor" project, while
# the data URLs below are hosted on the BSC MAMe storage — verify.
_HOMEPAGE = "http://graphics.cs.cmu.edu/projects/historicalColor/"

# Download locations: metadata CSV archive, 256px image archive, and
# full-resolution image archive (selected via the builder configs below).
_URLS = {
    "metadata": "https://storage.hpai.bsc.es/mame-dataset/MAMe_metadata.zip",
    "images": "https://storage.hpai.bsc.es/mame-dataset/MAMe_data_256.zip",
    "full_images": "https://storage.hpai.bsc.es/mame-dataset/MAMe_data.zip",
}
55
+
56
+
57
def generate_mapping_dict(f):
    """Parse the MAMe metadata CSV into a per-split mapping.

    Args:
        f: binary file-like object holding the ``MAMe_dataset.csv`` content.

    Returns:
        Dict keyed by subset name (the CSV "Subset" column), each value being
        a dict of ``{image filename: remaining metadata columns}``.
    """
    per_split = {}
    reader = csv.DictReader(io.TextIOWrapper(f))
    for record in reader:
        subset_name = record.pop("Subset")
        filename = record.pop("Image file")
        # Group the remaining columns under their subset, keyed by filename.
        per_split.setdefault(subset_name, {})[filename] = record
    return per_split
72
+
73
+
74
class MAMeConfig(datasets.BuilderConfig):
    """BuilderConfig for MAMe that records which image archive to download."""

    def __init__(self, image_data_url, **kwargs):
        """Create a MAMe config.

        Args:
            image_data_url: URL of the zip archive holding this
                configuration's images (256px or full resolution).
            **kwargs: forwarded to ``datasets.BuilderConfig``.
        """
        self.image_data_url = image_data_url
        super().__init__(version=datasets.Version("1.0.2"), **kwargs)
81
+
82
+
83
class MAMe(datasets.GeneratorBasedBuilder):
    """Builder for the MAMe image-classification dataset.

    Each example pairs an image with a medium/material class label and the
    museum metadata columns from ``MAMe_dataset.csv``. Two configurations
    are available: "256" (256px images, default) and "full"
    (full-resolution images).
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        MAMeConfig(name="256", image_data_url=_URLS["images"]),
        MAMeConfig(name="full", image_data_url=_URLS["full_images"]),
    ]

    DEFAULT_CONFIG_NAME = "256"

    def _info(self):
        """Return the DatasetInfo: features, description, homepage, citation."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "label": datasets.ClassLabel(
                    names=[
                        "Albumen photograph",
                        "Bronze",
                        "Ceramic",
                        "Clay",
                        "Engraving",
                        "Etching",
                        "Faience",
                        "Glass",
                        "Gold",
                        "Graphite",
                        "Hand-colored engraving",
                        "Hand-colored etching",
                        "Iron",
                        "Ivory",
                        "Limestone",
                        "Lithograph",
                        "Marble",
                        "Oil on canvas",
                        "Pen and brown ink",
                        "Polychromed wood",
                        "Porcelain",
                        "Silk and metal thread",
                        "Silver",
                        "Steel",
                        "Wood",
                        "Wood engraving",
                        "Woodblock",
                        "Woodcut",
                        "Woven fabric",
                    ]
                ),
                # Metadata columns carried over verbatim from MAMe_dataset.csv.
                "Museum": datasets.Value("string"),
                "Museum-based instance ID": datasets.Value("string"),
                "Width": datasets.Value("float32"),
                "Height": datasets.Value("float32"),
                "Product size": datasets.Value("float32"),
                "Aspect ratio": datasets.Value("float32"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archives and define the train/val/test splits.

        The metadata zip is only downloaded (not extracted) because
        ``_generate_examples`` reads the CSV straight out of the archive
        with ``zipfile``; the image archive is extracted to disk.
        """
        metadata = dl_manager.download(_URLS["metadata"])
        images = dl_manager.download_and_extract(self.config.image_data_url)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "metadata": metadata,
                    "images": images,
                    "split": csv_subset,
                },
            )
            for split_name, csv_subset in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "val"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, metadata, images, split):
        """Yield ``(index, example)`` pairs for the requested split.

        Args:
            metadata: local path to the downloaded ``MAMe_metadata.zip``.
            images: directory where the image archive was extracted.
            split: subset name as used in the CSV ("train", "val" or "test").
        """
        if self.config.name == "full":
            # Full-resolution images can exceed PIL's default pixel limit;
            # lift it to prevent decompression-bomb warnings/errors.
            from PIL import Image

            Image.MAX_IMAGE_PIXELS = None
        # BUG FIX: previously this opened a hard-coded path on the original
        # author's machine (/Users/davanstrien/...), so the script could not
        # run anywhere else. Read the archive the DownloadManager fetched.
        with zipfile.ZipFile(metadata) as archive:
            with io.BytesIO(archive.read("MAMe_dataset.csv")) as f:
                mapping = generate_mapping_dict(f)
        # The two archives unpack their images into different subdirectories.
        subdir = "data_256" if self.config.name == "256" else "data"
        for idx, (image_file, row) in enumerate(mapping[split].items()):
            row["label"] = row.pop("Medium")
            row["image"] = f"{images}/{subdir}/{image_file}"
            yield idx, row