import os
import subprocess
import tarfile

import datasets

_VERSION = datasets.Version("1.0.0")

_URLS = {
    "copydays_original": {
        "images": [
            "https://dl.fbaipublicfiles.com/vissl/datasets/copydays_original.tar.gz"
        ],
    },
    "copydays_strong": {
        "images": [
            "https://dl.fbaipublicfiles.com/vissl/datasets/copydays_strong.tar.gz"
        ],
    },
}
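
# These archives appear to be mirrors of the Copydays data hosted for the
# VISSL project (dl.fbaipublicfiles.com); the original distribution page is
# the homepage listed in _info() below.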

_DESCRIPTION = (
    "Copydays dataset for copy detection and near-duplicate image retrieval evaluation."
)

_CITATION = """\
@inproceedings{jegou2008hamming,
  title={Hamming embedding and weak geometric consistency for large scale image search},
  author={Jegou, Herve and Douze, Matthijs and Schmid, Cordelia},
  booktitle={European conference on computer vision},
  pages={304--317},
  year={2008},
  organization={Springer}
}
"""

BUILDER_CONFIGS = [
    datasets.BuilderConfig(
        name="database",
        version=_VERSION,
        description="Copydays original split for copy detection evaluation. Original, unmodified images.",
    ),
    datasets.BuilderConfig(
        name="query",
        version=_VERSION,
        description="Copydays query split for copy detection evaluation. Currently only contains the strong modifications.",
    ),
]
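
# How configs and splits relate (both configs currently yield both splits;
# see _split_generators and _generate_examples below):
#   split "database": originals only, query_id == -1
#   split "queries" : strong variants with query_id >= 1, plus the
#                     originals marked with query_id == 0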


class Copydays(datasets.GeneratorBasedBuilder):
    """Copydays copy detection dataset."""

    BUILDER_CONFIGS = BUILDER_CONFIGS
    DEFAULT_CONFIG_NAME = "database"

    def _download_and_extract(self, urls, cache_dir):
        """Download archives using wget and extract them."""
        os.makedirs(cache_dir, exist_ok=True)

        # Look for already-extracted images anywhere under cache_dir (the
        # archives may unpack into subdirectories). Originals have filename
        # stems ending in "00" (e.g. "200000"); strong variants do not.
        stems = [
            os.path.splitext(f)[0]
            for _root, _dirs, files in os.walk(cache_dir)
            for f in files
            if f.lower().endswith(".jpg")
        ]
        has_original = any(s.endswith("00") for s in stems)
        has_strong = any(not s.endswith("00") for s in stems)

        if has_original and has_strong:
            print(
                f"Found existing extracted files in {cache_dir}, skipping download..."
            )
            return [cache_dir]

        for url in urls:
            filename = url.split("/")[-1]
            archive_path = os.path.join(cache_dir, filename)

            # Download using wget if file doesn't exist
            if not os.path.exists(archive_path):
                print(f"Downloading {url}...")
                result = subprocess.run(
                    ["wget", url, "-O", archive_path], capture_output=True, text=True
                )
                if result.returncode != 0:
                    raise RuntimeError(f"Failed to download {url}: {result.stderr}")

            marker_file = os.path.join(cache_dir, f".{filename}.extracted")
            if not os.path.exists(marker_file):
                print(f"Extracting {archive_path}...")
                with tarfile.open(archive_path, "r:gz") as tar:
                    tar.extractall(cache_dir)
                with open(marker_file, "w") as f:
                    f.write("extracted")

        return [cache_dir]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "filename": datasets.Value(
                        "string"
                    ),  # e.g. "200000.jpg", the first database image
                    "split_type": datasets.Value("string"),  # "original" or "strong"
                    "block_id": datasets.Value(
                        "int32"
                    ),  # first 4 digits of the filename (e.g. 2000)
                    "query_id": datasets.Value(
                        "int32"
                    ),  # 1-indexed, digits 5-6 of the filename (e.g. 01, 02, ...)
                    # query_id is 0 for originals in the queries split and -1
                    # in the database split, to make clear those are not queries
                }
            ),
            supervised_keys=None,
            homepage="https://thoth.inrialpes.fr/~jegou/data.php.html#copydays",
            citation=_CITATION,
        )
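
    # Illustrative rows as produced by _generate_examples (values follow the
    # filename scheme above; not checked against the actual archives):
    #   {"filename": "200000.jpg", "split_type": "original",
    #    "block_id": 2000, "query_id": -1}   # database split
    #   {"filename": "200001.jpg", "split_type": "strong",
    #    "block_id": 2000, "query_id": 1}    # queries split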

    def _split_generators(self, dl_manager):
        # Download both archives regardless of config, so everything is
        # fetched and cached in one pass.
        all_urls = []
        for dataset_type in _URLS.values():
            all_urls.extend(dataset_type["images"])

        # Use a stable directory for the wget fallback so previously
        # downloaded/extracted files are found again on later runs. (The
        # exact location is a convention of this script; adjust as needed.)
        cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "copydays")

        try:
            # Prefer the HF DownloadManager; it does not currently work for
            # these URLs, in which case we fall back to wget below.
            archive_paths = dl_manager.download(all_urls)
            extracted_paths = dl_manager.extract(archive_paths)

            # dl_manager.extract returns a single path for a single URL;
            # normalize to a list for the generators below.
            if not isinstance(extracted_paths, list):
                extracted_paths = [extracted_paths]
        except Exception as e:
            # Download and extract using wget
            print(f"HF download failed: {e}")
            print(
                "Falling back to wget download strategy... This typically works better for this dataset."
            )
            extracted_paths = self._download_and_extract(all_urls, cache_dir)

        return [
            datasets.SplitGenerator(
                name="queries",
                gen_kwargs={
                    "image_dirs": extracted_paths,
                    "split_type": "queries",
                    "config_name": self.config.name,
                },
            ),
            datasets.SplitGenerator(
                name="database",
                gen_kwargs={
                    "image_dirs": extracted_paths,
                    "split_type": "database",
                    "config_name": self.config.name,
                },
            ),
        ]

    def _generate_examples(self, image_dirs, split_type, config_name):
        """Generate examples for the requested split.

        config_name is threaded through from gen_kwargs but is not currently
        used: both configs see the same files.
        """
        idx = 0

        for image_dir in image_dirs:
            for root, _dirs, files in os.walk(image_dir):
                for file in files:
                    if file.lower().endswith((".jpg", ".jpeg", ".png", ".bmp", ".gif")):
                        file_path = os.path.join(root, file)
                        filename = file

                        # format: "XXXXXX.jpg" where first 4 digits are block_id, next two are query_id
                        base_name = os.path.splitext(filename)[0]
                        if not base_name.isdigit() or len(base_name) != 6:
                            continue

                        block_id = int(base_name[:4])
                        query_id_str = base_name[4:6]

                        if query_id_str != "00":  # Case 1: Strong image
                            if split_type == "queries":
                                query_id = int(query_id_str)
                                actual_split_type = "strong"
                                yield idx, {
                                    "image": file_path,
                                    "filename": filename,
                                    "split_type": actual_split_type,
                                    "block_id": block_id,
                                    "query_id": query_id,
                                }
                                idx += 1
                        else:  # Case 2: Original image
                            actual_split_type = "original"
                            if split_type == "queries":
                                query_id = 0  # Query ID for queries split
                            else:  # split_type == "database"
                                query_id = -1  # Query ID for database split

                            yield idx, {
                                "image": file_path,
                                "filename": filename,
                                "split_type": actual_split_type,
                                "block_id": block_id,
                                "query_id": query_id,
                            }
                            idx += 1
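

if __name__ == "__main__":
    # Minimal smoke test: a sketch, assuming this file is saved as a loading
    # script (e.g. "copydays.py") and the archives are reachable or already
    # cached. Newer versions of `datasets` may additionally require
    # trust_remote_code=True for script-based datasets.
    ds = datasets.load_dataset(__file__, name="database")
    for split_name, split in ds.items():
        print(split_name, len(split), split[0]["filename"], split[0]["query_id"])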