# INRIA-CopyDays.py
import os
import subprocess
import tarfile
import tempfile

import datasets
_VERSION = datasets.Version("1.0.0")
_URLS = {
"copydays_original": {
"images": [
"https://dl.fbaipublicfiles.com/vissl/datasets/copydays_original.tar.gz"
],
},
"copydays_strong": {
"images": [
"https://dl.fbaipublicfiles.com/vissl/datasets/copydays_strong.tar.gz"
],
},
}
_DESCRIPTION = (
"Copydays dataset for copy detection and near-duplicate image retrieval evaluation."
)
_CITATION = """\
@inproceedings{jegou2008hamming,
title={Hamming embedding and weak geometric consistency for large scale image search},
author={Jegou, Herve and Douze, Matthijs and Schmid, Cordelia},
booktitle={European conference on computer vision},
pages={304--317},
year={2008},
organization={Springer}
}
"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="database",
version=_VERSION,
description="Copydays original split for copy detection evaluation. Original, unmodified images.",
),
datasets.BuilderConfig(
name="query",
version=_VERSION,
description="Copydays query split for copy detection evaluation. Currently only contains the strong modifications.",
),
]
class Copydays(datasets.GeneratorBasedBuilder):
"""Copydays copy detection dataset."""
BUILDER_CONFIGS = BUILDER_CONFIGS
DEFAULT_CONFIG_NAME = "database"
def _download_and_extract(self, urls, cache_dir):
"""Download archives using wget and extract them."""
os.makedirs(cache_dir, exist_ok=True)
        existing_files = [f for f in os.listdir(cache_dir) if f.endswith(".jpg")]
        # Filenames are "XXXXXX.jpg", so test the stem: the ".jpg" suffix would
        # otherwise defeat endswith("00")
        stems = [os.path.splitext(f)[0] for f in existing_files]
        has_original = any(s.endswith("00") for s in stems)
        has_strong = any(not s.endswith("00") for s in stems)
if has_original and has_strong:
print(
f"Found existing extracted files in {cache_dir}, skipping download..."
)
return [cache_dir]
for url in urls:
filename = url.split("/")[-1]
archive_path = os.path.join(cache_dir, filename)
# Download using wget if file doesn't exist
if not os.path.exists(archive_path):
print(f"Downloading {url}...")
                result = subprocess.run(
                    ["wget", url, "-O", archive_path], capture_output=True, text=True
                )
                if result.returncode != 0:
                    # wget -O creates the output file even on failure; drop the
                    # partial file so a retry does not skip the download
                    if os.path.exists(archive_path):
                        os.remove(archive_path)
                    raise RuntimeError(f"Failed to download {url}: {result.stderr}")
            marker_file = os.path.join(cache_dir, f".{filename}.extracted")
            if not os.path.exists(marker_file):
                print(f"Extracting {archive_path}...")
                with tarfile.open(archive_path, "r:gz") as tar:
                    # extractall trusts member paths; on Python 3.12+ passing
                    # filter="data" would reject unsafe members
                    tar.extractall(cache_dir)
                # Marker file makes re-extraction idempotent across runs
                with open(marker_file, "w") as f:
                    f.write("extracted")
return [cache_dir]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"image": datasets.Image(),
"filename": datasets.Value(
"string"
), # ex: "200000.jpg" which is the first db image
"split_type": datasets.Value("string"), # "original" or "strong"
"block_id": datasets.Value(
"int32"
), # first 4 digists of filename (ex: 2000)
"query_id": datasets.Value(
"int32"
), # 1 indexed, digits 5-6 of filename (ex: 01, 02, etc.)
# query_id is -1 for database split to make it clear it's not a query
}
),
supervised_keys=None,
homepage="https://thoth.inrialpes.fr/~jegou/data.php.html#copydays",
citation=_CITATION,
)
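
    # Example records (hypothetical values, following the schema above):
    #   queries split, strong:    {"filename": "200001.jpg", "split_type": "strong",   "block_id": 2000, "query_id": 1}
    #   queries split, original:  {"filename": "200000.jpg", "split_type": "original", "block_id": 2000, "query_id": 0}
    #   database split, original: {"filename": "200000.jpg", "split_type": "original", "block_id": 2000, "query_id": -1}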
def _split_generators(self, dl_manager):
# Download both datasets regardless of config (this way we just have to download and cache once)
all_urls = []
for dataset_type in _URLS.values():
all_urls.extend(dataset_type["images"])
        try:
            # Try the HF DownloadManager first (preferred, but it fails for these mirrors)
            archive_paths = dl_manager.download(all_urls)
            extracted_paths = dl_manager.extract(archive_paths)
            # Normalize to a list to keep the type checker happy
            if not isinstance(extracted_paths, list):
                extracted_paths = [extracted_paths]
        except Exception as e:
            # Fall back to downloading and extracting with wget
            print(f"HF download failed: {e}")
            print(
                "Falling back to wget download strategy... This typically works better for this dataset."
            )
            # Create the temporary cache directory only when the fallback is used
            cache_dir = tempfile.mkdtemp(prefix="copydays_")
            extracted_paths = self._download_and_extract(all_urls, cache_dir)
return [
datasets.SplitGenerator(
name="queries",
gen_kwargs={
"image_dirs": extracted_paths,
"split_type": "queries",
"config_name": self.config.name,
},
),
datasets.SplitGenerator(
name="database",
gen_kwargs={
"image_dirs": extracted_paths,
"split_type": "database",
"config_name": self.config.name,
},
),
]
    def _generate_examples(self, image_dirs, split_type, config_name):
        """Generate examples for the dataset.

        config_name is accepted to mirror gen_kwargs but is currently unused:
        both configs yield both splits.
        """
        idx = 0
for image_dir in image_dirs:
            for root, _dirs, files in os.walk(image_dir):
for file in files:
if file.lower().endswith((".jpg", ".jpeg", ".png", ".bmp", ".gif")):
file_path = os.path.join(root, file)
filename = file
# format: "XXXXXX.jpg" where first 4 digits are block_id, next two are query_id
base_name = os.path.splitext(filename)[0]
if not base_name.isdigit() or len(base_name) != 6:
continue
block_id = int(base_name[:4])
query_id_str = base_name[4:6]
if query_id_str != "00": # Case 1: Strong image
if split_type == "queries":
query_id = int(query_id_str)
actual_split_type = "strong"
yield idx, {
"image": file_path,
"filename": filename,
"split_type": actual_split_type,
"block_id": block_id,
"query_id": query_id,
}
idx += 1
                        else:  # Case 2: Original image (stem ends in "00")
                            actual_split_type = "original"
                            if split_type == "queries":
                                query_id = 0  # originals carry query_id 0 in the queries split
                            else:  # split_type == "database"
                                query_id = -1  # -1 marks database images as non-queries
yield idx, {
"image": file_path,
"filename": filename,
"split_type": actual_split_type,
"block_id": block_id,
"query_id": query_id,
}
idx += 1
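

if __name__ == "__main__":
    # Minimal smoke test: a sketch, not part of the builder. It assumes this
    # script is loaded directly as a local dataset script, which requires
    # network access to the mirrors above and a `datasets` release that still
    # supports script-based loading (trust_remote_code=True on 2.x).
    ds = datasets.load_dataset(__file__, name="query", trust_remote_code=True)
    print(ds)
    sample = ds["queries"][0]
    print(sample["filename"], sample["block_id"], sample["query_id"])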