import csv
import json
import logging
import os
import tarfile
from io import BytesIO

import datasets
from PIL import Image
from datasets.utils.logging import get_logger, tqdm

logger = get_logger(__name__)  # <- same pattern used in HF scripts

_DESCRIPTION = "WikiChurches is a dataset for architectural style classification"
_HOMEPAGE = "https://huggingface.co/datasets/LZSI-2/WikiChurches"
_LICENSE = "cc-by-nc-4.0"
_CITATION = """@article{barz2021wikichurches, title={WikiChurches: A Fine-Grained Dataset of Architectural Styles with Real-World Challenges}, author={Barz, Björn and Denzler, Joachim}, journal={arXiv preprint arXiv:2108.06959}, year={2021} }"""

# The image corpus is sharded into 40 tar parts on the Hub.
TAR_PARTS = 40
_TAR_URLS = [
    f"https://huggingface.co/datasets/LZSI-2/WikiChurches/resolve/main/images/images_part{part_id}.tar"
    for part_id in range(1, TAR_PARTS + 1)
]
_URLS = {
    "churches_json": "https://huggingface.co/datasets/LZSI-2/WikiChurches/resolve/main/churches.json",
    "style_levels": "https://huggingface.co/datasets/LZSI-2/WikiChurches/resolve/main/style_levels.json",
    "style_names": "https://huggingface.co/datasets/LZSI-2/WikiChurches/resolve/main/style_names.txt",
}


class SacredBuildingsDataset(datasets.GeneratorBasedBuilder):
    """Sacred Buildings Dataset with support for different hierarchy levels."""

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="wc4", version=VERSION, description="Wiki Churches 4 main classes"
        ),
        datasets.BuilderConfig(
            name="wc6", version=VERSION, description="Wiki Churches 6 largest classes"
        ),
        datasets.BuilderConfig(
            name="wc14", version=VERSION, description="Wiki Churches 14 first-level classes"
        ),
    ]

    DEFAULT_CONFIG_NAME = "wc4"

    def _info(self):
        """Return the dataset metadata: one RGB image with a style label and a church id."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "style": datasets.Value("string"),
                "church_id": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _get_style_names(self, style_levels):
        """Return the style mapping for the active config, or {} if absent.

        NOTE(review): this helper is not called anywhere in this file; kept
        for backward compatibility.
        """
        return style_levels.get(self.config.name, {})

    def _split_generators(self, dl_manager):
        """Download the tar shards and metadata files; everything goes into TRAIN."""
        downloaded_files = dl_manager.download(_TAR_URLS)
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "file_paths": downloaded_files,
                    "data_dir": data_dir,
                },
            ),
        ]

    def _trace_to_top_parent(self, style_id, hierarchy):
        """Walk the style hierarchy upward and return the root ancestor of `style_id`.

        Ids not present in `hierarchy` (or with a falsy parent) are returned
        unchanged.
        """
        while style_id in hierarchy and hierarchy[style_id]["parent"]:
            style_id = hierarchy[style_id]["parent"]
        return style_id

    def _generate_examples(self, file_paths, data_dir):
        """Yield (image_name, example) pairs from the downloaded tar shards.

        Args:
            file_paths: local paths of the downloaded image tar parts.
            data_dir: dict mapping the keys of `_URLS` to local file paths.

        An image is emitted only when exactly one of the config's top-level
        styles applies to its church, i.e. the label is unambiguous.
        """
        with open(data_dir["churches_json"], "r") as f:
            churches_data = json.load(f)
        # Fixed: the original loaded this file a second time through a bare
        # json.load(open(...)), leaking an unclosed file handle.
        with open(data_dir["style_levels"], "r") as f:
            style_levels = json.load(f)

        # How many of the largest top-level styles each config keeps.
        # NOTE(review): each slice takes one more entry than the class count in
        # the config name (wc4 -> 5, wc6 -> 7, wc14 -> 15); presumably the
        # first entry of styles_by_size is a non-class aggregate — confirm
        # against style_levels.json.
        num_styles = {"wc4": 5, "wc6": 7, "wc14": 15}[self.config.name]
        domain_styles = set(
            list(style_levels["statistics"]["styles_by_size"].keys())[:num_styles]
        )
        hierarchy = style_levels["hierarchy"]

        for filepath in file_paths:
            logger.info(f"Processing {os.path.basename(filepath)}")
            with tarfile.open(filepath, "r") as tar:
                for member in tqdm(tar.getmembers(), desc=os.path.basename(filepath)):
                    if not member.isfile():
                        continue
                    logger.debug(f"Processing {member.name}")
                    # Tar member names are "<church_id>_<...>".
                    church_id = member.name.split("_")[0]
                    if church_id not in churches_data:
                        continue
                    # Collect the top-level ancestors of all annotated styles
                    # that fall inside the configured style set.
                    top_level_set = set()
                    for s_id in churches_data[church_id]["styles"]:
                        top_parent = self._trace_to_top_parent(s_id, hierarchy)
                        if top_parent in domain_styles:
                            top_level_set.add(top_parent)
                    # Skip churches with zero or ambiguous (multiple) labels.
                    if len(top_level_set) != 1:
                        continue
                    label_name = top_level_set.pop()
                    # Extract only after all filters passed (the original
                    # extracted before the church_id check, wasting work).
                    image_file = tar.extractfile(member)
                    with Image.open(image_file) as image:
                        # convert() returns a new image, safe to use after close.
                        image = image.convert("RGB")
                    yield member.name, {
                        "image": image,
                        "style": label_name,
                        "church_id": church_id,
                    }