"""Loading script for the ImageRewardDB text-to-image human preference dataset."""


import json
import os

import datasets
import pandas as pd
from huggingface_hub import hf_hub_url


_CITATION = """\
@misc{xu2023imagereward,
      title={ImageReward: Learning and Evaluating Human Preferences for Text-to-Image Generation},
      author={Jiazheng Xu and Xiao Liu and Yuchen Wu and Yuxuan Tong and Qinkai Li and Ming Ding and Jie Tang and Yuxiao Dong},
      year={2023},
      eprint={2304.05977},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
"""


_DESCRIPTION = """\
ImageRewardDB is a text-to-image human preference dataset. Each example pairs a \
prompt with a generated image and carries expert annotations: the image's rank \
among all images generated for that prompt, plus ratings for overall quality, \
image-text alignment, and fidelity. The 1k/2k/4k/8k configurations load subsets \
of increasing scale.
"""

_HOMEPAGE = "https://huggingface.co/datasets/wuyuchen/ImageRewardDB"
_VERSION = datasets.Version("1.0.0")

_LICENSE = ""


_REPO_ID = "wuyuchen/ImageRewardDB"
_URLS = {}
# Number of image zip parts that make up each split in the repository.
_PART_IDS = {
    "train": 32,
    "validation": 2,
    "test": 2
}

# Build the download URL for every image part and for each split's
# metadata parquet file.
for name in _PART_IDS:
    _URLS[name] = {}
    for i in range(1, _PART_IDS[name] + 1):
        _URLS[name][i] = hf_hub_url(
            _REPO_ID,
            filename=f"images/{name}/{name}_{i}.zip",
            repo_type="dataset"
        )
    _URLS[name]["metadata"] = hf_hub_url(
        _REPO_ID,
        filename=f"metadata-{name}.parquet",
        repo_type="dataset"
    )
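
# For reference, hf_hub_url resolves these to plain HTTPS URLs; a train part
# should look roughly like (sketch, exact revision may differ):
#   https://huggingface.co/datasets/wuyuchen/ImageRewardDB/resolve/main/images/train/train_1.zip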


class ImageRewardDBConfig(datasets.BuilderConfig):
    """BuilderConfig for ImageRewardDB."""

    def __init__(self, part_ids, **kwargs):
        """BuilderConfig for ImageRewardDB.

        Args:
            part_ids (dict): Number of image zip parts to load per split.
            **kwargs: Keyword arguments forwarded to the superclass.
        """
        super().__init__(version=_VERSION, **kwargs)
        self.part_ids = part_ids


class ImageRewardDB(datasets.GeneratorBasedBuilder):
    """ImageRewardDB: generated images ranked and rated by expert annotators."""

    # One config per scale: the "Nk" config downloads 4*N of the 32 training
    # parts plus the full validation and test splits.
    BUILDER_CONFIGS = []
    for num_k in [1, 2, 4, 8]:
        part_ids = {
            "train": 4 * num_k,
            "validation": 2,
            "test": 2
        }
        BUILDER_CONFIGS.append(
            ImageRewardDBConfig(
                name=f"{num_k}k",
                part_ids=part_ids,
                description=f"This is a {num_k}k-scale ImageRewardDB"
            )
        )

    DEFAULT_CONFIG_NAME = "8k"

    def _info(self):
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "prompt_id": datasets.Value("string"),
                "prompt": datasets.Value("string"),
                "classification": datasets.Value("string"),
                "image_amount_in_total": datasets.Value("int8"),
                "rank": datasets.Value("int8"),
                "overall_rating": datasets.Value("int8"),
                "image_text_alignment_rating": datasets.Value("int8"),
                "fidelity_rating": datasets.Value("int8")
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
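
    # A fully decoded example looks roughly like the sketch below; the field
    # values shown are illustrative placeholders, not real data:
    #   {
    #       "image": <PIL.Image.Image>,
    #       "prompt_id": "...", "prompt": "...", "classification": "...",
    #       "image_amount_in_total": 8, "rank": 1,
    #       "overall_rating": 6, "image_text_alignment_rating": 6,
    #       "fidelity_rating": 6,
    #   }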

    def _split_generators(self, dl_manager):
        # Download and extract every image part requested by the active
        # config, and download each split's metadata parquet file.
        data_dirs = {name: [] for name in _PART_IDS}
        json_paths = {name: [] for name in _PART_IDS}
        metadata_paths = {}
        for key in self.config.part_ids:
            for i in range(1, self.config.part_ids[key] + 1):
                data_dir = dl_manager.download_and_extract(_URLS[key][i])
                data_dirs[key].append(data_dir)
                json_paths[key].append(os.path.join(data_dir, f"{key}_{i}.json"))
            metadata_paths[key] = dl_manager.download(_URLS[key]["metadata"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "data_dirs": data_dirs["train"],
                    "json_paths": json_paths["train"],
                    "metadata_path": metadata_paths["train"]
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "validation",
                    "data_dirs": data_dirs["validation"],
                    "json_paths": json_paths["validation"],
                    "metadata_path": metadata_paths["validation"]
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "data_dirs": data_dirs["test"],
                    "json_paths": json_paths["test"],
                    "metadata_path": metadata_paths["test"]
                },
            ),
        ]

    def _generate_examples(self, split, data_dirs, json_paths, metadata_path):
        # Each extracted part ships a JSON file listing its images and their
        # annotations, so the two lists must stay aligned.
        num_data_dirs = len(data_dirs)
        assert num_data_dirs == len(json_paths)
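        # Note: `metadata_path` points to the per-split parquet downloaded in
        # _split_generators. It is not consumed below, but it can be inspected
        # directly, e.g. (sketch): df = pd.read_parquet(metadata_path)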
        for index, json_path in enumerate(json_paths):
            with open(json_path, "r", encoding="utf-8") as f:
                json_data = json.load(f)
            for example in json_data:
                image_path = os.path.join(data_dirs[index], str(example["image_path"]).split("/")[-1])
                # Read the image bytes and close the handle promptly.
                with open(image_path, "rb") as image_file:
                    image_bytes = image_file.read()
                yield example["image_path"], {
                    "image": {
                        "path": image_path,
                        "bytes": image_bytes
                    },
                    "prompt_id": example["prompt_id"],
                    "prompt": example["prompt"],
                    "classification": example["classification"],
                    "image_amount_in_total": example["image_amount_in_total"],
                    "rank": example["rank"],
                    "overall_rating": example["overall_rating"],
                    "image_text_alignment_rating": example["image_text_alignment_rating"],
                    "fidelity_rating": example["fidelity_rating"]
                }
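

# Usage sketch (assuming this script backs the wuyuchen/ImageRewardDB repo and
# the `datasets` library is installed); the config name ("1k", "2k", "4k", or
# "8k") picks the scale:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("wuyuchen/ImageRewardDB", "1k", split="validation")
#   print(ds[0]["prompt"], ds[0]["rank"], ds[0]["overall_rating"])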