File size: 2,798 Bytes
0b12b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
98b2f85
 
 
 
 
0b12b3f
98b2f85
 
 
 
 
 
 
0b12b3f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import pandas as pd
from huggingface_hub import hf_hub_url
import datasets
import os

_VERSION = datasets.Version("0.0.2")

_DESCRIPTION = "This dataset includes images and conditioning images for XYZ purpose."
_HOMEPAGE = "https://www.example.com"
_LICENSE = "MIT"
_CITATION = """@article{YourDataset2021,
  title={Your Dataset Title},
  author={Your Name},
  journal={Your Journal},
  year={2021}
}"""

# Features exposed per example: two decoded images plus a caption string.
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "conditioning_image": datasets.Image(),
        "text": datasets.Value("string"),
    },
)

# All artifacts live in a single Hub dataset repo; keep the id in one place
# so the three URL definitions below cannot drift apart.
_REPO_ID = "spine-crook/test"

# Resolved download URL for the JSON-lines metadata file.
METADATA_URL = hf_hub_url(
    _REPO_ID,
    filename="train.jsonl",
    repo_type="dataset",
)

# Resolved download URL for the archive of primary images.
IMAGES_URL = hf_hub_url(
    _REPO_ID,
    filename="images.zip",
    repo_type="dataset",
)

# Resolved download URL for the archive of conditioning images.
CONDITIONING_IMAGES_URL = hf_hub_url(
    _REPO_ID,
    filename="conditioning_images.zip",
    repo_type="dataset",
)

_DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION)

class Test(datasets.GeneratorBasedBuilder):
    """Builder yielding (image, conditioning_image, text) triplets from the Hub repo."""

    BUILDER_CONFIGS = [_DEFAULT_CONFIG]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset card: features, license, citation, homepage."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata file, extract both image archives, expose one TRAIN split."""
        jsonl_path = dl_manager.download(METADATA_URL)
        extracted_images = dl_manager.download_and_extract(IMAGES_URL)
        extracted_cond_images = dl_manager.download_and_extract(CONDITIONING_IMAGES_URL)

        train_split = datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={
                "metadata_path": jsonl_path,
                "images_dir": extracted_images,
                "conditioning_images_dir": extracted_cond_images,
            },
        )
        return [train_split]

    def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir):
        """Yield (key, example) pairs; the key is the image's relative path from metadata."""
        records = pd.read_json(metadata_path, lines=True)

        for record in records.to_dict(orient="records"):
            rel_image = record["image"]
            example = {
                "text": record["text"],
                "image": os.path.join(images_dir, rel_image),
                "conditioning_image": os.path.join(
                    conditioning_images_dir, record["conditioning_image"]
                ),
            }
            yield rel_image, example