schattin committed on
Commit
83fcdb9
·
verified ·
1 Parent(s): 0e200de

Upload 4 files

Browse files
Files changed (4) hide show
  1. conditioning_images.zip +3 -0
  2. fill1k.py +134 -0
  3. images.zip +3 -0
  4. train.jsonl +0 -0
conditioning_images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efd807356b0f9ee036c4ed4aacc6d64981038153719623f8884acb0ada547b31
3
+ size 2487461
fill1k.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from huggingface_hub import hf_hub_url
3
+ import datasets
4
+ import os
5
+
6
# Dataset version and dataset-card metadata (placeholders to be filled in).
_VERSION = datasets.Version("0.0.2")

_DESCRIPTION = "TODO"
_HOMEPAGE = "TODO"
_LICENSE = "TODO"
_CITATION = "TODO"

# Hub repository that hosts the data files; factored out so the three
# hf_hub_url() calls below cannot drift out of sync.
_REPO_ID = "schattin/fill1k"

# Schema of one example: target image, conditioning image, and caption.
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "conditioning_image": datasets.Image(),
        "text": datasets.Value("string"),
    },
)

# Resolved hub URLs for the metadata file and the two image archives.
METADATA_URL = hf_hub_url(
    _REPO_ID,
    filename="train.jsonl",
    repo_type="dataset",
)

IMAGES_URL = hf_hub_url(
    _REPO_ID,
    filename="images.zip",
    repo_type="dataset",
)

CONDITIONING_IMAGES_URL = hf_hub_url(
    _REPO_ID,
    filename="conditioning_images.zip",
    repo_type="dataset",
)

_DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION)
+
41
+
42
# Define dataset class
class Fill1k(datasets.GeneratorBasedBuilder):
    """Loader for the fill1k dataset.

    Each example is a triple of a target image, a conditioning image, and a
    text caption, matching the ``_FEATURES`` schema (ControlNet-style
    training data).
    """

    BUILDER_CONFIGS = [_DEFAULT_CONFIG]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the metadata file and image archives; define the train split.

        Args:
            dl_manager: ``datasets.DownloadManager`` used to fetch and
                extract the hub-hosted files.

        Returns:
            A single-element list with the TRAIN split generator.
        """
        metadata_path = dl_manager.download(METADATA_URL)
        images_dir = dl_manager.download_and_extract(IMAGES_URL)
        conditioning_images_dir = dl_manager.download_and_extract(
            CONDITIONING_IMAGES_URL
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "metadata_path": metadata_path,
                    "images_dir": images_dir,
                    "conditioning_images_dir": conditioning_images_dir,
                },
            ),
        ]

    def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir):
        """Yield ``(key, example)`` pairs, one per row of the metadata file.

        Args:
            metadata_path: Path to the ``train.jsonl`` metadata file; each
                line must have ``image``, ``conditioning_image`` and
                ``text`` fields.
            images_dir: Directory containing the extracted target images.
            conditioning_images_dir: Directory containing the extracted
                conditioning images.

        Yields:
            ``(relative_image_path, example_dict)`` where the dict matches
            ``_FEATURES`` (image/conditioning_image as path+bytes dicts).
        """
        # Read metadata (one JSON object per line).
        metadata = pd.read_json(metadata_path, lines=True)

        for _, row in metadata.iterrows():
            text = row["text"]

            # Target image. Use a context manager so the file handle is
            # closed promptly (the original relied on the GC to close it).
            image_path = os.path.join(images_dir, row["image"])
            with open(image_path, "rb") as f:
                image = f.read()

            # Conditioning image.
            conditioning_image_path = os.path.join(
                conditioning_images_dir, row["conditioning_image"]
            )
            with open(conditioning_image_path, "rb") as f:
                conditioning_image = f.read()

            # Keyed by the relative image path, which is unique per example.
            yield row["image"], {
                "text": text,
                "image": {
                    "path": image_path,
                    "bytes": image,
                },
                "conditioning_image": {
                    "path": conditioning_image_path,
                    "bytes": conditioning_image,
                },
            }
images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3d61a36417d1eecf9b52cdce44ea7209d147912a3465d52fc139735b4f512ee
3
+ size 2197219
train.jsonl ADDED
The diff for this file is too large to render. See raw diff