zhenzi committed on
Commit
b8577a7
·
1 Parent(s): 333b75c
Files changed (3) hide show
  1. README.md +0 -0
  2. data_process.py +95 -0
  3. metadata/tests/train.txt +2 -0
README.md ADDED
File without changes
data_process.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+
4
+ import datasets
5
+
6
+ _HOMEPAGE = "https://huggingface.co/datasets/zhenzi/datasets"
7
+
8
+ _LICENSE = "Apache License 2.0"
9
+
10
+ _CITATION = """\
11
+ @software{2022,
12
+ title=数据集标题,
13
+ author=zhenzi,
14
+ year={2022},
15
+ month={March},
16
+ publisher = {GitHub}
17
+ }
18
+ """
19
+
20
+ _DESCRIPTION = """\
21
+ 数据集描述.
22
+ """
23
+
24
+ _REPO = "https://huggingface.co/datasets/zhenzi/datasets/resolve/main/metadata"
25
+
26
+
27
class ImageConfig(datasets.BuilderConfig):
    """BuilderConfig for Imagette."""

    def __init__(self, data_url, metadata_urls, **kwargs):
        """Store the archive URL and the per-split metadata URLs.

        Args:
            data_url: URL of the zip archive containing the images.
            metadata_urls: mapping of split name -> metadata text-file URL.
            **kwargs: forwarded to ``datasets.BuilderConfig`` (name,
                description, ...). The version is pinned to 1.0.0.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_urls = metadata_urls
34
+
35
+
36
class Imagenette(datasets.GeneratorBasedBuilder):
    """Image/caption dataset builder.

    Downloads an image archive plus one metadata text file per split and
    yields ``{"image", "text"}`` examples. Each metadata line is expected to
    be ``<archive-path> <caption>`` (path, whitespace, caption), e.g.
    ``tests/train/001.jpeg 酷酷的女机器人``.
    """

    BUILDER_CONFIGS = [
        ImageConfig(
            name="tests",
            description="测试",
            data_url="https://huggingface.co/datasets/zhenzi/test/resolve/main/tests.zip",
            metadata_urls={
                "train": f"{_REPO}/tests/train.txt"
            },
        )
    ]

    def _info(self):
        """Return dataset metadata: features are an image plus a caption."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Build one SplitGenerator per split declared in ``metadata_urls``.

        Fixes vs. the original:
        * The original unconditionally emitted a VALIDATION split reading
          ``metadata_paths["validation"]``, which raises KeyError because the
          config only declares a "train" metadata URL. Splits are now derived
          from the configured ``metadata_urls`` keys.
        * A single ``iter_archive`` iterator was shared by all splits; once
          exhausted by the first split, later splits would see no files. A
          fresh iterator is now created per split.
        """
        archive_path = dl_manager.download(self.config.data_url)
        metadata_paths = dl_manager.download(self.config.metadata_urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    # Fresh archive iterator per split (iterators are one-shot).
                    "images": dl_manager.iter_archive(archive_path),
                    "metadata_path": metadata_paths[split],
                },
            )
            for split in self.config.metadata_urls
        ]

    def _generate_examples(self, images, metadata_path):
        """Yield ``(key, example)`` pairs for archive members listed in metadata.

        Fixes vs. the original:
        * The original put whole metadata lines ("path caption") into the
          keep-set and compared archive paths against them, so no file ever
          matched; lines are now split into path + caption.
        * The caption was hard-coded to "dee"; the caption parsed from the
          metadata line is now emitted as ``text``.
        * Removed a stray debug ``print``.
        """
        # Map archive path -> caption, one "path caption" line per example.
        captions = {}
        with open(metadata_path, encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                path, _, caption = line.partition(" ")
                captions[path] = caption
        for file_path, file_obj in images:
            if file_path in captions:
                yield file_path, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "text": captions[file_path],
                }
metadata/tests/train.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ tests/train/001.jpeg 酷酷的女机器人
2
+ tests/train/002.jpeg 穿警服的兔子