lijianbin committed
Commit 4e77ef4 · 1 Parent(s): 2bf9b3c

first update

Files changed (3)
  1. README.md +6 -0
  2. data.xlsx +0 -0
  3. share_test.py +171 -0
README.md ADDED
@@ -0,0 +1,6 @@
+ ---
+ task_categories:
+ - text-classification
+ ---
+
+ This dataset is a sample used to learn how to share a dataset on the Hub.
data.xlsx ADDED
Binary file (128 kB).
 
share_test.py ADDED
@@ -0,0 +1,171 @@
+ """This script was adapted from the template downloaded from the official website."""
+
+
+ # import csv
+ # import json
+
+ import datasets
+ import pandas as pd
+
+
+ # TODO: Add BibTeX citation
+ # Find for instance the citation on arxiv or on the dataset repo/website
+ _CITATION = """
+ @InProceedings{huggingface:dataset,
+ title = {A great new dataset},
+ author={huggingface, Inc.
+ },
+ year={2020}
+ }
+ """
+
+ # TODO: Add description of the dataset here
+ # You can copy an official description
+ _DESCRIPTION = """
+ This dataset is a random sample used to learn how to share a dataset on the Hub.
+ """
+
+ # TODO: Add a link to an official homepage for the dataset here
+ _HOMEPAGE = "https://huggingface.co/lijianbin"
+
+ # TODO: Add the licence for the dataset here if you can find it
+ _LICENSE = "123"
+
+ # TODO: Add link to the official dataset URLs here
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+ # _URLS = {
+ #     "first_domain": "https://huggingface.co/great-new-dataset-first_domain.zip",
+ #     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
+ # }
+ # Point at the raw file rather than the repo page: the `resolve/main/` route
+ # serves the file itself, which is what the download manager needs.
+ _URLS = "https://huggingface.co/datasets/lijianbin/share_test/resolve/main/data.xlsx"
+
+
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
+ class ShareTest(datasets.GeneratorBasedBuilder):
+     """TODO: Short description of my dataset."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the dataset with configurable options,
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'first_domain')
+     # data = datasets.load_dataset('my_dataset', 'second_domain')
+     # BUILDER_CONFIGS = [
+     #     datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
+     #     datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
+     # ]
+
+     def _info(self):
+         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         features = datasets.Features(
+             {
+                 # These are the features of your dataset: a float label plus ten float inputs
+                 "label": datasets.Value("float"),
+                 "x1": datasets.Value("float"),
+                 "x2": datasets.Value("float"),
+                 "x3": datasets.Value("float"),
+                 "x4": datasets.Value("float"),
+                 "x5": datasets.Value("float"),
+                 "x6": datasets.Value("float"),
+                 "x7": datasets.Value("float"),
+                 "x8": datasets.Value("float"),
+                 "x9": datasets.Value("float"),
+                 "x10": datasets.Value("float"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
+             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
+             # supervised_keys=("sentence", "label"),
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # TODO: This method is tasked with downloading the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download URLs:
+         # it accepts any nested list/dict and gives back the same structure with each URL
+         # replaced by a path to a local file. data.xlsx is downloaded as-is rather than with
+         # download_and_extract (xlsx is a zip container and would be unpacked into its XML parts).
+         data_path = dl_manager.download(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_path,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_path,
+                     "split": "validation",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": data_path,
+                     "split": "test",
+                 },
+             ),
+         ]
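+
+     # Note: all three SplitGenerators receive the same data.xlsx path; the actual
+     # train/validation/test partition happens below in _generate_examples via row slicing.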
+
+     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     def _generate_examples(self, filepath, split):
+         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
+         df = pd.read_excel(filepath)
+         # Fixed partition over the rows: first 700 train, next 200 validation, rest test
+         train_df = df.iloc[:700, :]
+         validation_df = df.iloc[700:900, :]
+         test_df = df.iloc[900:, :]
+         if split == "train":
+             iteration = train_df
+         elif split == "validation":
+             iteration = validation_df
+         else:
+             iteration = test_df
+         # Iterate over the row labels themselves (calling range() on a list raises a TypeError)
+         # and yield plain dicts so each example matches the Features schema defined in _info
+         for key in iteration.index.tolist():
+             yield key, iteration.loc[key].to_dict()
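
Once this commit is on the Hub, the script can be exercised end to end. A minimal loading sketch, assuming the repo id lijianbin/share_test implied by _URLS, that data.xlsx has more than 900 rows so all three slices are non-empty, and a recent version of the datasets library (which requires trust_remote_code=True for script-backed datasets):

    from datasets import load_dataset

    # Runs share_test.py, downloads data.xlsx, and builds all three splits
    ds = load_dataset("lijianbin/share_test", trust_remote_code=True)
    print(ds)              # DatasetDict with train / validation / test
    print(ds["train"][0])  # one example: a dict with "label" and "x1" ... "x10"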