Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
parquet-converter committed on
Commit
b33a9cb
·
1 Parent(s): f1cb701

Update parquet files

Browse files
Files changed (6) hide show
  1. .gitattributes +0 -37
  2. C2Gen.py +0 -122
  3. README.md +0 -70
  4. c2gen/c2_gen-test.parquet +3 -0
  5. data/test.json +0 -0
  6. dataset_infos.json +0 -1
.gitattributes DELETED
@@ -1,37 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
28
- # Audio files - uncompressed
29
- *.pcm filter=lfs diff=lfs merge=lfs -text
30
- *.sam filter=lfs diff=lfs merge=lfs -text
31
- *.raw filter=lfs diff=lfs merge=lfs -text
32
- # Audio files - compressed
33
- *.aac filter=lfs diff=lfs merge=lfs -text
34
- *.flac filter=lfs diff=lfs merge=lfs -text
35
- *.mp3 filter=lfs diff=lfs merge=lfs -text
36
- *.ogg filter=lfs diff=lfs merge=lfs -text
37
- *.wav filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
C2Gen.py DELETED
@@ -1,122 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """The SuperGLUE benchmark."""
18
-
19
- import json
20
- import os
21
- import datasets
22
- import pandas as pd
23
-
24
- _CITATION = """TODO
25
- """
26
-
27
- # You can copy an official description
28
- _DESCRIPTION = """The task of C2Gen is to both generate commonsensical text which include the given words, and also have the generated text adhere to the given context.
29
- """
30
-
31
- _HOMEPAGE = ""
32
-
33
- _LICENSE = "cc-by-sa-4.0"
34
-
35
- # TODO: Add link to the official dataset URLs here
36
- # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
37
- # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
38
- _URL = "https://huggingface.co/datasets/Severine/C2Gen/resolve/main/data/"
39
- _TASKS = {
40
- "c2gen": "C2Gen",
41
- }
42
-
43
-
44
- # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
45
- class C2Gen(datasets.GeneratorBasedBuilder):
46
- """TODO: Short description of my dataset."""
47
-
48
- VERSION = datasets.Version("1.1.0")
49
-
50
- # If you need to make complex sub-parts in the datasets with configurable options
51
- # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
52
- # BUILDER_CONFIG_CLASS = MyBuilderConfig
53
-
54
- # You will be able to load one or the other configurations in the following list with
55
- # data = datasets.load_dataset('my_dataset', 'first_domain')
56
- # data = datasets.load_dataset('my_dataset', 'second_domain')
57
- BUILDER_CONFIGS = [
58
- datasets.BuilderConfig(name="c2gen", version=VERSION, description=_DESCRIPTION),
59
- ]
60
-
61
- DEFAULT_CONFIG_NAME = "c2gen"
62
-
63
- def _info(self):
64
- # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
65
- # This is the name of the configuration selected in BUILDER_CONFIGS above
66
- features = datasets.Features(
67
- {
68
- "context": datasets.Value("string"),
69
- "keywords": datasets.Sequence(feature=datasets.Value(dtype="string",id=None), length=-1,id=None),
70
- # These are the features of your dataset like images, labels ...
71
- }
72
- )
73
-
74
- return datasets.DatasetInfo(
75
- # This is the description that will appear on the datasets page.
76
- description=_DESCRIPTION,
77
- # This defines the different columns of the dataset and their types
78
- features=features, # Here we define them above because they are different between the two configurations
79
- # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
80
- # specify them. They'll be used if as_supervised=True in builder.as_dataset.
81
- # supervised_keys=("sentence", "label"),
82
- # Homepage of the dataset for documentation
83
- homepage=_HOMEPAGE,
84
- # License for the dataset if available
85
- license=_LICENSE,
86
- # Citation for the dataset
87
- citation=_CITATION,
88
- )
89
-
90
- def _split_generators(self, dl_manager):
91
- # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
92
- # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
93
-
94
- # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
95
- # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
96
- # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
97
- #urls = _URLS[self.config.name]
98
- data_dir_test = dl_manager.download_and_extract(os.path.join(_URL, "test.json"))
99
- return [
100
- datasets.SplitGenerator(
101
- name=datasets.Split.TEST,
102
- # These kwargs will be passed to _generate_examples
103
- gen_kwargs={
104
- "filepath": data_dir_test,
105
- "split": "test"
106
- },
107
- ),
108
- ]
109
-
110
- # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
111
- def _generate_examples(self, filepath, split):
112
- # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
113
- # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
114
- data = json.load(open(filepath,"r"))
115
- for key, row in enumerate(data):
116
-
117
- # Yields examples as (key, example) tuples
118
- yield key, {
119
- "context": row["Context"],
120
- "keywords": row["Words"],
121
- }
122
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md DELETED
@@ -1,70 +0,0 @@
1
- ---
2
- language:
3
- - en
4
- license:
5
- - cc-by-sa-4.0
6
- size_categories:
7
- - <100K
8
- task_categories:
9
- - text-generation
10
- ---
11
- # Dataset Card for Contextualized CommonGen(C2Gen)
12
-
13
- ## Table of Contents
14
- - [Dataset Description](#dataset-description)
15
- - [Dataset Summary](#dataset-summary)
16
- - [Languages](#languages)
17
- - [Dataset Structure](#dataset-structure)
18
- - [Data Instances](#data-instances)
19
- - [Data Fields](#data-instances)
20
- - [Data Splits](#data-instances)
21
- - [Dataset Creation](#dataset-creation)
22
- - [Curation Rationale](#curation-rationale)
23
- - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
24
- - [Licensing Information](#licensing-information)
25
-
26
-
27
- ## Dataset Description
28
-
29
- - **Repository:** [Non-Residual Prompting](https://github.com/FreddeFrallan/Non-Residual-Prompting)
30
- - **Paper:** [Fine-Grained Controllable Text Generation Using Non-Residual Prompting](https://aclanthology.org/2022.acl-long.471)
31
- - **Point of Contact:** [Fredrik Carlsson](mailto:Fredrik.Carlsson@ri.se)
32
-
33
-
34
- ### Dataset Summary
35
-
36
- CommonGen [Lin et al., 2020](https://arxiv.org/abs/1911.03705) is a dataset for the constrained text generation task of word inclusion. But the task does not allow to include context. Therefore, to complement CommonGen, we provide an extended test set C2Gen [Carlsson et al., 2022](https://aclanthology.org/2022.acl-long.471) where an additional context is provided for each set of target words. The task is therefore reformulated to both generate commonsensical text which include the given words, and also have the generated text adhere to the given context.
37
-
38
- ### Languages
39
-
40
- English
41
-
42
- ## Dataset Structure
43
-
44
- ### Data Instances
45
-
46
- {"Context": "The show came on the television with people singing. The family all gathered to watch. They all became silent when the show came on.", "Words": ["follow", "series", "voice"]}
47
-
48
- ### Data Fields
49
-
50
- - context: the generated text by the model should adhere to this text
51
- - words: the words that should be included in the generated continuation
52
-
53
- ### Data Splits
54
-
55
- Test
56
-
57
- ## Dataset Creation
58
-
59
- ### Curation Rationale
60
-
61
- C2Gen was created because the authors of the paper believed that the task formulation of CommonGen is too narrow, and that it needlessly incentivizes researchers
62
- to focus on methods that do not support context. This is orthogonal to their belief that many application areas necessitate the consideration of surrounding context. Therefore, to complement CommonGen, they provide an extended test set where an additional context is provided for each set of target words.
63
-
64
- ### Initial Data Collection and Normalization
65
-
66
- The dataset was constructed with the help of the crowdsourcing platform Mechanical Turk. Each remaining concept set manually received a textual context. To assure the quality of the data generation, only native English speakers with a recorded high acceptance were allowed to participate. Finally, all contexts were manually verified, and fixed in terms of typos and poor quality. Furthermore, we want to raise awareness that C2GEN can contain personal data or offensive content. If you encounter such a sample, please reach out to us.
67
-
68
- ## Licensing Information
69
-
70
- license: cc-by-sa-4.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2gen/c2_gen-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4c098bec8f2024daf438beca9f77b0b1bc7fcc6aaa678cb6d85a3de3202def7
3
+ size 215504
data/test.json DELETED
The diff for this file is too large to render. See raw diff
 
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"c2gen": {"description": "The task of C2Gen is to both generate commonsensical text which include the given words, and also have the generated text adhere to the given context.\n", "citation": "TODO\n", "homepage": "", "license": "cc-by-sa-4.0", "features": {"context": {"dtype": "string", "id": null, "_type": "Value"}, "keywords": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "c2_gen", "config_name": "c2gen", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 367049, "num_examples": 1483, "dataset_name": "c2_gen"}}, "download_checksums": {"https://huggingface.co/datasets/Severine/C2Gen/resolve/main/data/test.json": {"num_bytes": 396766, "checksum": "9f1c6c770f8583a05f72c80c5a93427e4d30c5d72fc683b8964138c4cbad1d8b"}}, "download_size": 396766, "post_processing_size": null, "dataset_size": 367049, "size_in_bytes": 763815}}