Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
christopherastone committed on
Commit
4f0b905
·
1 Parent(s): 5f5f8b7

Upload 3 files

Browse files
Files changed (3) hide show
  1. prooflang.py +177 -0
  2. proofs.zip +3 -0
  3. sentences.zip +3 -0
prooflang.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The ProofLang Corpus of arXiv Proofs"""


import csv
import os

import datasets


# BibTeX citation for the (unpublished) dataset paper.
_CITATION = """\
@unpublished{prooflang:dataset,
  title = {ProofLang: the Language of arXiv Proofs},
  author = {Henry Hammer and Nanako Noda and Christopher A. Stone},
  year = {2023}
}
"""

# Human-readable description shown on the dataset page.
# NOTE: the original began this literal with a stray "\ " — an invalid escape
# sequence (DeprecationWarning) that also embedded a literal backslash+space
# at the start of the description; fixed by dropping it.
_DESCRIPTION = """The ProofLang Corpus includes over three million
English-language proofs—558 million words—mechanically extracted from the papers
(Math, CS, Physics, etc.) posted on arXiv.org between 1992 and 2020. The focus
of this corpus is written proofs, not the explanatory text that surrounds them,
and more specifically on the language used in such proofs; mathematical
content is filtered out, resulting in sentences such as ``Let MATH be
the restriction of MATH to MATH.'' This dataset reflects how people prefer to
write informal proofs. It is also amenable to statistical analyses and to
experiments with Natural Language Processing (NLP) techniques.
"""

# TODO(contributor): no official homepage yet — fill in when one exists.
_HOMEPAGE = ""

_LICENSE = "CC-BY 4.0"

# One archive per configuration; each zip contains a single <name>.tsv file.
# NOTE(review): these URLs lack a "/resolve/<revision>/" segment that
# huggingface.co file downloads normally require — confirm they resolve.
_URLS = {
    "proofs": "https://huggingface.co/proofcheck/prooflang/proofs.zip",
    "sentences": "https://huggingface.co/proofcheck/prooflang/sentences.zip",
}
64
class ArxivProofs(datasets.GeneratorBasedBuilder):
    """The ProofLang Corpus: English-language proofs extracted from arXiv papers.

    Two configurations are available, each backed by one zip archive holding a
    single tab-separated file named ``<config>.tsv``:

    * ``proofs``    — one proof per row    (columns: ``fileID``, ``proof``)
    * ``sentences`` — one sentence per row (columns: ``fileID``, ``sentence``)
    """

    VERSION = datasets.Version("0.5.0")

    # Select a configuration with e.g.
    #   datasets.load_dataset('prooflang', 'proofs')
    #   datasets.load_dataset('prooflang', 'sentences')
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="proofs", version=VERSION, description="One proof per line"),
        datasets.BuilderConfig(name="sentences", version=VERSION, description="One sentence per line"),
    ]

    DEFAULT_CONFIG_NAME = "proofs"

    def _text_column(self):
        # The two configs share the "fileID" column and differ only in the
        # name of the text column; centralize that choice here.
        return "proof" if self.config.name == "proofs" else "sentence"

    def _info(self):
        """Return the DatasetInfo describing the selected configuration."""
        features = datasets.Features(
            {
                "fileID": datasets.Value("string"),
                self._text_column(): datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the config's archive; the corpus has a single TRAIN split."""
        archive_url = _URLS[self.config.name]
        # download_and_extract returns the path of the extraction directory.
        data_dir = dl_manager.download_and_extract(archive_url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Forwarded verbatim to _generate_examples.
                gen_kwargs={
                    "filepath": data_dir,
                    "split": "train",
                },
            ),
        ]

    # Parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from ``<config>.tsv`` in *filepath*.

        Each TSV row is ``(fileID, text)``; the integer row index serves as the
        (legacy, tfds-style) unique key. *split* is unused — only TRAIN exists —
        but kept for signature compatibility with ``_split_generators``.
        """
        text_column = self._text_column()
        tsv_path = os.path.join(filepath, self.config.name + ".tsv")
        with open(tsv_path, encoding="utf-8") as f:
            for key, row in enumerate(csv.reader(f, delimiter="\t")):
                yield key, {
                    "fileID": row[0],
                    text_column: row[1],
                }
proofs.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24e9990fe719b420be5e926e384f1d6d445208c5409e09ddcd394823b2c5e042
3
+ size 720642714
sentences.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1237d489c67ab3296f5509ea2b25a45a12918a1188ce2708dfe0c6cb63e9aaf
3
+ size 753492504