Datasets:

Modalities:
Text
Languages:
English
Libraries:
Datasets
License:
christopherastone committed on
Commit
de14215
·
1 Parent(s): 99eba9d

Upload 3 files

Browse files
Files changed (3) hide show
  1. prooflang.py +19 -14
  2. proofs.zip +2 -2
  3. sentences.zip +2 -2
prooflang.py CHANGED
@@ -54,6 +54,10 @@ _LICENSE = "CC-BY 4.0"
54
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
55
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
56
  _URLS = {
 
 
 
 
57
  "proofs": "proofs.zip",
58
  "sentences": "sentences.zip",
59
  }
@@ -63,7 +67,7 @@ _URLS = {
63
  class ArxivProofs(datasets.GeneratorBasedBuilder):
64
  """TODO: Short description of my dataset."""
65
 
66
- VERSION = datasets.Version("0.5.0")
67
 
68
  # This is an example of a dataset with multiple configurations.
69
  # If you don't want/need to define several sub-sets in your dataset,
@@ -159,17 +163,18 @@ class ArxivProofs(datasets.GeneratorBasedBuilder):
159
  # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
160
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
161
  with open(os.path.join(filepath, self.config.name + ".tsv"), encoding="utf-8") as f:
162
- reader = csv.reader(f, delimiter='\t')
163
  for key, data in enumerate(reader):
164
- if self.config.name == "proofs":
165
- # Yields examples as (key, example) tuples
166
- # print(key, repr(data))
167
- yield key, {
168
- "fileID" : data[0],
169
- "proof": data[1],
170
- }
171
- else:
172
- yield key, {
173
- "fileID" : data[0],
174
- "sentence": data[1],
175
- }
 
 
54
  # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
55
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
56
  _URLS = {
57
+ # "proofs": "https://huggingface.co/great-new-dataset-proofs.zip",
58
+ # "sentences": "https://huggingface.co/great-new-dataset-sentences.zip",
59
+ # "proofs": "cleanproofs19.tsv",
60
+ # "sentences": "sent19.tsv"
61
  "proofs": "proofs.zip",
62
  "sentences": "sentences.zip",
63
  }
 
67
  class ArxivProofs(datasets.GeneratorBasedBuilder):
68
  """TODO: Short description of my dataset."""
69
 
70
+ VERSION = datasets.Version("0.5.2")
71
 
72
  # This is an example of a dataset with multiple configurations.
73
  # If you don't want/need to define several sub-sets in your dataset,
 
163
  # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
164
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
165
  with open(os.path.join(filepath, self.config.name + ".tsv"), encoding="utf-8") as f:
166
+ reader = csv.DictReader(f, delimiter='\t')
167
  for key, data in enumerate(reader):
168
+ yield key, data
169
+ # if self.config.name == "proofs":
170
+ # # Yields examples as (key, example) tuples
171
+ # # print(key, repr(data))
172
+ # yield key, {
173
+ # "fileID" : data[0],
174
+ # "proof": data[1],
175
+ # }
176
+ # else:
177
+ # yield key, {
178
+ # "fileID" : data[0],
179
+ # "sentence": data[1],
180
+ # }
proofs.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:24e9990fe719b420be5e926e384f1d6d445208c5409e09ddcd394823b2c5e042
3
- size 720642714
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3cecb6a1fe54e2d80baafa727629786d64f7d212c08e80e2249753e81a90ccc
3
+ size 723931097
sentences.zip CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f1237d489c67ab3296f5509ea2b25a45a12918a1188ce2708dfe0c6cb63e9aaf
3
- size 753492504
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25bcbb4637bbe587be43dedf53d124e69e55e5b70aa8c47f63d4226082d36c95
3
+ size 756959311