parquet-converter committed on
Commit
573304d
·
1 Parent(s): 3856402

Update parquet files

Browse files
.DS_Store DELETED
Binary file (6.15 kB)
 
.gitattributes DELETED
@@ -1,27 +0,0 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bin.* filter=lfs diff=lfs merge=lfs -text
5
- *.bz2 filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.model filter=lfs diff=lfs merge=lfs -text
12
- *.msgpack filter=lfs diff=lfs merge=lfs -text
13
- *.onnx filter=lfs diff=lfs merge=lfs -text
14
- *.ot filter=lfs diff=lfs merge=lfs -text
15
- *.parquet filter=lfs diff=lfs merge=lfs -text
16
- *.pb filter=lfs diff=lfs merge=lfs -text
17
- *.pt filter=lfs diff=lfs merge=lfs -text
18
- *.pth filter=lfs diff=lfs merge=lfs -text
19
- *.rar filter=lfs diff=lfs merge=lfs -text
20
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
- *.tar.* filter=lfs diff=lfs merge=lfs -text
22
- *.tflite filter=lfs diff=lfs merge=lfs -text
23
- *.tgz filter=lfs diff=lfs merge=lfs -text
24
- *.xz filter=lfs diff=lfs merge=lfs -text
25
- *.zip filter=lfs diff=lfs merge=lfs -text
26
- *.zstandard filter=lfs diff=lfs merge=lfs -text
27
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
Arguement_Mining_CL2017.py DELETED
@@ -1,135 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """
18
- Arguement Mining Dataset created by Stab , Gurevych et. al. CL 2017
19
- """
20
-
21
- import datasets
22
- import os
23
-
24
-
25
- _CITATION = """\
26
- @article{stab2017parsing,
27
- title={Parsing argumentation structures in persuasive essays},
28
- author={Stab, Christian and Gurevych, Iryna},
29
- journal={Computational Linguistics},
30
- volume={43},
31
- number={3},
32
- pages={619--659},
33
- year={2017},
34
- publisher={MIT Press One Rogers Street, Cambridge, MA 02142-1209, USA journals-info~…}
35
- }
36
- """
37
-
38
- _DESCRIPTION = """\
39
- tokens along with chunk id. IOB1 format Begining of arguement denoted by B-ARG,inside arguement
40
- denoted by I-ARG, other chunks are O
41
- Orginial train,test split as used by the paper is provided
42
- """
43
-
44
- _URL = "https://raw.githubusercontent.com/Sam131112/Argument-Mining-Dataset/main/"
45
- _TRAINING_FILE = "train.txt"
46
- _TEST_FILE = "test.txt"
47
-
48
-
49
- class ArguementMiningCL2017Config(datasets.BuilderConfig):
50
- """BuilderConfig for CL2017"""
51
-
52
- def __init__(self, **kwargs):
53
- """BuilderConfig forCl2017.
54
- Args:
55
- **kwargs: keyword arguments forwarded to super.
56
- """
57
- super(ArguementMiningCL2017Config, self).__init__(**kwargs)
58
-
59
-
60
- class ArguementMiningCL2017(datasets.GeneratorBasedBuilder):
61
- """CL2017 dataset."""
62
-
63
- BUILDER_CONFIGS = [
64
- ArguementMiningCL2017Config(name="cl2017", version=datasets.Version("1.0.0"), description="Cl2017 dataset"),
65
- ]
66
-
67
- def _info(self):
68
- return datasets.DatasetInfo(
69
- description=_DESCRIPTION,
70
- features=datasets.Features(
71
- {
72
- "id": datasets.Value("string"),
73
- "tokens": datasets.Sequence(datasets.Value("string")),
74
- "chunk_tags":datasets.Sequence(
75
- datasets.features.ClassLabel(
76
- names=[
77
- "O",
78
- "B-ARG",
79
- "I-ARG",
80
- ]
81
- )
82
- ),
83
- }
84
- ),
85
- supervised_keys=None,
86
- homepage="https://direct.mit.edu/coli/article/43/3/619/1573/Parsing-Argumentation-Structures-in-Persuasive",
87
- citation=_CITATION,
88
- )
89
-
90
- def _split_generators(self, dl_manager):
91
- """Returns SplitGenerators."""
92
- urls_to_download = {
93
- "train": _TRAINING_FILE,
94
- "test": _TEST_FILE,
95
- }
96
- downloaded_files = dl_manager.download_and_extract(urls_to_download)
97
-
98
- return [
99
- datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
100
- datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
101
- ]
102
-
103
- def _generate_examples(self, filepath):
104
- print("⏳ Generating examples from = %s", filepath)
105
- with open(filepath, encoding="utf-8") as f:
106
- guid = 0
107
- tokens = []
108
- pos_tags = []
109
- chunk_tags = []
110
- ner_tags = []
111
- for line in f:
112
- if line == "\n":
113
- if tokens:
114
- yield guid, {
115
- "id": str(guid),
116
- "tokens": tokens,
117
- "chunk_tags": chunk_tags,
118
- }
119
- guid = guid+1
120
- tokens = []
121
- chunk_tags = []
122
- else:
123
- # cl2017 tokens are space separated
124
- line=line.strip('\n')
125
- splits = line.split("\t")
126
- #print(splits)
127
- tokens.append(splits[0])
128
- chunk_tags.append(splits[1])
129
- #print({"id": str(guid),"tokens": tokens,"chunk_tags": chunk_tags,})
130
- # last example
131
- yield guid, {
132
- "id": str(guid),
133
- "tokens": tokens,
134
- "chunk_tags": chunk_tags,
135
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cl2017/arguement_mining_cl2017-test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0feca15cbe371dabfd8fa90737f219534ebdf477c82d76b2f9662327be332b2
3
+ size 81603
cl2017/arguement_mining_cl2017-train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bbfdc3b33be69f8c85d0ce9acf335cf8749b9ef59827f86fadf3457545bf3a3
3
+ size 284451
test.txt DELETED
The diff for this file is too large to render. See raw diff
 
train.txt DELETED
The diff for this file is too large to render. See raw diff