MohammedHB committed on
Commit
f3aeba8
·
1 Parent(s): 97a6b8b

Upload AraPOS2.py

Browse files
Files changed (1) hide show
  1. AraPOS2.py +155 -0
AraPOS2.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# coding=utf-8
# Copyright 2023 HuggingFace Datasets Authors.

import os

import datasets


# Module-level logger, following the `datasets` library convention.
logger = datasets.logging.get_logger(__name__)


# Citation text surfaced through DatasetInfo.citation.
_CITATION = """\
Haibaoui Mohammed 2023
}
"""

# Short description surfaced through DatasetInfo.description.
_DESCRIPTION = """\
Arabic Fakenews Datasets POSTAGS
"""

# Remote zip archive expected to contain the three CoNLL-style split
# files named below (one "token TAG" pair per line).
_URL = "https://www.dropbox.com/s/7j897h9lafb0emq/docs.zip"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "valid.txt"
_TEST_FILE = "test.txt"
26
+
class AraPOSConfig(datasets.BuilderConfig):
    """Builder configuration for the AraPOS dataset loader."""

    def __init__(self, **kwargs):
        """Create an AraPOS configuration.

        Args:
            **kwargs: keyword arguments passed straight through to
                ``datasets.BuilderConfig`` (name, version, description, ...).
        """
        super().__init__(**kwargs)
37
+
class Arapos(datasets.GeneratorBasedBuilder):
    """AraPOS dataset: Arabic fake-news tokens with POS tags in CoNLL format.

    Each split file holds one "token TAG" pair per space-separated line;
    blank lines (and "-DOCSTART-" markers) separate sentences.
    """

    BUILDER_CONFIGS = [
        AraPOSConfig(name="arapos", version=datasets.Version("1.0.0"), description="AraPOS dataset"),
    ]

    def _info(self):
        """Return the dataset metadata: features, description, citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # Penn-Treebank-style tag inventory; ClassLabel encodes each
                    # tag string to its index in this list.
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return one generator per split."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        data_files = {
            "train": os.path.join(downloaded_file, _TRAINING_FILE),
            "dev": os.path.join(downloaded_file, _DEV_FILE),
            "test": os.path.join(downloaded_file, _TEST_FILE),
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from a CoNLL-style split file.

        Args:
            filepath: path to a text file with one "token TAG" pair per
                space-separated line; blank lines end a sentence.

        Yields:
            Tuples of (int guid, dict) where the dict has keys
            "id", "tokens", and "pos_tags".
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []
            for line in f:
                # A blank line (or document marker) terminates the current
                # sentence; "".strip() covers both "\n" and a final line
                # without a trailing newline.
                if line.startswith("-DOCSTART-") or line.strip() == "":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                else:
                    # Tokens are space separated (conll2003 style). BUG FIX:
                    # strip the trailing newline first — otherwise the tag
                    # becomes e.g. "NN\n", which is not a valid ClassLabel
                    # name and breaks label encoding.
                    splits = line.rstrip("\n").split(" ")
                    tokens.append(splits[0])
                    pos_tags.append(splits[1])
            # Flush the last sentence if the file does not end with a
            # blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                }