thientran commited on
Commit
daf18e3
·
1 Parent(s): 0e5c6c0

init favs_bot.py

Browse files
Files changed (1) hide show
  1. favs_bot.py +177 -0
favs_bot.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inspired by conll2003 dataset
2
+ # https://huggingface.co/datasets/conll2003
3
+
4
+
5
+ # coding=utf-8
6
+ # Copyright 2020 HuggingFace Datasets Authors.
7
+ #
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ # Lint as: python3
21
+ """Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
22
+
23
+ import os
24
+
25
+ import datasets
26
+
27
+
28
logger = datasets.logging.get_logger(__name__)


# BibTeX citation for the CoNLL-2003 shared-task paper; surfaced via
# DatasetInfo(citation=...) in Conll2003._info().
_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
author = "Tjong Kim Sang, Erik F. and
De Meulder, Fien",
booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
year = "2003",
url = "https://www.aclweb.org/anthology/W03-0419",
pages = "142--147",
}
"""

# Human-readable dataset summary; surfaced via DatasetInfo(description=...).
_DESCRIPTION = """\
The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
not belong to the previous three groups.

The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
tagging scheme, whereas the original dataset uses IOB1.

For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
"""

# Original CoNLL-2003 archive and its member file names.
# NOTE(review): these are not referenced by the visible code any more —
# _split_generators downloads a pastebin URL instead (the download of _URL
# only survives in commented-out code). Kept for reference; confirm before
# deleting.
_URL = "https://data.deepai.org/conll2003.zip"
_TRAINING_FILE = "train.txt"
_DEV_FILE = "valid.txt"
_TEST_FILE = "test.txt"
63
+
64
+
65
class Conll2003Config(datasets.BuilderConfig):
    """BuilderConfig for the Conll2003 dataset."""

    def __init__(self, **kwargs):
        """Create a Conll2003 builder configuration.

        Args:
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        super().__init__(**kwargs)
75
+
76
+
77
class Conll2003(datasets.GeneratorBasedBuilder):
    """Conll2003 dataset: token-level named-entity recognition examples."""

    BUILDER_CONFIGS = [
        Conll2003Config(name="conll2003", version=datasets.Version(
            "1.0.0"), description="Conll2003 dataset"),
    ]

    def _info(self):
        """Return the dataset metadata.

        Only "id", "tokens" and "ner_tags" are exposed; the POS and chunk
        columns of the original CoNLL-2003 format are not surfaced.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER",
                                "I-PER",
                                "B-ORG",
                                "I-ORG",
                                "B-LOC",
                                "I-LOC",
                                "B-MISC",
                                "I-MISC",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://www.aclweb.org/anthology/W03-0419/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the data file and return the SplitGenerators.

        NOTE(review): all three splits point at the SAME downloaded file, so
        train/validation/test contain identical examples — confirm this is
        intended (it looks like a debugging shortcut replacing the original
        _URL zip download).
        """
        url = "https://pastebin.pl/view/raw/671a4c61"
        text_file = dl_manager.download(url)

        data_files = {
            "train": text_file,
            "dev": text_file,
            "test": text_file,
        }

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
                "filepath": data_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={
                "filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
                "filepath": data_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs parsed from *filepath*.

        The file is read as space-separated columns where the first column is
        the token and the second the NER tag (a two-column variant of the
        CoNLL format — presumably what the pastebin file contains; TODO
        confirm). A blank line or a "-DOCSTART-" marker ends the current
        sentence.

        Args:
            filepath: path to the downloaded text file.

        Yields:
            Tuples of (integer key, dict with "id", "tokens", "ner_tags").
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            ner_tags = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    # Sentence boundary: flush the accumulated example, if any.
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "ner_tags": ner_tags,
                        }
                        guid += 1
                        tokens = []
                        ner_tags = []
                else:
                    # conll2003 tokens are space separated; column 0 is the
                    # token, column 1 the NER tag (trailing newline stripped).
                    splits = line.split(" ")
                    tokens.append(splits[0])
                    ner_tags.append(splits[1].rstrip())
            # Last example: the file may not end with a blank line.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "ner_tags": ner_tags,
                }