Omar2027 committed on
Commit
c1f85fe
·
1 Parent(s): ff376d8

Delete caner_replicate.py

Browse files
Files changed (1) hide show
  1. caner_replicate.py +0 -119
caner_replicate.py DELETED
@@ -1,119 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """caner_replicate.ipynb
3
-
4
- Automatically generated by Colaboratory.
5
-
6
- Original file is located at
7
- https://colab.research.google.com/drive/1QgToSf5_B5l43oRCKwI2vGEB0YcRUtHe
8
- """
9
-
10
- import csv
11
- import os
12
-
13
- import datasets
14
-
15
-
16
- _CITATION = """\
17
- @article{article,
18
- author = {Salah, Ramzi and Zakaria, Lailatul},
19
- year = {2018},
20
- month = {12},
21
- pages = {},
22
- title = {BUILDING THE CLASSICAL ARABIC NAMED ENTITY RECOGNITION CORPUS (CANERCORPUS)},
23
- volume = {96},
24
- journal = {Journal of Theoretical and Applied Information Technology}
25
- }
26
- """
27
-
28
- _DESCRIPTION = """\
29
- Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities.
30
- """
31
-
32
- _HOMEPAGE = "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus"
33
- _HOMEPAGE = "https://github.com/omarmohamed2011/caner_data_copy"
34
- # TODO: Add the licence for the dataset here if you can find it
35
- _LICENSE = ""
36
-
37
- #_URL = "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus/archive/master.zip"
38
- _URL = 'https://github.com/omarmohamed2011/caner_data_copy/blob/main/df_caner.zip'
39
-
40
class Caner(datasets.GeneratorBasedBuilder):
    """Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the DatasetInfo: one string token plus its NER class label per example."""
        features = datasets.Features(
            {
                "token": datasets.Value("string"),
                # Class names must stay in this exact order: ClassLabel maps
                # each name to its index, so reordering changes label ids.
                "ner_tag": datasets.ClassLabel(
                    names=[
                        "Allah",
                        "Book",
                        "Clan",
                        "Crime",
                        "Date",
                        "Day",
                        "Hell",
                        "Loc",
                        "Meas",
                        "Mon",
                        "Month",
                        "NatOb",
                        "Number",
                        "O",
                        "Org",
                        "Para",
                        "Pers",
                        "Prophet",
                        "Rlig",
                        "Sect",
                        "Time",
                    ]
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and return the single TRAIN split.

        Bug fixed: the extracted path returned by download_and_extract was
        previously discarded and replaced with the hard-coded Colab path
        '/content/df_caner.csv', so the loader only worked inside that one
        Colab session. We now use the extracted directory directly.
        """
        data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs are passed to _generate_examples.
                gen_kwargs={
                    # Assumes df_caner.csv sits at the root of the zip —
                    # TODO confirm against the archive layout. (The old
                    # commented-out join used "/df_caner.csv", whose leading
                    # slash would have discarded data_dir entirely.)
                    "filepath": os.path.join(data_dir, "df_caner.csv"),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from the two-column CSV: token, ner_tag."""
        with open(filepath, encoding="utf-8") as csv_file:
            reader = csv.reader(csv_file, delimiter=",")
            # Skip the header row; None default avoids StopIteration on an
            # empty file.
            next(reader, None)

            for id_, row in enumerate(reader):
                yield id_, {
                    "token": row[0],
                    "ner_tag": row[1],
                }