File size: 5,081 Bytes
c9364ce
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b41b654
 
d5c5317
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c9364ce
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b41b654
 
c9364ce
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
# -*- coding: utf-8 -*-
"""caner_replicate.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1QgToSf5_B5l43oRCKwI2vGEB0YcRUtHe
"""

import csv
import os

import datasets


_CITATION = """\
@article{article,
author = {Salah, Ramzi and Zakaria, Lailatul},
year = {2018},
month = {12},
pages = {},
title = {BUILDING THE CLASSICAL ARABIC NAMED ENTITY RECOGNITION CORPUS (CANERCORPUS)},
volume = {96},
journal = {Journal of Theoretical and Applied Information Technology}
}
"""

_DESCRIPTION = """\
Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities.
"""

_HOMEPAGE = "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus"
_HOMEPAGE = "https://github.com/omarmohamed2011/caner_data_copy"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = ""

#_URL = "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus/archive/master.zip"
_URL  = 'https://github.com/omarmohamed2011/caner_data_copy/blob/main/df_caner.zip'

class Caner(datasets.GeneratorBasedBuilder):
    """Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities"""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata.

        Each example is a single token paired with its named-entity class
        label (one of the 52 classical-Arabic scholar names below).
        """
        features = datasets.Features(
            {
                "token": datasets.Value("string"),
                "ner_tag": datasets.ClassLabel(
                    names=[
                        'MalikIbnAnas',
                        'Sibawayh',
                        'IbnHisham',
                        'IbnSulaymanKufi',
                        'Bukhari',
                        'KhalilFarahidi',
                        'FathIbnKhaqan',
                        'IbnHamzaKisai',
                        'IbnTayfur',
                        'MuhammadBarjlani',
                        'ImamCaskari',
                        'AbdAllahIbnCabbas',
                        'IbnAyyubRazi',
                        'IbnWaddahQurtubi',
                        'HasanBasri',
                        'IbnAbiKhaythama',
                        'YahyaIbnHusayn',
                        'SufyanThawri',
                        'IbnQuraybAsmaci',
                        'IbnIsmacilKirmani',
                        'IbnCimranMawsili',
                        'Mubarrad',
                        'MuhammadShaybani',
                        'AbuZurcaDimashqi',
                        'IbnWahbQurashi',
                        'MacmarIbnMuthanna',
                        'YahyaIbnSalam',
                        'AbuHasanSacdi',
                        'IbnIbrahimBursi',
                        'IbnSirin',
                        'Baladhuri',
                        'CaliIbnAbiTalib',
                        'IbnZiyadFarra',
                        'AbuYusufYacqub',
                        'IbnHanbal',
                        'ZubayrIbnBakkar',
                        'AbuBakrBazzar',
                        'Fakihi',
                        'IbnMuzahimMinqari',
                        'AbyZurca',
                        'AkhfashAwsat',
                        'AhmadBarqi',
                        'IbnAhmadIbnHanbal',
                        'IbnCabdHakam',
                        'CabdRazzakSancani',
                        'AbuHatimSijistani',
                        'IbnSacd',
                        'IbnHammadKhuzaci',
                        'IbnCaliMarwazi',
                        'MujahidIbnJabr',
                        'Bahshal',
                        'IbnHasanSaffar',
                    ]
                ),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and declare the single train split.

        Bug fix: the extraction directory returned by download_and_extract()
        was previously discarded in favor of the hard-coded Colab path
        '/content/df_caner.csv', which only worked inside the original
        notebook. We now locate the CSV inside the extracted archive.
        """
        data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={
                    # Assumes the zip contains df_caner.csv at its top level
                    # (per the original commented-out join) — TODO confirm.
                    "filepath": os.path.join(data_dir, "df_caner.csv"),
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from the two-column CSV.

        Column 0 is the token, column 1 its NER tag; the first row is
        treated as a header and skipped.
        """
        with open(filepath, encoding="utf-8") as csv_file:
            reader = csv.reader(csv_file, delimiter=",")
            next(reader, None)  # skip the header row (safe on empty files)

            for id_, row in enumerate(reader):
                yield id_, {
                    "token": row[0],
                    "ner_tag": row[1],
                }