Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: Arabic
Size: 100K - 1M
License:
Convert dataset to Parquet #3
by albertvillanova (HF Staff), opened

Files changed:
- README.md +8 -3
- caner.py +0 -122
- data/train-00000-of-00001.parquet +3 -0
- dataset_infos.json +0 -1
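
In effect, this PR swaps the executable loading script for a static Parquet shard: README.md gains a `configs` section pointing at `data/train-*`, caner.py and dataset_infos.json are deleted, and the data itself lands as a single LFS-tracked Parquet file. A minimal sketch of loading the converted dataset, assuming the Hub id `caner` (the exact id is not stated in this diff):

```python
from datasets import load_dataset

# After the conversion, this resolves the Parquet shard through the
# `configs` mapping in README.md; no repository code is executed.
ds = load_dataset("caner", split="train")  # Hub id assumed

print(ds.num_rows)  # 258240, per the card metadata below
print(ds[0])        # {"token": ..., "ner_tag": <integer class id>}
```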
README.md CHANGED

@@ -49,10 +49,15 @@ dataset_info:
           '20': Time
   splits:
   - name: train
-    num_bytes: 5095721
+    num_bytes: 5095617
     num_examples: 258240
-  download_size: 17063406
-  dataset_size: 5095721
+  download_size: 1459014
+  dataset_size: 5095617
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for CANER
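
The removed size fields are superseded by values measured from the new Parquet shard, and the added `configs` block is what tells the Hub (and `load_dataset`) which files back each split. The shard can also be read directly as a sanity check; a sketch with pandas, assuming a local checkout of the repo:

```python
import pandas as pd

# Columns mirror the features declared in the card:
# "token" (string) and "ner_tag" (stored as an integer ClassLabel id).
df = pd.read_parquet("data/train-00000-of-00001.parquet")

assert len(df) == 258240  # num_examples in the updated metadata
print(df.head())
```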
caner.py DELETED

@@ -1,122 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""A new corpus of tagged data that can be useful for handling the issues in recognition of Classical Arabic named entities"""
-
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@article{article,
-author = {Salah, Ramzi and Zakaria, Lailatul},
-year = {2018},
-month = {12},
-pages = {},
-title = {BUILDING THE CLASSICAL ARABIC NAMED ENTITY RECOGNITION CORPUS (CANERCORPUS)},
-volume = {96},
-journal = {Journal of Theoretical and Applied Information Technology}
-}
-"""
-
-_DESCRIPTION = """\
-Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities.
-"""
-
-_HOMEPAGE = "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus"
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = ""
-
-_URL = "https://raw.githubusercontent.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus/master/CANERCorpus.csv"
-
-
-class Caner(datasets.GeneratorBasedBuilder):
-    """Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities"""
-
-    VERSION = datasets.Version("1.1.0")
-
-    def _info(self):
-
-        features = datasets.Features(
-            {
-                "token": datasets.Value("string"),
-                "ner_tag": datasets.ClassLabel(
-                    names=[
-                        "Allah",
-                        "Book",
-                        "Clan",
-                        "Crime",
-                        "Date",
-                        "Day",
-                        "Hell",
-                        "Loc",
-                        "Meas",
-                        "Mon",
-                        "Month",
-                        "NatOb",
-                        "Number",
-                        "O",
-                        "Org",
-                        "Para",
-                        "Pers",
-                        "Prophet",
-                        "Rlig",
-                        "Sect",
-                        "Time",
-                    ]
-                ),
-            }
-        )
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            supervised_keys=None,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-
-        data_path = dl_manager.download(_URL)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": data_path,
-                },
-            )
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-
-        with open(filepath, encoding="utf-8") as csv_file:
-            reader = csv.reader(csv_file, delimiter=",")
-            next(reader, None)
-
-            for id_, row in enumerate(reader):
-
-                yield id_, {
-                    "token": row[0],
-                    "ner_tag": row[1],
-                }
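
The deleted script downloaded CANERCorpus.csv, skipped the header row, and yielded (token, ner_tag) pairs over a 21-class label set. For reference, a rough sketch of regenerating an equivalent Parquet shard from the same CSV; the exact tooling used for this PR is not shown in the diff, and the URL and label handling below are copied from the deleted script:

```python
import csv
import urllib.request

import datasets

URL = (
    "https://raw.githubusercontent.com/RamziSalah/"
    "Classical-Arabic-Named-Entity-Recognition-Corpus/master/CANERCorpus.csv"
)

# Fetch and parse the CSV the same way the deleted script did.
with urllib.request.urlopen(URL) as resp:
    lines = resp.read().decode("utf-8").splitlines()

reader = csv.reader(lines)
next(reader, None)  # skip the header row
tokens, tags = [], []
for row in reader:
    tokens.append(row[0])
    tags.append(row[1])

ds = datasets.Dataset.from_dict({"token": tokens, "ner_tag": tags})
# class_encode_column sorts label names alphabetically, which matches
# the (already alphabetical) names list in the deleted script.
ds = ds.class_encode_column("ner_tag")
ds.to_parquet("data/train-00000-of-00001.parquet")
```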
data/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68d710a709cefb398a3b44ac3d0629975fed8ba489747448c499c65baffc3de2
+size 1459014
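
Note that the three added lines are a Git LFS pointer, not the Parquet data itself: the actual bytes live in LFS storage, identified by their SHA-256 digest and size. A quick integrity check against the pointer fields, assuming the file has been pulled locally:

```python
import hashlib

with open("data/train-00000-of-00001.parquet", "rb") as f:
    blob = f.read()

# Both expected values come from the LFS pointer above.
assert len(blob) == 1459014
assert hashlib.sha256(blob).hexdigest() == (
    "68d710a709cefb398a3b44ac3d0629975fed8ba489747448c499c65baffc3de2"
)
```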
dataset_infos.json DELETED

@@ -1 +0,0 @@
-{"default": {"description": "Classical Arabic Named Entity Recognition corpus as a new corpus of tagged data that can be useful for handling the issues in recognition of Arabic named entities.\n", "citation": "@article{article,\nauthor = {Salah, Ramzi and Zakaria, Lailatul},\nyear = {2018},\nmonth = {12},\npages = {},\ntitle = {BUILDING THE CLASSICAL ARABIC NAMED ENTITY RECOGNITION CORPUS (CANERCORPUS)},\nvolume = {96},\njournal = {Journal of Theoretical and Applied Information Technology}\n}\n", "homepage": "https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus", "license": "", "features": {"token": {"dtype": "string", "id": null, "_type": "Value"}, "ner_tag": {"num_classes": 21, "names": ["Allah", "Book", "Clan", "Crime", "Date", "Day", "Hell", "Loc", "Meas", "Mon", "Month", "NatOb", "Number", "O", "Org", "Para", "Pers", "Prophet", "Rlig", "Sect", "Time"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "caner", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5095721, "num_examples": 258240, "dataset_name": "caner"}}, "download_checksums": {"https://github.com/RamziSalah/Classical-Arabic-Named-Entity-Recognition-Corpus/archive/master.zip": {"num_bytes": 17063406, "checksum": "b4f6bbcc1074dfb9a6cf53fbbd5825a8eafbff842cd89ed20ab33f5b3ef6cddb"}}, "download_size": 17063406, "post_processing_size": null, "dataset_size": 5095721, "size_in_bytes": 22159127}}