Datasets:
Tasks:
Token Classification
Modalities:
Text
Sub-tasks:
named-entity-recognition
Languages:
Polish
Size:
10K - 100K
License:
First version of the dataset script, data files, and tagset list.
Browse files- README.md +37 -0
- data/n82_tagset.txt +82 -0
- data/test.iob +0 -0
- data/train.iob +0 -0
- data/valid.iob +0 -0
- kpwr.py +81 -0
README.md
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
annotations_creators:
|
| 3 |
+
- expert-generated
|
| 4 |
+
language_creators:
|
| 5 |
+
- found
|
| 6 |
+
language:
|
| 7 |
+
- pl
|
| 8 |
+
license:
|
| 9 |
+
- cc-by-3.0
|
| 10 |
+
multilinguality:
|
| 11 |
+
- monolingual
|
| 12 |
+
pretty_name: 'KPWr 1.27'
|
| 13 |
+
size_categories:
|
| 14 |
+
- 18K
|
| 15 |
+
- 10K<n<100K
|
| 16 |
+
source_datasets:
|
| 17 |
+
- original
|
| 18 |
+
task_categories:
|
| 19 |
+
- token-classification
|
| 20 |
+
task_ids:
|
| 21 |
+
- named-entity-recognition
|
| 22 |
+
---
|
| 23 |
+
|
| 24 |
+
# KPWr
|
| 25 |
+
|
| 26 |
+
## Description
|
| 27 |
+
|
| 28 |
+
KPWr dataset is a HF dataset implementation of the Polish Corpus of Wrocław University of Technology (*Korpus Języka Polskiego Politechniki Wrocławskiej*). Its objective is named entity recognition for fine-grained categories of entities. It is the ‘n82’ version of the KPWr, which means that the number of classes is restricted to 82 (originally 120). During corpus creation, texts gathered from various sources, covering many domains and genres, were annotated by humans.
|
| 29 |
+
|
| 30 |
+
## Tasks (input, output and metrics)
|
| 31 |
+
Named entity recognition (NER) - tagging entities in text with their corresponding type.
|
| 32 |
+
|
| 33 |
+
**Input** ('*tokens*' column): sequence of tokens
|
| 34 |
+
|
| 35 |
+
**Output** ('*ner*' column): sequence of predicted tokens’ classes in BIO notation (82 possible classes, described in detail in the annotation guidelines)
|
| 36 |
+
|
| 37 |
+
**Measurements**: F1-score (seqeval)
|
data/n82_tagset.txt
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
nam_adj
|
| 2 |
+
nam_adj_city
|
| 3 |
+
nam_adj_country
|
| 4 |
+
nam_adj_person
|
| 5 |
+
nam_eve
|
| 6 |
+
nam_eve_human
|
| 7 |
+
nam_eve_human_cultural
|
| 8 |
+
nam_eve_human_holiday
|
| 9 |
+
nam_eve_human_sport
|
| 10 |
+
nam_fac_bridge
|
| 11 |
+
nam_fac_goe
|
| 12 |
+
nam_fac_goe_stop
|
| 13 |
+
nam_fac_park
|
| 14 |
+
nam_fac_road
|
| 15 |
+
nam_fac_square
|
| 16 |
+
nam_fac_system
|
| 17 |
+
nam_liv_animal
|
| 18 |
+
nam_liv_character
|
| 19 |
+
nam_liv_god
|
| 20 |
+
nam_liv_habitant
|
| 21 |
+
nam_liv_person
|
| 22 |
+
nam_loc
|
| 23 |
+
nam_loc_astronomical
|
| 24 |
+
nam_loc_country_region
|
| 25 |
+
nam_loc_gpe_admin1
|
| 26 |
+
nam_loc_gpe_admin2
|
| 27 |
+
nam_loc_gpe_admin3
|
| 28 |
+
nam_loc_gpe_city
|
| 29 |
+
nam_loc_gpe_conurbation
|
| 30 |
+
nam_loc_gpe_country
|
| 31 |
+
nam_loc_gpe_district
|
| 32 |
+
nam_loc_gpe_subdivision
|
| 33 |
+
nam_loc_historical_region
|
| 34 |
+
nam_loc_hydronym
|
| 35 |
+
nam_loc_hydronym_lake
|
| 36 |
+
nam_loc_hydronym_ocean
|
| 37 |
+
nam_loc_hydronym_river
|
| 38 |
+
nam_loc_hydronym_sea
|
| 39 |
+
nam_loc_land
|
| 40 |
+
nam_loc_land_continent
|
| 41 |
+
nam_loc_land_island
|
| 42 |
+
nam_loc_land_mountain
|
| 43 |
+
nam_loc_land_peak
|
| 44 |
+
nam_loc_land_region
|
| 45 |
+
nam_num_house
|
| 46 |
+
nam_num_phone
|
| 47 |
+
nam_org_company
|
| 48 |
+
nam_org_group
|
| 49 |
+
nam_org_group_band
|
| 50 |
+
nam_org_group_team
|
| 51 |
+
nam_org_institution
|
| 52 |
+
nam_org_nation
|
| 53 |
+
nam_org_organization
|
| 54 |
+
nam_org_organization_sub
|
| 55 |
+
nam_org_political_party
|
| 56 |
+
nam_oth
|
| 57 |
+
nam_oth_currency
|
| 58 |
+
nam_oth_data_format
|
| 59 |
+
nam_oth_license
|
| 60 |
+
nam_oth_position
|
| 61 |
+
nam_oth_tech
|
| 62 |
+
nam_oth_www
|
| 63 |
+
nam_pro
|
| 64 |
+
nam_pro_award
|
| 65 |
+
nam_pro_brand
|
| 66 |
+
nam_pro_media
|
| 67 |
+
nam_pro_media_periodic
|
| 68 |
+
nam_pro_media_radio
|
| 69 |
+
nam_pro_media_tv
|
| 70 |
+
nam_pro_media_web
|
| 71 |
+
nam_pro_model_car
|
| 72 |
+
nam_pro_software
|
| 73 |
+
nam_pro_software_game
|
| 74 |
+
nam_pro_title
|
| 75 |
+
nam_pro_title_album
|
| 76 |
+
nam_pro_title_article
|
| 77 |
+
nam_pro_title_book
|
| 78 |
+
nam_pro_title_document
|
| 79 |
+
nam_pro_title_song
|
| 80 |
+
nam_pro_title_treaty
|
| 81 |
+
nam_pro_title_tv
|
| 82 |
+
nam_pro_vehicle
|
data/test.iob
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/train.iob
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/valid.iob
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
kpwr.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
"""KPWr version 1.27 dataset loading script for the Hugging Face `datasets` library."""

import csv

import datasets

_DESCRIPTION = "KPWR version 1.27 dataset."

# Raw IOB data files hosted alongside this script on the Hugging Face Hub.
_URLS = {
    "train": "https://huggingface.co/datasets/clarin-knext/kpwr/resolve/main/data/train.iob",
    "valid": "https://huggingface.co/datasets/clarin-knext/kpwr/resolve/main/data/valid.iob",
    "test": "https://huggingface.co/datasets/clarin-knext/kpwr/resolve/main/data/test.iob",
}

_HOMEPAGE = "https://clarin-pl.eu/dspace/handle/11321/270"

# Load the n82 tagset (one tag name per line).
# NOTE: `splitlines()` plus the blank-line filter avoids a spurious empty tag
# when the file ends with a trailing newline — `split('\n')` would otherwise
# produce an empty string, injecting bogus 'B-'/'I-' labels into the schema.
# It also tolerates CRLF line endings on Windows checkouts.
with open('data/n82_tagset.txt', 'r', encoding='utf-8') as fin:
    _N82_TAGS = [tag for tag in fin.read().splitlines() if tag]

# Full IOB label inventory: the outside tag plus B-/I- variants of each class.
_NER_IOB_TAGS = ['O']

for tag in _N82_TAGS:
    _NER_IOB_TAGS.extend([f'B-{tag}', f'I-{tag}'])
class KpwrDataset(datasets.GeneratorBasedBuilder):
    """Dataset builder for KPWr 1.27 (n82 tagset) token classification."""

    def _info(self) -> datasets.DatasetInfo:
        """Declare the example schema: tokens, lemmas, morphosyntactic tags, NER labels."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value('string')),
                    "lemmas": datasets.Sequence(datasets.Value('string')),
                    "mstags": datasets.Sequence(datasets.Value('string')),
                    "ner": datasets.Sequence(datasets.features.ClassLabel(names=_NER_IOB_TAGS))
                }
            ),
            homepage=_HOMEPAGE
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the three IOB files and map them onto train/validation/test splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['valid']}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files['test']})
        ]

    def _generate_examples(self, filepath: str):
        """Yield ``(id, example)`` pairs from a tab-separated IOB file.

        Expected columns per token line: token, lemma, morphosyntactic tag,
        NER tag (BIO). Sentences are separated by blank lines; single-column
        lines (e.g. --DOCSTART markers) are skipped.
        """
        with open(filepath, 'r', encoding='utf-8') as fin:
            reader = csv.reader(fin, delimiter='\t', quoting=csv.QUOTE_NONE)

            tokens = []
            lemmas = []
            mstags = []
            ner = []
            gid = 0

            for line in reader:
                if not line:
                    # Blank line terminates a sentence. The `tokens` guard
                    # prevents consecutive blank lines from emitting empty
                    # examples (a bug in the original version).
                    if tokens:
                        yield gid, {
                            "tokens": tokens,
                            "lemmas": lemmas,
                            "mstags": mstags,
                            "ner": ner
                        }
                        gid += 1
                        tokens = []
                        lemmas = []
                        mstags = []
                        ner = []

                elif len(line) == 1:  # ignore --DOCSTART lines
                    continue

                else:
                    tokens.append(line[0])
                    lemmas.append(line[1])
                    mstags.append(line[2])
                    ner.append(line[3])

            # Flush the final sentence when the file does not end with a
            # blank line; the original version silently dropped it.
            if tokens:
                yield gid, {
                    "tokens": tokens,
                    "lemmas": lemmas,
                    "mstags": mstags,
                    "ner": ner
                }
|