import datasets
from datasets.download.download_manager import DownloadManager
import pyarrow.parquet as pq


_DESCRIPTION = """\
The Weibo NER dataset is a Chinese Named Entity Recognition dataset
drawn from the social media website Sina Weibo.
"""


_CITATION = """\
@inproceedings{peng-dredze-2015-named,
    title = "Named Entity Recognition for {C}hinese Social Media with Jointly Trained Embeddings",
    author = "Peng, Nanyun and Dredze, Mark",
    booktitle = "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing",
    month = sep,
    year = "2015",
    address = "Lisbon, Portugal",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D15-1064",
    doi = "10.18653/v1/D15-1064",
    pages = "548--554",
}
"""


# Files are resolved against the `main` revision of the `minskiter/weibo`
# dataset repository on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/minskiter/weibo/resolve/main/"
_URLS = {
    "train": _URL + "data/train.parquet",
    "validation": _URL + "data/validation.parquet",
    "test": _URL + "data/test.parquet",
}


class WeiboNamedEntities(datasets.GeneratorBasedBuilder):
    """Builder for the Weibo NER dataset, distributed as three Parquet splits."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Sequence(datasets.Value("string")),
                    # BIOES-style tags (Begin/Inside/End/Single) over four entity
                    # types (PER, ORG, LOC, GPE), each split into named (.NAM)
                    # and nominal (.NOM) mentions.
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-PER.NAM",
                                "I-PER.NAM",
                                "E-PER.NAM",
                                "S-PER.NAM",
                                "B-ORG.NAM",
                                "I-ORG.NAM",
                                "E-ORG.NAM",
                                "S-ORG.NAM",
                                "B-LOC.NAM",
                                "I-LOC.NAM",
                                "E-LOC.NAM",
                                "S-LOC.NAM",
                                "B-GPE.NAM",
                                "I-GPE.NAM",
                                "E-GPE.NAM",
                                "S-GPE.NAM",
                                "B-PER.NOM",
                                "I-PER.NOM",
                                "E-PER.NOM",
                                "S-PER.NOM",
                                "B-ORG.NOM",
                                "I-ORG.NOM",
                                "E-ORG.NOM",
                                "S-ORG.NOM",
                                "B-LOC.NOM",
                                "I-LOC.NOM",
                                "E-LOC.NOM",
                                "S-LOC.NOM",
                                "B-GPE.NOM",
                                "I-GPE.NOM",
                                "E-GPE.NOM",
                                "S-GPE.NOM",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/D15-1064/",
            citation=_CITATION,
        )
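
    # Illustrative row shape (hypothetical values, for orientation only):
    #   {"text": ["我", "在", "北", "京"],
    #    "labels": ["O", "O", "B-GPE.NAM", "E-GPE.NAM"]}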

    def _split_generators(self, dl_manager: DownloadManager):
        # Parquet files are not archives, so download_and_extract simply
        # downloads and caches them, returning a local file path per split.
        download_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": download_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": download_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": download_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # Stream the Parquet file in small record batches so large splits
        # never need to be fully materialized in memory.
        with open(filepath, "rb") as f:
            with pq.ParquetFile(f) as parquet_file:
                _id = -1
                for batch in parquet_file.iter_batches(batch_size=64):
                    for row in batch.to_pylist():
                        _id += 1
                        yield _id, row
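

# A minimal usage sketch (an assumption, not part of the original script):
# with this file saved locally as weibo.py, a `datasets` version that still
# supports script-based builders can load it directly. The ClassLabel feature
# converts class indices back to their BIOES tag names.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("weibo.py")  # hypothetical local path to this script
    sample = ds["train"][0]
    tag_names = ds["train"].features["labels"].feature.names
    print(sample["text"])
    print([tag_names[i] for i in sample["labels"]])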