Commit
·
424d195
0
Parent(s):
feat(data): update data
Browse files- .gitattributes +57 -0
- .gitignore +5 -0
- README.md +60 -0
- data/test.parquet +3 -0
- data/train.parquet +3 -0
- data/validation.parquet +3 -0
- msra_dev.py +91 -0
.gitattributes
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
# Audio files - uncompressed
|
| 38 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
# Audio files - compressed
|
| 42 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
# Image files - uncompressed
|
| 48 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
# Image files - compressed
|
| 53 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
# BMES files - uncompressed
|
| 57 |
+
*.bmes filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
download.py
|
| 2 |
+
save.py
|
| 3 |
+
*.bmes
|
| 4 |
+
data/labels.txt
|
| 5 |
+
*.json
|
README.md
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
dataset_info:
|
| 3 |
+
features:
|
| 4 |
+
- name: text
|
| 5 |
+
sequence: string
|
| 6 |
+
- name: labels
|
| 7 |
+
sequence:
|
| 8 |
+
class_label:
|
| 9 |
+
names:
|
| 10 |
+
'0': O
|
| 11 |
+
'1': B-NS
|
| 12 |
+
'2': M-NS
|
| 13 |
+
'3': E-NS
|
| 14 |
+
'4': S-NS
|
| 15 |
+
'5': B-NT
|
| 16 |
+
'6': M-NT
|
| 17 |
+
'7': E-NT
|
| 18 |
+
'8': S-NT
|
| 19 |
+
'9': B-NR
|
| 20 |
+
'10': M-NR
|
| 21 |
+
'11': E-NR
|
| 22 |
+
'12': S-NR
|
| 23 |
+
splits:
|
| 24 |
+
- name: train
|
| 25 |
+
num_bytes: 32917977
|
| 26 |
+
num_examples: 46364
|
| 27 |
+
- name: validation
|
| 28 |
+
num_bytes: 2623860
|
| 29 |
+
num_examples: 4365
|
| 30 |
+
- name: test
|
| 31 |
+
num_bytes: 2623860
|
| 32 |
+
num_examples: 4365
|
| 33 |
+
download_size: 4762958
|
| 34 |
+
dataset_size: 38165697
|
| 35 |
+
---
|
| 36 |
+
|
| 37 |
+
### How to load the dataset
|
| 38 |
+
```python
|
| 39 |
+
from datasets import load_dataset
|
| 40 |
+
datasets = load_dataset("minskiter/msra_dev",save_infos=True)
|
| 41 |
+
train,test = datasets['train'],datasets['test']
|
| 42 |
+
# convert label to str
|
| 43 |
+
print(train.features['labels'].feature.int2str(0))
|
| 44 |
+
```
|
| 45 |
+
|
| 46 |
+
### Force update
|
| 47 |
+
```python
|
| 48 |
+
from datasets import load_dataset
|
| 49 |
+
datasets = load_dataset("minskiter/msra_dev", download_mode="force_redownload")
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
### Adapt the data for your training
|
| 53 |
+
|
| 54 |
+
```python
|
| 55 |
+
def transform(example):
|
| 56 |
+
# edit example here
|
| 57 |
+
return example
|
| 58 |
+
for key in datasets:
|
| 59 |
+
datasets[key] = datasets[key].map(transform)
|
| 60 |
+
```
|
data/test.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1c458c4d25ed611ecfe2a5dc548b671df7c12efa48e8ff42d25bc45a86450a3c
|
| 3 |
+
size 334044
|
data/train.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:50ac447cb00d6364eae1ef716191973b738626074bf66ad5c68b49d4a50b56af
|
| 3 |
+
size 4094870
|
data/validation.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1c458c4d25ed611ecfe2a5dc548b671df7c12efa48e8ff42d25bc45a86450a3c
|
| 3 |
+
size 334044
|
msra_dev.py
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import datasets
|
| 2 |
+
from datasets.download.download_manager import DownloadManager
|
| 3 |
+
import pyarrow.parquet as pq
|
| 4 |
+
|
| 5 |
+
# Short summary surfaced through `datasets.DatasetInfo.description`.
_DESCRIPTION = """\
The MSRA NER dataset is a Chinese Named Entity Recognition dataset
"""

# BibTeX entry for the SIGHAN-2006 bakeoff paper that introduced the corpus.
_CITATION = """\
@inproceedings{levow-2006-third,
    title = "The Third International {C}hinese Language Processing Bakeoff: Word Segmentation and Named Entity Recognition",
    author = "Levow, Gina-Anne",
    booktitle = "Proceedings of the Fifth {SIGHAN} Workshop on {C}hinese Language Processing",
    month = jul,
    year = "2006",
    address = "Sydney, Australia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W06-0115",
    pages = "108--117",
}
"""

# Base URL of the dataset repository; each split is a parquet file under data/.
_URL = "https://huggingface.co/datasets/minskiter/msra_dev/resolve/main/"
_URLS = {
    split: _URL + "data/" + split + ".parquet"
    for split in ("train", "validation", "test")
}
|
| 29 |
+
|
| 30 |
+
class MSRANamedEntities(datasets.GeneratorBasedBuilder):
    """Dataset builder for the MSRA Chinese NER corpus (parquet mirror).

    Downloads one pre-built parquet file per split (see ``_URLS``) and yields
    one example per row. Each example carries a ``text`` token sequence and a
    parallel ``labels`` sequence of BMES-style tags over the NS/NT/NR entity
    types (presumably place/organization/person — confirm against the corpus
    documentation).
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Tag order must stay fixed: it defines the integer id of each label.
        # Generated order matches the original hand-written list:
        # O, then B/M/E/S for NS, then NT, then NR.
        label_names = ["O"] + [
            f"{position}-{entity}"
            for entity in ("NS", "NT", "NR")
            for position in ("B", "M", "E", "S")
        ]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Sequence(datasets.Value("string")),
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(names=label_names)
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://aclanthology.org/W06-0115/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # Fetch (and cache) every split's parquet file in one call.
        local_paths = dl_manager.download_and_extract(_URLS)
        split_map = (
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.VALIDATION, "validation"),
            (datasets.Split.TEST, "test"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": local_paths[url_key]},
            )
            for split_name, url_key in split_map
        ]

    def _generate_examples(self, filepath):
        """Yield ``(id, row)`` pairs from one parquet file, ids starting at 0."""
        with open(filepath, "rb") as stream:
            with pq.ParquetFile(stream) as parquet:
                # Stream in small batches instead of materializing the table.
                rows = (
                    row
                    for batch in parquet.iter_batches(batch_size=64)
                    for row in batch.to_pylist()
                )
                yield from enumerate(rows)
|