{
  "ug_nlp_dataset": {
    "description": "Uyghur NLP Dataset - 维吾尔语自然语言处理数据集,基于清华大学开源的 THUUyMorph 形态学标注语料库构建",
    "citation": "",
    "homepage": "",
    "license": "",
    "features": {
      "id": {"dtype": "int64"},
      "text": {"dtype": "string"}
    },
    "post_processed_features": {},
    "splits": {
      "train": {"num_bytes": 26400000, "num_examples": 10595}
    },
    "download_size": 26400000,
    "dataset_size": 26400000,
    "config_name": "ug_nlp_dataset"
  },
  "morph_vocabulary": {
    "description": "维吾尔语形态学词根词典",
    "citation": "",
    "homepage": "",
    "license": "",
    "features": {
      "id": {"dtype": "int64"},
      "word": {"dtype": "string"}
    },
    "post_processed_features": {},
    "splits": {
      "train": {"num_bytes": 1200000, "num_examples": 67832}
    },
    "download_size": 1200000,
    "dataset_size": 1200000,
    "config_name": "morph_vocabulary"
  },
  "segmentation_train": {
    "description": "维吾尔语文本",
    "citation": "",
    "homepage": "",
    "license": "",
    "features": {
      "id": {"dtype": "int64"},
      "text": {"dtype": "string"}
    },
    "post_processed_features": {},
    "splits": {
      "train": {"num_bytes": 26500000, "num_examples": 10572}
    },
    "download_size": 26500000,
    "dataset_size": 26500000,
    "config_name": "segmentation_train"
  },
  "word_root_pairs": {
    "description": "维吾尔语词-词根-词缀对",
    "citation": "",
    "homepage": "",
    "license": "",
    "features": {
      "id": {"dtype": "int64"},
      "word": {"dtype": "string"},
      "root": {"dtype": "string"},
      "suffix": {"dtype": "string"},
      "has_variant": {"dtype": "bool"}
    },
    "post_processed_features": {},
    "splits": {
      "train": {"num_bytes": 1000000, "num_examples": 10391}
    },
    "download_size": 1000000,
    "dataset_size": 1000000,
    "config_name": "word_root_pairs"
  }
}