xkas2001 committed on
Commit
4cd878c
·
verified ·
1 Parent(s): f89d960

Upload dataset_card.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. dataset_card.py +121 -0
dataset_card.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import datasets

# Human-readable summary (written in Uzbek) shown on the dataset card.
# This is a runtime string used as DatasetInfo.description — do not edit casually.
_DESCRIPTION = """
Uzbek Language Dataset Collection - Bu o'zbek tili uchun eng keng ko'lamli va keng qamrovli dataset to'plami hisoblanadi.
Dataset turli manbalardan to'plangan va NLP modellari, til modellari va boshqa AI ilovalar uchun mo'ljallangan.

Dataset 4ta asosiy qismdan iborat:
- Community OSCAR Uzbek (1.1GB): OSCAR Common Crawl datasidan o'zbek tilidagi matnlar
- Custom Uzbek (2.1GB): Maxsus to'plangan va qayta ishlangan o'zbek matnlari
- OSCAR Uzbek (38MB): OSCAR 2301 rasmiy uzbek dataset'idan so'z ro'yxatlari
- Merge (122MB): Birlashtirilgan lug'atlar va frequency lists

Jami hajmi: ~3.4GB
Jami satr soni: 4.7+ million lines
"""

# Landing page for this dataset on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/xkas2001/uzbek-language-dataset"

# SPDX-style license identifier propagated into DatasetInfo.
_LICENSE = "apache-2.0"

# Repo-relative data file paths, keyed by logical subset name.
# NOTE(review): not every key here has a matching BuilderConfig
# ("custom_uzbek_no_emoji" and "unique_words" are declared but not
# wired into any config) — presumably reserved for future configs; verify.
_URLS = {
    "oscar_community": "community-oscar-uzbek/all_metadata_text.txt",
    "custom_uzbek": "custom-uzbek/parsed_with_imlo.txt",
    "custom_uzbek_no_emoji": "custom-uzbek/parsed_with_imlo_without_emoji.txt",
    "top5_quality": "community-oscar-uzbek/top5_metadata_text.txt",
    "frequency_list": "merge/frequency_list.txt",
    "unique_words": "merge/unique_words.txt"
}
29
+
30
class UzbekLanguageDataset(datasets.GeneratorBasedBuilder):
    """Uzbek Language Dataset Collection.

    Exposes several plain-text Uzbek corpora (OSCAR-derived and custom)
    plus a tab-separated word-frequency list, selected via the builder
    config name. Every config yields a single ``train`` split.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="oscar_community",
            version=VERSION,
            description="OSCAR Community data from Common Crawl (2014-2023)",
        ),
        datasets.BuilderConfig(
            name="custom_uzbek",
            version=VERSION,
            description="Custom curated Uzbek texts with spelling correction",
        ),
        datasets.BuilderConfig(
            name="top5_quality",
            version=VERSION,
            description="Top 5 quality texts from OSCAR Community",
        ),
        datasets.BuilderConfig(
            name="word_lists",
            version=VERSION,
            description="Combined word frequency and unique word lists",
        ),
    ]

    DEFAULT_CONFIG_NAME = "oscar_community"

    def _info(self):
        """Return dataset metadata; the feature schema depends on the config."""
        if self.config.name == "word_lists":
            # Frequency-list records: one word plus its integer count.
            features = datasets.Features(
                {
                    "word": datasets.Value("string"),
                    "frequency": datasets.Value("int32"),
                }
            )
        else:
            # All text corpora expose a single raw-text column.
            features = datasets.Features({"text": datasets.Value("string")})

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the data file for the active config and build one train split.

        Args:
            dl_manager: ``datasets`` download manager used to resolve the
                repo-relative paths declared in ``_URLS``.
        """
        if self.config.name == "word_lists":
            # NOTE(review): only the frequency list is loaded; the
            # "unique_words" entry in _URLS is currently unused — confirm
            # whether it should also be exposed by this config.
            frequency_file = dl_manager.download(_URLS["frequency_list"])
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": frequency_file, "split": "frequency"},
                )
            ]

        # Fall back to the default corpus when the config name has no
        # direct _URLS entry (defensive: keeps new configs from crashing).
        url_key = self.config.name if self.config.name in _URLS else "oscar_community"
        text_file = dl_manager.download(_URLS[url_key])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": text_file, "split": "train"},
            )
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from a text or frequency file.

        For ``split == "frequency"`` each line must be ``word<TAB>count``;
        malformed lines (no tab, empty word, or non-integer count) are
        skipped. For any other split, every non-empty stripped line
        becomes one ``{"text": ...}`` example. Keys are the 0-based
        source line numbers, so they stay unique even when lines are
        skipped.
        """
        if split == "frequency":
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    # partition() replaces the original split('\t', 1) plus the
                    # always-true len(parts) == 2 check; sep is empty when the
                    # line has no tab at all.
                    word, sep, freq = line.strip().partition("\t")
                    if not sep or not word:
                        # No separator, or an empty word field (e.g. "\t5").
                        continue
                    try:
                        yield idx, {"word": word, "frequency": int(freq)}
                    except ValueError:
                        # Non-integer frequency field — skip the line.
                        continue
        else:
            with open(filepath, encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    text = line.strip()
                    if text:
                        yield idx, {"text": text}
+ yield idx, {"text": line}