meryyllebr543 committed on
Commit
7d34ac0
·
verified ·
1 Parent(s): f2395b0

Delete stack_edu_huggingface.py

Browse files
Files changed (1) hide show
  1. stack_edu_huggingface.py +0 -104
stack_edu_huggingface.py DELETED
@@ -1,104 +0,0 @@
1
- # stack_edu_huggingface.py
2
-
3
- import datasets
4
-
5
# Human-readable card metadata surfaced by `datasets.DatasetInfo` below.
_DESCRIPTION = """
Pre-downloaded and processed version of the Stack-Edu dataset, which is a 125B token dataset of educational code filtered from The Stack v2.
This dataset was curated by the Hugging Face team using a classifier-based filtering strategy to retain only the highest-quality educational programming content.
This repository contains the raw text content organized by programming language in Parquet format, making it directly accessible without needing to re-download from Software Heritage.
"""

# BibTeX entry for the SmolLM2 paper that introduced Stack-Edu.
_CITATION = """
@misc{allal2025smollm2smolgoesbig,
      title={SmolLM2: When Smol Goes Big -- Data-Centric Training of a Small Language Model},
      author={...},
      year={2025},
      eprint={2502.02737},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

# Canonical dataset page and licensing terms, echoed into DatasetInfo.
_HOMEPAGE = "https://huggingface.co/datasets/HuggingFaceTB/stack-edu"
_LICENSE = "ODC-By v1.0, also subject to CommonCrawl's Terms of Use and Software Heritage licenses."
24
-
25
-
26
class StackEduConfig(datasets.BuilderConfig):
    """BuilderConfig for one Stack-Edu programming-language subset."""

    def __init__(self, language, **kwargs):
        """BuilderConfig for Stack-Edu.

        Args:
            language: string, the programming language to load. Also used
                as the config ``name``, so ``load_dataset(..., language)``
                selects this subset.
            **kwargs: keyword arguments forwarded to super
                (e.g. ``description``).
        """
        # Python 3 zero-argument super() replaces the redundant legacy
        # super(StackEduConfig, self) spelling; behavior is identical.
        super().__init__(
            name=language,
            version=datasets.Version("1.0.0"),
            **kwargs,
        )
        # Kept as an attribute so _split_generators can locate the
        # matching "<language>.parquet" file.
        self.language = language
41
-
42
-
43
class StackEdu(datasets.GeneratorBasedBuilder):
    """Processed Stack-Edu dataset: one Parquet file per programming language."""

    VERSION = datasets.Version("1.0.0")

    # One configuration per programming language; each config name doubles
    # as the basename of the Parquet file holding that language's rows.
    LANGUAGES = [
        "c", "cpp", "csharp", "go", "java", "javascript", "markdown",
        "php", "python", "ruby", "rust", "shell", "sql", "swift", "typescript"
    ]

    BUILDER_CONFIGS = [
        StackEduConfig(
            language=lang,
            description=f"Educational code data for the {lang} language."
        ) for lang in LANGUAGES
    ]

    def _info(self):
        """Return dataset metadata and the feature schema of the Parquet files."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # Columns of the per-language Parquet files.
                    # NOTE(review): schema was transcribed from an inspection
                    # of the files — confirm against the actual Parquet schema.
                    "language": datasets.Value("string"),
                    "src_encoding": datasets.Value("string"),
                    "length_bytes": datasets.Value("int64"),
                    "score": datasets.Value("float64"),
                    "int_score": datasets.Value("int64"),
                    "detected_licenses": datasets.Sequence(datasets.Value("string")),
                    "license_type": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare a single TRAIN split backed by this config's Parquet file.

        Args:
            dl_manager: datasets.DownloadManager used to resolve repository
                files to local paths.

        Returns:
            A one-element list with the TRAIN SplitGenerator.
        """
        # Resolve the per-language file through the download manager instead
        # of a bare relative path: a relative path only works when the
        # current working directory happens to be the repo root, whereas
        # dl_manager resolves it relative to the dataset repository.
        file_path = dl_manager.download(f"{self.config.language}.parquet")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": file_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs read from the Parquet file.

        Args:
            filepath: local path to the "<language>.parquet" file.

        Yields:
            Tuples of (running integer id, row dict matching _info features).

        BUG FIX: the previous body was a bare ``pass``, which made every
        split empty — a GeneratorBasedBuilder only emits what this method
        yields; the Parquet file is NOT read automatically.
        """
        # Reuse the Arrow-backed parquet reader that ships with `datasets`
        # itself, so no new third-party import is needed.
        table = datasets.Dataset.from_parquet(filepath)
        for idx, example in enumerate(table):
            yield idx, example