lhoestq HF Staff commited on
Commit
822b4c6
·
verified ·
1 Parent(s): 9da3fa8

Delete loading script

Browse files
Files changed (1) hide show
  1. code_search_net.py +0 -218
code_search_net.py DELETED
@@ -1,218 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """CodeSearchNet corpus: proxy dataset for semantic code search"""
18
-
19
- # TODO: add licensing info in the examples
20
- # TODO: log richer informations (especially while extracting the jsonl.gz files)
21
- # TODO: enable custom configs; such as: "java+python"
22
- # TODO: enable fetching examples with a given license, eg: "java_MIT"
23
-
24
-
25
- import json
26
- import os
27
-
28
- import datasets
29
-
30
-
31
- _CITATION = """\
32
- @article{husain2019codesearchnet,
33
- title={{CodeSearchNet} challenge: Evaluating the state of semantic code search},
34
- author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc},
35
- journal={arXiv preprint arXiv:1909.09436},
36
- year={2019}
37
- }
38
- """
39
-
40
- _DESCRIPTION = """\
41
- CodeSearchNet corpus contains about 6 million functions from open-source code \
42
- spanning six programming languages (Go, Java, JavaScript, PHP, Python, and Ruby). \
43
- The CodeSearchNet Corpus also contains automatically generated query-like \
44
- natural language for 2 million functions, obtained from mechanically scraping \
45
- and preprocessing associated function documentation.
46
- """
47
-
48
- _HOMEPAGE = "https://github.com/github/CodeSearchNet"
49
-
50
- _LICENSE = "Various"
51
-
52
- _DATA_DIR_URL = "data/"
53
- _AVAILABLE_LANGUAGES = ["python", "java", "javascript", "go", "ruby", "php"]
54
- _URLs = {language: _DATA_DIR_URL + f"{language}.zip" for language in _AVAILABLE_LANGUAGES}
55
- # URLs for "all" are just the concatenation of URLs for all languages
56
- _URLs["all"] = _URLs.copy()
57
-
58
-
59
class CodeSearchNet(datasets.GeneratorBasedBuilder):
    """CodeSearchNet corpus: proxy dataset for semantic code search."""

    VERSION = datasets.Version("1.0.0", "Add CodeSearchNet corpus dataset")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=VERSION,
            description="All available languages: Java, Go, Javascript, Python, PHP, Ruby",
        ),
        datasets.BuilderConfig(
            name="java",
            version=VERSION,
            description="Java language",
        ),
        datasets.BuilderConfig(
            name="go",
            version=VERSION,
            description="Go language",
        ),
        datasets.BuilderConfig(
            name="python",
            version=VERSION,
            # Fixed typo: was "Pyhton language".
            description="Python language",
        ),
        datasets.BuilderConfig(
            name="javascript",
            version=VERSION,
            description="Javascript language",
        ),
        datasets.BuilderConfig(
            name="ruby",
            version=VERSION,
            description="Ruby language",
        ),
        datasets.BuilderConfig(
            name="php",
            version=VERSION,
            description="PHP language",
        ),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Return the dataset metadata (feature schema, homepage, license, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "repository_name": datasets.Value("string"),
                    "func_path_in_repository": datasets.Value("string"),
                    "func_name": datasets.Value("string"),
                    "whole_func_string": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "func_code_string": datasets.Value("string"),
                    "func_code_tokens": datasets.Sequence(datasets.Value("string")),
                    "func_documentation_string": datasets.Value("string"),
                    "func_documentation_tokens": datasets.Sequence(datasets.Value("string")),
                    "split_name": datasets.Value("string"),
                    "func_code_url": datasets.Value("string"),
                    # TODO - add licensing info in the examples
                }
            ),
            # No default supervised keys.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/test/validation splits.

        Note: The original data is stored in S3, and follows this unusual directory structure:
        ```
        .
        ├── <language_name>  # e.g. python
        │   └── final
        │       └── jsonl
        │           ├── test
        │           │   └── <language_name>_test_0.jsonl.gz
        │           ├── train
        │           │   ├── <language_name>_train_0.jsonl.gz
        │           │   ├── <language_name>_train_1.jsonl.gz
        │           │   ├── ...
        │           │   └── <language_name>_train_n.jsonl.gz
        │           └── valid
        │               └── <language_name>_valid_0.jsonl.gz
        ├── <language_name>_dedupe_definitions_v2.pkl
        └── <language_name>_licenses.pkl
        ```
        """
        data_urls = _URLs[self.config.name]
        # Single-language configs map to a plain URL string; normalize to
        # {language: url} so the "all" and single-language paths are uniform.
        if isinstance(data_urls, str):
            data_urls = {self.config.name: data_urls}
        # Download & extract the language archives.
        data_dirs = [
            os.path.join(directory, lang, "final", "jsonl")
            for lang, directory in dl_manager.download_and_extract(data_urls).items()
        ]

        split2dirs = {
            split_name: [os.path.join(directory, split_name) for directory in data_dirs]
            for split_name in ["train", "test", "valid"]
        }

        # Each split directory holds one or more <lang>_<split>_<i>.jsonl.gz shards;
        # extract them all. Sorting os.listdir makes shard order (and therefore
        # example keys) deterministic across filesystems.
        split2paths = dl_manager.extract(
            {
                split_name: [
                    os.path.join(directory, entry_name)
                    for directory in split_dirs
                    for entry_name in sorted(os.listdir(directory))
                ]
                for split_name, split_dirs in split2dirs.items()
            }
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": split2paths["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": split2paths["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepaths": split2paths["valid"],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yields (key, example) pairs by iterating through the extracted jsonl files.

        Args:
            filepaths: list of paths to extracted (plain-text) jsonl files,
                one JSON object per line.
        """
        for file_id_, filepath in enumerate(filepaths):
            with open(filepath, encoding="utf-8") as f:
                for row_id_, row in enumerate(f):
                    # Key of the example = file_id + row_id,
                    # to ensure all examples have a distinct key.
                    id_ = f"{file_id_}_{row_id_}"
                    data = json.loads(row)
                    yield id_, {
                        "repository_name": data["repo"],
                        "func_path_in_repository": data["path"],
                        "func_name": data["func_name"],
                        "whole_func_string": data["original_string"],
                        "language": data["language"],
                        "func_code_string": data["code"],
                        "func_code_tokens": data["code_tokens"],
                        "func_documentation_string": data["docstring"],
                        "func_documentation_tokens": data["docstring_tokens"],
                        "split_name": data["partition"],
                        "func_code_url": data["url"],
                    }