Serhii committed on
Commit cc14724 · 1 Parent(s): 54de6a8

Add load file

Files changed (1)
  1. Custom_SQuAD.py +1221 -0
Custom_SQuAD.py ADDED
@@ -0,0 +1,1221 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """Access datasets."""
18
+ import filecmp
19
+ import glob
20
+ import importlib
21
+ import inspect
22
+ import json
23
+ import os
24
+ import re
25
+ import shutil
26
+ import time
27
+ import warnings
28
+ from collections import Counter
29
+ from pathlib import Path, PurePath
30
+ from typing import Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union
31
+ from urllib.parse import urlparse
32
+
33
+ import fsspec
34
+ import huggingface_hub
35
+ from huggingface_hub import HfApi
36
+
37
+ from . import config
38
+ from .arrow_dataset import Dataset
39
+ from .builder import DatasetBuilder
40
+ from .dataset_dict import DatasetDict, IterableDatasetDict
41
+ from .features import Features
42
+ from .filesystems import extract_path_from_uri, is_remote_filesystem
43
+ from .iterable_dataset import IterableDataset
44
+ from .metric import Metric
45
+ from .naming import camelcase_to_snakecase
46
+ from .packaged_modules import _EXTENSION_TO_MODULE, _PACKAGED_DATASETS_MODULES, hash_python_lines
47
+ from .splits import Split
48
+ from .streaming import extend_module_for_streaming
49
+ from .tasks import TaskTemplate
50
+ from .utils.download_manager import GenerateMode
51
+ from .utils.file_utils import (
52
+ DownloadConfig,
53
+ cached_path,
54
+ head_hf_s3,
55
+ hf_github_url,
56
+ hf_hub_url,
57
+ init_hf_modules,
58
+ is_relative_path,
59
+ is_remote_url,
60
+ relative_to_absolute_path,
61
+ url_or_path_join,
62
+ url_or_path_parent,
63
+ )
64
+ from .utils.filelock import FileLock
65
+ from .utils.info_utils import is_small_dataset
66
+ from .utils.logging import get_logger
67
+ from .utils.py_utils import NestedDataStructure
68
+ from .utils.version import Version
69
+
70
+
71
+ logger = get_logger(__name__)
72
+
73
+
74
+ def init_dynamic_modules(
75
+ name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
76
+ ):
77
+ """
78
+ Create a module with name `name` in which you can add dynamic modules
79
+ such as metrics or datasets. The module can be imported using its name.
80
+ The module is created in the HF_MODULES_CACHE directory by default (~/.cache/huggingface/modules) but it can
81
+ be overridden by specifying a path to another directory in `hf_modules_cache`.
82
+ """
83
+ hf_modules_cache = init_hf_modules(hf_modules_cache)
84
+ dynamic_modules_path = os.path.join(hf_modules_cache, name)
85
+ os.makedirs(dynamic_modules_path, exist_ok=True)
86
+ if not os.path.exists(os.path.join(dynamic_modules_path, "__init__.py")):
87
+ with open(os.path.join(dynamic_modules_path, "__init__.py"), "w"):
88
+ pass
89
+ return dynamic_modules_path
90
+
91
+
92
+ def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
93
+ """Import a module at module_path and return its main class:
94
+ - a DatasetBuilder if dataset is True
95
+ - a Metric if dataset is False
96
+ """
97
+ module = importlib.import_module(module_path)
98
+
99
+ if dataset:
100
+ main_cls_type = DatasetBuilder
101
+ else:
102
+ main_cls_type = Metric
103
+
104
+ # Find the main class in our imported module
105
+ module_main_cls = None
106
+ for name, obj in module.__dict__.items():
107
+ if isinstance(obj, type) and issubclass(obj, main_cls_type):
108
+ if inspect.isabstract(obj):
109
+ continue
110
+ module_main_cls = obj
111
+ break
112
+
113
+ return module_main_cls
114
+
115
+
116
+ def files_to_hash(file_paths: List[str]) -> str:
117
+ """
118
+ Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
119
+ """
120
+ # List all python files in directories if directories are supplied as part of external imports
121
+ to_use_files: List[Union[Path, str]] = []
122
+ for file_path in file_paths:
123
+ if os.path.isdir(file_path):
124
+ to_use_files.extend(list(Path(file_path).rglob("*.[pP][yY]")))
125
+ else:
126
+ to_use_files.append(file_path)
127
+
128
+ # Get the code from all these files
129
+ lines = []
130
+ for file_path in to_use_files:
131
+ with open(file_path, mode="r", encoding="utf-8") as f:
132
+ lines.extend(f.readlines())
133
+ return hash_python_lines(lines)
134
+
135
+
136
+ def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
137
+ """Convert a link to a file on a github repo in a link to the raw github object."""
138
+ parsed = urlparse(url_path)
139
+ sub_directory = None
140
+ if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
141
+ if "blob" in url_path:
142
+ assert url_path.endswith(
143
+ ".py"
144
+ ), f"External import from github at {url_path} should point to a file ending with '.py'"
145
+ url_path = url_path.replace("blob", "raw") # Point to the raw file
146
+ else:
147
+ # Parse github url to point to zip
148
+ github_path = parsed.path[1:]
149
+ repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
150
+ repo_owner, repo_name = repo_info.split("/")
151
+ url_path = "https://github.com/{}/{}/archive/{}.zip".format(repo_owner, repo_name, branch)
152
+ sub_directory = f"{repo_name}-{branch}"
153
+ return url_path, sub_directory
154
+
155
+
156
+ def get_imports(file_path: str):
157
+ r"""Find whether we should import or clone additional files for a given processing script.
158
+ And list the imports.
159
+
160
+ We allow:
161
+ - library dependencies,
162
+ - local dependencies and
163
+ - external dependencies whose URL is specified with a comment starting with '# From:' followed by the raw URL to a file, an archive or a github repository.
164
+ External dependencies will be downloaded (and extracted if needed) into the dataset folder.
165
+ We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.
166
+
167
+ Note that only direct imports in the dataset processing script will be handled.
168
+ We don't recursively explore the additional imports to download further files.
169
+
170
+ Examples::
171
+
172
+ import tensorflow
173
+ import .c4_utils
174
+ import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
175
+ """
176
+ lines = []
177
+ with open(file_path, mode="r", encoding="utf-8") as f:
178
+ lines.extend(f.readlines())
179
+
180
+ logger.debug("Checking %s for additional imports.", file_path)
181
+ imports: List[Tuple[str, str, str, Optional[str]]] = []
182
+ is_in_docstring = False
183
+ for line in lines:
184
+ docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
185
+
186
+ if len(docstr_start_match) == 1:
187
+ # flip True <=> False only if docstring
188
+ # starts at line without finishing
189
+ is_in_docstring = not is_in_docstring
190
+
191
+ if is_in_docstring:
192
+ # import statements in docstrings should
193
+ # not be added as required dependencies
194
+ continue
195
+
196
+ match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
197
+ if match is None:
198
+ match = re.match(
199
+ r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
200
+ line,
201
+ flags=re.MULTILINE,
202
+ )
203
+ if match is None:
204
+ continue
205
+ if match.group(1):
206
+ # The import starts with a '.', we will download the relevant file
207
+ if any(imp[1] == match.group(2) for imp in imports):
208
+ # We already have this import
209
+ continue
210
+ if match.group(3):
211
+ # The import has a comment with 'From:', we'll retrieve it from the given url
212
+ url_path = match.group(3)
213
+ url_path, sub_directory = convert_github_url(url_path)
214
+ imports.append(("external", match.group(2), url_path, sub_directory))
215
+ elif match.group(2):
216
+ # The import should be at the same place as the file
217
+ imports.append(("internal", match.group(2), match.group(2), None))
218
+ else:
219
+ if match.group(3):
220
+ # The import has a comment with `From: git+https:...`, asks user to pip install from git.
221
+ url_path = match.group(3)
222
+ imports.append(("library", match.group(2), url_path, None))
223
+ else:
224
+ imports.append(("library", match.group(2), match.group(2), None))
225
+
226
+ return imports
227
+
228
+
229
+ def _resolve_data_files_locally_or_by_urls(
230
+ base_path: str, patterns: Union[str, List[str], Dict], allowed_extensions: Optional[list] = None
231
+ ) -> Union[List[Path], Dict]:
232
+ """
233
+ Return the absolute paths to all the files that match the given patterns.
234
+ It also supports absolute paths in patterns.
235
+ If a URL is passed, it is returned as is."""
236
+ data_files_ignore = ["README.md", "config.json"]
237
+ if isinstance(patterns, str):
238
+ if is_remote_url(patterns):
239
+ return [patterns]
240
+ if is_relative_path(patterns):
241
+ glob_iter = list(Path(base_path).rglob(patterns))
242
+ else:
243
+ glob_iter = [Path(filepath) for filepath in glob.glob(patterns)]
244
+
245
+ matched_paths = [
246
+ filepath.resolve()
247
+ for filepath in glob_iter
248
+ if filepath.name not in data_files_ignore and not filepath.name.startswith(".") and filepath.is_file()
249
+ ]
250
+ if allowed_extensions is not None:
251
+ out = [
252
+ filepath
253
+ for filepath in matched_paths
254
+ if any(suffix[1:] in allowed_extensions for suffix in filepath.suffixes)
255
+ ]
256
+ if len(out) < len(matched_paths):
257
+ invalid_matched_files = list(set(matched_paths) - set(out))
258
+ logger.info(
259
+ f"Some files matched the pattern '{patterns}' at {Path(base_path).resolve()} but don't have valid data file extensions: {invalid_matched_files}"
260
+ )
261
+ else:
262
+ out = matched_paths
263
+ if not out:
264
+ error_msg = f"Unable to resolve any data file that matches '{patterns}' at {Path(base_path).resolve()}"
265
+ if allowed_extensions is not None:
266
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
267
+ raise FileNotFoundError(error_msg)
268
+ return out
269
+ elif isinstance(patterns, dict):
270
+ return {
271
+ k: _resolve_data_files_locally_or_by_urls(base_path, v, allowed_extensions=allowed_extensions)
272
+ for k, v in patterns.items()
273
+ }
274
+ else:
275
+ return sum(
276
+ [
277
+ _resolve_data_files_locally_or_by_urls(base_path, pattern, allowed_extensions=allowed_extensions)
278
+ for pattern in patterns
279
+ ],
280
+ [],
281
+ )
282
+
283
+
284
+ def _resolve_data_files_in_dataset_repository(
285
+ dataset_info: huggingface_hub.hf_api.DatasetInfo,
286
+ patterns: Union[str, List[str], Dict],
287
+ allowed_extensions: Optional[list] = None,
288
+ ) -> Union[List[PurePath], Dict]:
289
+ data_files_ignore = ["README.md", "config.json"]
290
+ if isinstance(patterns, str):
291
+ all_data_files = [
292
+ PurePath("/" + dataset_file.rfilename) for dataset_file in dataset_info.siblings
293
+ ] # add a / at the beginning to make the pattern **/* match files at the root
294
+ matched_paths = [
295
+ filepath.relative_to("/")
296
+ for filepath in all_data_files
297
+ if filepath.name not in data_files_ignore
298
+ and not filepath.name.startswith(".")
299
+ and filepath.match(patterns)
300
+ ]
301
+ if allowed_extensions is not None:
302
+ out = [
303
+ filepath
304
+ for filepath in matched_paths
305
+ if any(suffix[1:] in allowed_extensions for suffix in filepath.suffixes)
306
+ ]
307
+ if len(out) < len(matched_paths):
308
+ invalid_matched_files = list(set(matched_paths) - set(out))
309
+ logger.info(
310
+ f"Some files matched the pattern {patterns} in dataset repository {dataset_info.id} but don't have valid data file extensions: {invalid_matched_files}"
311
+ )
312
+ else:
313
+ out = matched_paths
314
+ if not out:
315
+ error_msg = f"Unable to resolve data_file {patterns} in dataset repository {dataset_info.id}"
316
+ if allowed_extensions is not None:
317
+ error_msg += f" with any supported extension {list(allowed_extensions)}"
318
+ raise FileNotFoundError(error_msg)
319
+ return out
320
+ elif isinstance(patterns, dict):
321
+ return {
322
+ k: _resolve_data_files_in_dataset_repository(dataset_info, v, allowed_extensions=allowed_extensions)
323
+ for k, v in patterns.items()
324
+ }
325
+ else:
326
+ return sum(
327
+ [
328
+ _resolve_data_files_in_dataset_repository(dataset_info, pattern, allowed_extensions=allowed_extensions)
329
+ for pattern in patterns
330
+ ],
331
+ [],
332
+ )
333
+
334
+
335
+ def _infer_module_for_data_files(data_files: Union[PurePath, List[PurePath], Dict]) -> Optional[str]:
336
+ extensions_counter = Counter(
337
+ suffix[1:] for filepath in NestedDataStructure(data_files).flatten() for suffix in filepath.suffixes
338
+ )
339
+ if extensions_counter:
340
+ return _EXTENSION_TO_MODULE[extensions_counter.most_common(1)[0][0]]
341
+
342
+
343
+ def prepare_module(
344
+ path: str,
345
+ revision: Optional[Union[str, Version]] = None,
346
+ download_config: Optional[DownloadConfig] = None,
347
+ download_mode: Optional[GenerateMode] = None,
348
+ dataset: bool = True,
349
+ force_local_path: Optional[str] = None,
350
+ dynamic_modules_path: Optional[str] = None,
351
+ return_resolved_file_path: bool = False,
352
+ return_associated_base_path: bool = False,
353
+ data_files: Optional[Union[Dict, List, str]] = None,
354
+ script_version="deprecated",
355
+ **download_kwargs,
356
+ ) -> Union[Tuple[str, str], Tuple[str, str, Optional[str]]]:
357
+ r"""
358
+ Download/extract/cache a dataset (if dataset==True) or a metric (if dataset==False)
359
+
360
+ Dataset and metric code is cached inside the dynamic modules cache to allow easy import (avoiding ugly sys.path tweaks)
361
+ and using cloudpickle (among other things).
362
+
363
+ Args:
364
+
365
+ path (str): Path or name of the dataset, or path to a metric script.
366
+ Depending on ``path``, the module that is returned is either a generic module (csv, json, text etc.) or a module defined in a dataset or metric script (a python file).
367
+
368
+ For local datasets:
369
+
370
+ - if ``path`` is a local directory (but doesn't contain a dataset script)
371
+ -> load a generic module (csv, json, text etc.) based on the content of the directory
372
+ e.g. ``'./path/to/directory/with/my/csv/data'``.
373
+ - if ``path`` is a local dataset or metric script or a directory containing a local dataset or metric script (if the script has the same name as the directory):
374
+ -> load the module from the dataset or metric script
375
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
376
+
377
+ For datasets on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
378
+
379
+ - if ``path`` is a canonical dataset or metric on the HF Hub (ex: `glue`, `squad`)
380
+ -> load the module from the dataset or metric script in the github repository at huggingface/datasets
381
+ e.g. ``'squad'``, ``'glue'`` or ``'accuracy'``.
382
+ - if ``path`` is a dataset repository on the HF hub (without a dataset script)
383
+ -> load a generic module (csv, text etc.) based on the content of the repository
384
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
385
+ - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
386
+ -> load the module from the dataset script in the dataset repository
387
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
388
+
389
+ revision (Optional ``Union[str, datasets.Version]``):
390
+ If specified, the module will be loaded from the datasets repository at this version.
391
+ By default:
392
+ - it is set to the local version of the lib.
393
+ - it will also try to load it from the master branch if it's not available at the local version of the lib.
394
+ Specifying a version that is different from your local version of the lib might cause compatibility issues.
395
+ download_config (:class:`DownloadConfig`, optional): Specific download configuration parameters.
396
+ download_mode (:class:`GenerateMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
397
+ dataset (bool): True if the script to load is a dataset, False if the script is a metric.
398
+ force_local_path (Optional str): Optional path to a local folder to download and prepare the script in.
399
+ Used to inspect or modify the script folder.
400
+ dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
401
+ Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
402
+ By default the datasets and metrics are stored inside the `datasets_modules` module.
403
+ return_resolved_file_path (Optional bool, defaults to False):
404
+ If True, the URL or path to the resolved dataset or metric script is returned with the other outputs.
405
+ return_associated_base_path (Optional bool, defaults to False):
406
+ If True, the base path associated with the dataset is returned with the other outputs.
407
+ It corresponds to the directory or base URL where the dataset script/dataset repo is located.
408
+ data_files (:obj:`Union[Dict, List, str]`, optional): Defining the data_files of the dataset configuration.
409
+ script_version:
410
+ .. deprecated:: 1.13
411
+ 'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.
412
+ download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
413
+
414
+ Returns:
415
+ Tuple[``str``, ``str``]:
416
+ 1. The module path being
417
+ - the import path of the dataset/metric package if force_local_path is False: e.g. 'datasets.datasets.squad'
418
+ - the local path to the dataset/metric file if force_local_path is True: e.g. '/User/huggingface/datasets/datasets/squad/squad.py'
419
+ 2. A hash string computed from the content of the dataset loading script.
420
+ """
421
+ if script_version != "deprecated":
422
+ warnings.warn(
423
+ "'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.", FutureWarning
424
+ )
425
+ revision = script_version
426
+ if download_config is None:
427
+ download_config = DownloadConfig(**download_kwargs)
428
+ download_config.extract_compressed_file = True
429
+ download_config.force_extract = True
430
+
431
+ module_type = "dataset" if dataset else "metric"
432
+ name = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
433
+ if not name.endswith(".py"):
434
+ name = name + ".py"
435
+
436
+ # Short name is name without the '.py' at the end (for the module)
437
+ short_name = name[:-3]
438
+
439
+ # first check if the module is packaged with the `datasets` package
440
+ def prepare_packaged_module(name):
441
+ try:
442
+ head_hf_s3(name, filename=name + ".py", dataset=dataset, max_retries=download_config.max_retries)
443
+ except Exception:
444
+ logger.debug(f"Couldn't head HF s3 for packaged dataset module '{name}'. Running in offline mode.")
445
+ return _PACKAGED_DATASETS_MODULES[name]
446
+
447
+ if dataset and path in _PACKAGED_DATASETS_MODULES:
448
+ output = prepare_packaged_module(path)
449
+ if return_resolved_file_path:
450
+ output += (None,)
451
+ if return_associated_base_path:
452
+ output += (None,)
453
+ return output
454
+
455
+ # otherwise the module is added to the dynamic modules
456
+ dynamic_modules_path = dynamic_modules_path if dynamic_modules_path else init_dynamic_modules()
457
+ module_name_for_dynamic_modules = os.path.basename(dynamic_modules_path)
458
+ datasets_modules_path = os.path.join(dynamic_modules_path, "datasets")
459
+ datasets_modules_name = module_name_for_dynamic_modules + ".datasets"
460
+ metrics_modules_path = os.path.join(dynamic_modules_path, "metrics")
461
+ metrics_modules_name = module_name_for_dynamic_modules + ".metrics"
462
+
463
+ if force_local_path is None:
464
+ main_folder_path = os.path.join(datasets_modules_path if dataset else metrics_modules_path, short_name)
465
+ else:
466
+ main_folder_path = force_local_path
467
+
468
+ # We have several ways to find the processing file:
469
+ # - if os.path.join(path, name) is a local python file
470
+ # -> use the module from the python file
471
+ # - if path is a local directory (but no python file)
472
+ # -> use a packaged module (csv, text etc.) based on content of the directory
473
+ # - if path has no "/" and is a module on github (in /datasets or in /metrics)
474
+ # -> use the module from the python file on github
475
+ # - if path has one "/" and is dataset repository on the HF hub with a python file
476
+ # -> the module from the python file in the dataset repository
477
+ # - if path has one "/" and is dataset repository on the HF hub without a python file
478
+ # -> use a packaged module (csv, text etc.) based on content of the repository
479
+ resource_type = "dataset" if dataset else "metric"
480
+ combined_path = os.path.join(path, name)
481
+ if path.endswith(name):
482
+ if os.path.isfile(path):
483
+ file_path = path
484
+ local_path = path
485
+ base_path = os.path.dirname(path)
486
+ else:
487
+ raise FileNotFoundError(f"Couldn't find a {resource_type} script at {relative_to_absolute_path(path)}")
488
+ elif os.path.isfile(combined_path):
489
+ file_path = combined_path
490
+ local_path = combined_path
491
+ base_path = path
492
+ elif os.path.isfile(path):
493
+ file_path = path
494
+ local_path = path
495
+ base_path = os.path.dirname(path)
496
+ elif os.path.isdir(path):
497
+ resolved_data_files = _resolve_data_files_locally_or_by_urls(
498
+ path, data_files or "*", allowed_extensions=_EXTENSION_TO_MODULE.keys()
499
+ )
500
+ infered_module_name = _infer_module_for_data_files(resolved_data_files)
501
+ if not infered_module_name:
502
+ raise FileNotFoundError(f"No data files or {resource_type} script found in local directory {path}")
503
+ output = prepare_packaged_module(infered_module_name)
504
+ if return_resolved_file_path:
505
+ output += (None,)
506
+ if return_associated_base_path:
507
+ output += (path,)
508
+ return output
509
+ else:
510
+ # Try github (canonical datasets/metrics) and then HF Hub (community datasets)
511
+ combined_path_abs = relative_to_absolute_path(combined_path)
512
+ expected_dir_for_combined_path_abs = os.path.dirname(combined_path_abs)
513
+ try:
514
+ try:
515
+ head_hf_s3(path, filename=name, dataset=dataset, max_retries=download_config.max_retries)
516
+ except Exception:
517
+ pass
518
+ revision = str(revision) if revision is not None else None
519
+ if path.count("/") == 0: # canonical datasets/metrics: github path
520
+ file_path = hf_github_url(path=path, name=name, dataset=dataset, revision=revision)
521
+ try:
522
+ local_path = cached_path(file_path, download_config=download_config)
523
+ except FileNotFoundError:
524
+ if revision is not None:
525
+ raise FileNotFoundError(
526
+ f"Couldn't find a directory or a {resource_type} named '{path}' using version {revision}. "
527
+ f"It doesn't exist locally at {expected_dir_for_combined_path_abs} or remotely at {file_path}"
528
+ ) from None
529
+ else:
530
+ github_file_path = file_path
531
+ file_path = hf_github_url(path=path, name=name, dataset=dataset, revision="master")
532
+ try:
533
+ local_path = cached_path(file_path, download_config=download_config)
534
+ logger.warning(
535
+ f"Couldn't find a directory or a {resource_type} named '{path}'. "
536
+ f"It was picked from the master branch on github instead at {file_path}"
537
+ )
538
+ except FileNotFoundError:
539
+ raise FileNotFoundError(
540
+ f"Couldn't find a directory or a {resource_type} named '{path}'. "
541
+ f"It doesn't exist locally at {expected_dir_for_combined_path_abs} or remotely at {github_file_path}"
542
+ ) from None
543
+ elif path.count("/") == 1: # users datasets/metrics: s3 path (hub for datasets and s3 for metrics)
544
+ file_path = hf_hub_url(path=path, name=name, revision=revision)
545
+ if not dataset:
546
+ # We don't have community metrics on the HF Hub
547
+ raise FileNotFoundError(
548
+ f"Couldn't find a {resource_type} in a directory at '{path}'. "
549
+ f"It doesn't exist locally at {combined_path_abs}"
550
+ )
551
+ try:
552
+ local_path = cached_path(file_path, download_config=download_config)
553
+ except FileNotFoundError:
554
+ hf_api = HfApi(config.HF_ENDPOINT)
555
+ try:
556
+ dataset_info = hf_api.dataset_info(
557
+ repo_id=path, revision=revision, token=download_config.use_auth_token
558
+ )
559
+ except Exception as exc:
560
+ raise FileNotFoundError(
561
+ f"Couldn't find a directory or a {resource_type} named '{path}'. "
562
+ f"It doesn't exist locally at {expected_dir_for_combined_path_abs} or remotely on {hf_api.endpoint}/datasets"
563
+ ) from exc
564
+ resolved_data_files = _resolve_data_files_in_dataset_repository(
565
+ dataset_info,
566
+ data_files if data_files is not None else "*",
567
+ allowed_extensions=_EXTENSION_TO_MODULE.keys(),
568
+ )
569
+ infered_module_name = _infer_module_for_data_files(resolved_data_files)
570
+ if not infered_module_name:
571
+ raise FileNotFoundError(
572
+ f"No data files found in dataset repository '{path}'. Local directory at {expected_dir_for_combined_path_abs} doesn't exist either."
573
+ ) from None
574
+ output = prepare_packaged_module(infered_module_name)
575
+ if return_resolved_file_path:
576
+ output += (None,)
577
+ if return_associated_base_path:
578
+ output += (url_or_path_parent(file_path),)
579
+ return output
580
+ else:
581
+ raise FileNotFoundError(
582
+ f"Couldn't find a {resource_type} directory at '{path}'. "
583
+ f"It doesn't exist locally at {expected_dir_for_combined_path_abs}"
584
+ )
585
+ except Exception as e: # noqa: all the attempts failed, before raising the error we should check if the module already exists.
586
+ if os.path.isdir(main_folder_path):
587
+ hashes = [h for h in os.listdir(main_folder_path) if len(h) == 64]
588
+ if hashes:
589
+ # get most recent
590
+ def _get_modification_time(module_hash):
591
+ return (Path(main_folder_path) / module_hash / name).stat().st_mtime
592
+
593
+ hash = sorted(hashes, key=_get_modification_time)[-1]
594
+ module_path = ".".join(
595
+ [datasets_modules_name if dataset else metrics_modules_name, short_name, hash, short_name]
596
+ )
597
+ logger.warning(
598
+ f"Using the latest cached version of the module from {os.path.join(main_folder_path, hash)} "
599
+ f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
600
+ f"couldn't be found locally at {combined_path_abs}, or remotely ({type(e).__name__})."
601
+ )
602
+ output = (module_path, hash)
603
+ if return_resolved_file_path:
604
+ with open(os.path.join(main_folder_path, hash, short_name + ".json")) as cache_metadata:
605
+ file_path = json.load(cache_metadata)["original file path"]
606
+ output += (file_path,)
607
+ if return_associated_base_path:
608
+ output += (url_or_path_parent(file_path),)
609
+ return output
610
+ raise
611
+
612
+ # Load the module in two steps:
613
+ # 1. get the processing file on the local filesystem if it's not there (download to cache dir)
614
+ # 2. copy from the local file system inside the modules cache to import it
615
+
616
+ base_path = url_or_path_parent(file_path) # remove the filename
617
+ dataset_infos = url_or_path_join(base_path, config.DATASETDICT_INFOS_FILENAME)
618
+
619
+ # Download the dataset infos file if available
620
+ try:
621
+ local_dataset_infos_path = cached_path(
622
+ dataset_infos,
623
+ download_config=download_config,
624
+ )
625
+ except (FileNotFoundError, ConnectionError):
626
+ local_dataset_infos_path = None
627
+
628
+ # Download external imports if needed
629
+ imports = get_imports(local_path)
630
+ local_imports = []
631
+ library_imports = []
632
+ for import_type, import_name, import_path, sub_directory in imports:
633
+ if import_type == "library":
634
+ library_imports.append((import_name, import_path)) # Import from a library
635
+ continue
636
+
637
+ if import_name == short_name:
638
+ raise ValueError(
639
+ f"Error in {module_type} script at {file_path}, importing relative {import_name} module "
640
+ f"but {import_name} is the name of the {module_type} script. "
641
+ f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
642
+ f"comment pointing to the original relative import file path."
643
+ )
644
+ if import_type == "internal":
645
+ url_or_filename = url_or_path_join(base_path, import_path + ".py")
646
+ elif import_type == "external":
647
+ url_or_filename = import_path
648
+ else:
649
+ raise ValueError("Wrong import_type")
650
+
651
+ local_import_path = cached_path(
652
+ url_or_filename,
653
+ download_config=download_config,
654
+ )
655
+ if sub_directory is not None:
656
+ local_import_path = os.path.join(local_import_path, sub_directory)
657
+ local_imports.append((import_name, local_import_path))
658
+
659
+ # Check library imports
660
+ needs_to_be_installed = []
661
+ for library_import_name, library_import_path in library_imports:
662
+ try:
663
+ lib = importlib.import_module(library_import_name) # noqa F841
664
+ except ImportError:
665
+ needs_to_be_installed.append((library_import_name, library_import_path))
666
+ if needs_to_be_installed:
667
+ raise ImportError(
668
+ f"To be able to use this {module_type}, you need to install the following dependencies"
669
+ f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install "
670
+ f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance'"
671
+ )
672
+
673
+ # Define a directory with a unique name in our dataset or metric folder
674
+ # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
675
+ # we use a hash to be able to have multiple versions of a dataset/metric processing file together
676
+ hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
677
+
678
+ if force_local_path is None:
679
+ hash_folder_path = os.path.join(main_folder_path, hash)
680
+ else:
681
+ hash_folder_path = force_local_path
682
+
683
+ local_file_path = os.path.join(hash_folder_path, name)
684
+ dataset_infos_path = os.path.join(hash_folder_path, config.DATASETDICT_INFOS_FILENAME)
685
+
686
+ # Prevent parallel disk operations
687
+ lock_path = local_path + ".lock"
688
+ with FileLock(lock_path):
689
+ # Create main dataset/metrics folder if needed
690
+ if download_mode == GenerateMode.FORCE_REDOWNLOAD and os.path.exists(main_folder_path):
691
+ shutil.rmtree(main_folder_path)
692
+
693
+ if not os.path.exists(main_folder_path):
694
+ logger.info(f"Creating main folder for {module_type} {file_path} at {main_folder_path}")
695
+ os.makedirs(main_folder_path, exist_ok=True)
696
+ else:
697
+ logger.info(f"Found main folder for {module_type} {file_path} at {main_folder_path}")
698
+
699
+ # add an __init__ file to the main dataset folder if needed
700
+ init_file_path = os.path.join(main_folder_path, "__init__.py")
701
+ if not os.path.exists(init_file_path):
702
+ with open(init_file_path, "w"):
703
+ pass
704
+
705
+ # Create hash dataset folder if needed
706
+ if not os.path.exists(hash_folder_path):
707
+ logger.info(f"Creating specific version folder for {module_type} {file_path} at {hash_folder_path}")
708
+ os.makedirs(hash_folder_path)
709
+ else:
710
+ logger.info(f"Found specific version folder for {module_type} {file_path} at {hash_folder_path}")
711
+
712
+ # add an __init__ file to the hash dataset folder if needed
713
+ init_file_path = os.path.join(hash_folder_path, "__init__.py")
714
+ if not os.path.exists(init_file_path):
715
+ with open(init_file_path, "w"):
716
+ pass
717
+
718
+ # Copy dataset.py file in hash folder if needed
719
+ if not os.path.exists(local_file_path):
720
+ logger.info("Copying script file from %s to %s", file_path, local_file_path)
721
+ shutil.copyfile(local_path, local_file_path)
722
+ else:
723
+ logger.info("Found script file from %s to %s", file_path, local_file_path)
724
+
725
+ # Copy dataset infos file if needed
726
+ if not os.path.exists(dataset_infos_path):
727
+ if local_dataset_infos_path is not None:
728
+ logger.info("Copying dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
729
+ shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
730
+ else:
731
+ logger.info("Couldn't find dataset infos file at %s", dataset_infos)
732
+ else:
733
+ if local_dataset_infos_path is not None and not filecmp.cmp(local_dataset_infos_path, dataset_infos_path):
734
+ logger.info("Updating dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
735
+ shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
736
+ else:
737
+ logger.info("Found dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
738
+
739
+ # Record metadata associating original dataset path with local unique folder
740
+ meta_path = local_file_path.split(".py")[0] + ".json"
741
+ if not os.path.exists(meta_path):
742
+ logger.info(f"Creating metadata file for {module_type} {file_path} at {meta_path}")
743
+ meta = {"original file path": file_path, "local file path": local_file_path}
744
+ # the filename is *.py in our case, so better rename to filename.json instead of filename.py.json
745
+ with open(meta_path, "w", encoding="utf-8") as meta_file:
746
+ json.dump(meta, meta_file)
747
+ else:
748
+ logger.info(f"Found metadata file for {module_type} {file_path} at {meta_path}")
749
+
750
+ # Copy all the additional imports
751
+ for import_name, import_path in local_imports:
752
+ if os.path.isfile(import_path):
753
+ full_path_local_import = os.path.join(hash_folder_path, import_name + ".py")
754
+ if not os.path.exists(full_path_local_import):
755
+ logger.info("Copying local import file from %s at %s", import_path, full_path_local_import)
756
+ shutil.copyfile(import_path, full_path_local_import)
757
+ else:
758
+ logger.info("Found local import file from %s at %s", import_path, full_path_local_import)
759
+ elif os.path.isdir(import_path):
760
+ full_path_local_import = os.path.join(hash_folder_path, import_name)
761
+ if not os.path.exists(full_path_local_import):
762
+ logger.info("Copying local import directory from %s at %s", import_path, full_path_local_import)
763
+ shutil.copytree(import_path, full_path_local_import)
764
+ else:
765
+ logger.info("Found local import directory from %s at %s", import_path, full_path_local_import)
766
+ else:
767
+ raise OSError(f"Error with local import at {import_path}")
768
+
769
+ if force_local_path is None:
770
+ module_path = ".".join(
771
+ [datasets_modules_name if dataset else metrics_modules_name, short_name, hash, short_name]
772
+ )
773
+ else:
774
+ module_path = local_file_path
775
+
776
+ # make the new module to be noticed by the import system
777
+ importlib.invalidate_caches()
778
+
779
+ output = (module_path, hash)
780
+ if return_resolved_file_path:
781
+ output += (file_path,)
782
+ if return_associated_base_path:
783
+ output += (base_path,)
784
+ return output
785
+
786
+
787
+
789
+ def load_metric(
790
+ path: str,
791
+ config_name: Optional[str] = None,
792
+ process_id: int = 0,
793
+ num_process: int = 1,
794
+ cache_dir: Optional[str] = None,
795
+ experiment_id: Optional[str] = None,
796
+ keep_in_memory: bool = False,
797
+ download_config: Optional[DownloadConfig] = None,
798
+ download_mode: Optional[GenerateMode] = None,
799
+ revision: Optional[Union[str, Version]] = None,
800
+ script_version="deprecated",
801
+ **metric_init_kwargs,
802
+ ) -> Metric:
803
+ r"""Load a `datasets.Metric`.
804
+
805
+ Args:
806
+
807
+ path (``str``):
808
+ path to the metric processing script with the metric builder. Can be either:
809
+ - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
810
+ e.g. ``'./metrics/rouge'`` or ``'./metrics/rouge/rouge.py'``
811
+ - a metric identifier on the HuggingFace datasets repo (list all available metrics with ``datasets.list_metrics()``)
812
+ e.g. ``'rouge'`` or ``'bleu'``
813
+ config_name (Optional ``str``): selecting a configuration for the metric (e.g. the GLUE metric has a configuration for each subset)
814
+ process_id (Optional ``int``): for distributed evaluation: id of the process
815
+ num_process (Optional ``int``): for distributed evaluation: total number of processes
816
+ cache_dir (Optional str): path to store the temporary predictions and references (defaults to `~/.cache/huggingface/metrics/`)
817
+ experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
818
+ This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
819
+ keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False)
820
+ download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
821
+ download_mode (:class:`GenerateMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
822
+ revision (Optional ``Union[str, datasets.Version]``): if specified, the module will be loaded from the datasets repository
823
+ at this version. By default it is set to the local version of the lib. Specifying a version that is different from
824
+ your local version of the lib might cause compatibility issues.
825
+ script_version:
826
+ .. deprecated:: 1.13
827
+ 'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.
828
+
829
+ Returns:
830
+ `datasets.Metric`
831
+ """
832
+ if script_version != "deprecated":
833
+ warnings.warn(
834
+ "'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.", FutureWarning
835
+ )
836
+ revision = script_version
837
+ module_path, _ = prepare_module(
838
+ path,
839
+ revision=revision,
840
+ download_config=download_config,
841
+ download_mode=download_mode,
842
+ dataset=False,
843
+ )
844
+ metric_cls = import_main_class(module_path, dataset=False)
845
+ metric = metric_cls(
846
+ config_name=config_name,
847
+ process_id=process_id,
848
+ num_process=num_process,
849
+ cache_dir=cache_dir,
850
+ keep_in_memory=keep_in_memory,
851
+ experiment_id=experiment_id,
852
+ **metric_init_kwargs,
853
+ )
854
+
855
+ # Download and prepare resources for the metric
856
+ metric.download_and_prepare(download_config=download_config)
857
+
858
+ return metric
859
+
860
+
861
+
862
+
864
+ def load_dataset_builder(
865
+ path: str,
866
+ name: Optional[str] = None,
867
+ data_dir: Optional[str] = None,
868
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
869
+ cache_dir: Optional[str] = None,
870
+ features: Optional[Features] = None,
871
+ download_config: Optional[DownloadConfig] = None,
872
+ download_mode: Optional[GenerateMode] = None,
873
+ revision: Optional[Union[str, Version]] = None,
874
+ use_auth_token: Optional[Union[bool, str]] = None,
875
+ script_version="deprecated",
876
+ **config_kwargs,
877
+ ) -> DatasetBuilder:
878
+ """Load a builder for the dataset. A dataset builder can be used to inspect general information that is required to build a dataset (cache directory, config, dataset info, etc.)
879
+ without downloading the dataset itself.
880
+
881
+ This method will download and import the dataset loading script from ``path`` if it's not already cached inside the library.
882
+
883
+ Args:
884
+
885
+ path (:obj:`str`): Path or name of the dataset.
886
+ Depending on ``path``, the dataset builder that is returned is either a generic dataset builder (csv, json, text etc.) or a dataset builder defined in a dataset script (a python file).
887
+
888
+ For local datasets:
889
+
890
+ - if ``path`` is a local directory (but doesn't contain a dataset script)
891
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
892
+ e.g. ``'./path/to/directory/with/my/csv/data'``.
893
+ - if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
894
+ -> load the dataset builder from the dataset script
895
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
896
+
897
+ For datasets on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
898
+
899
+ - if ``path`` is a canonical dataset on the HF Hub (ex: `glue`, `squad`)
900
+ -> load the dataset builder from the dataset script in the github repository at huggingface/datasets
901
+ e.g. ``'squad'`` or ``'glue'``.
902
+ - if ``path`` is a dataset repository on the HF hub (without a dataset script)
903
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
904
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
905
+ - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
906
+ -> load the dataset builder from the dataset script in the dataset repository
907
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
908
+
909
+
910
+ name (:obj:`str`, optional): Defining the name of the dataset configuration.
911
+ data_dir (:obj:`str`, optional): Defining the data_dir of the dataset configuration.
912
+ data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
913
+ cache_dir (:obj:`str`, optional): Directory to read/write data. Defaults to "~/.cache/huggingface/datasets".
914
+ features (:class:`Features`, optional): Set the features type to use for this dataset.
915
+ download_config (:class:`~utils.DownloadConfig`, optional): Specific download configuration parameters.
916
+ download_mode (:class:`GenerateMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
917
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load:
918
+
919
+ - For canonical datasets in the `huggingface/datasets` library like "squad", the default version of the module is the local version of the lib.
920
+ You can specify a different version from your local version of the lib (e.g. "master" or "1.2.0") but it might cause compatibility issues.
921
+ - For community provided datasets like "lhoestq/squad" that have their own git repository on the Datasets Hub, the default version "main" corresponds to the "main" branch.
922
+ You can specify a different version than the default "main" by using a commit sha or a git tag of the dataset repository.
923
+ use_auth_token (``str`` or ``bool``, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
924
+ If True, will get token from `"~/.huggingface"`.
925
+ script_version:
926
+ .. deprecated:: 1.13
927
+ 'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.
928
+
929
+ Returns:
930
+ :class:`DatasetBuilder`
931
+
932
+ """
933
+ if script_version != "deprecated":
934
+ warnings.warn(
935
+ "'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.", FutureWarning
936
+ )
937
+ revision = script_version
938
+ # Download/copy dataset processing script
939
+ module_path, hash, base_path = prepare_module(
940
+ path,
941
+ revision=revision,
942
+ download_config=download_config,
943
+ download_mode=download_mode,
944
+ dataset=True,
945
+ return_associated_base_path=True,
946
+ use_auth_token=use_auth_token,
947
+ data_files=data_files,
948
+ )
949
+
950
+ # Get dataset builder class from the processing script
951
+ builder_cls = import_main_class(module_path, dataset=True)
952
+
953
+ # For packaged builder used to load data from a dataset repository or dataset directory (no dataset script)
954
+ if module_path.startswith("datasets.") and path not in _PACKAGED_DATASETS_MODULES:
955
+ # Add a nice name to the configuration
956
+ if name is None:
957
+ name = path.split("/")[-1].split(os.sep)[-1]
958
+ # Resolve the data files
959
+ allowed_extensions = [
960
+ extension
961
+ for extension in _EXTENSION_TO_MODULE
962
+ if _EXTENSION_TO_MODULE[extension] == camelcase_to_snakecase(builder_cls.__name__)
963
+ ]
964
+ data_files = data_files if data_files is not None else "*"
965
+ if base_path.startswith(config.HF_ENDPOINT):
966
+ dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(path, revision=revision, token=use_auth_token)
967
+ data_files = _resolve_data_files_in_dataset_repository(
968
+ dataset_info, data_files, allowed_extensions=allowed_extensions
969
+ )
970
+ else: # local dir
971
+ data_files = _resolve_data_files_locally_or_by_urls(
972
+ path, data_files, allowed_extensions=allowed_extensions
973
+ )
974
+ elif path in _PACKAGED_DATASETS_MODULES:
975
+ if data_files is None:
976
+ error_msg = f"Please specify the data files to load for the {path} dataset builder."
977
+ example_extensions = [
978
+ extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path
979
+ ]
980
+ if example_extensions:
981
+ error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`'
982
+ raise ValueError(error_msg)
983
+ data_files = _resolve_data_files_locally_or_by_urls(".", data_files)
984
+
985
+ # Instantiate the dataset builder
986
+ builder_instance: DatasetBuilder = builder_cls(
987
+ cache_dir=cache_dir,
988
+ name=name,
989
+ data_dir=data_dir,
990
+ data_files=data_files,
991
+ hash=hash,
992
+ base_path=base_path,
993
+ features=features,
994
+ use_auth_token=use_auth_token,
995
+ **config_kwargs,
996
+ )
997
+
998
+ return builder_instance
999
+
1000
+
1001
+
1002
+
1004
+ def load_dataset(
1005
+ path: str,
1006
+ name: Optional[str] = None,
1007
+ data_dir: Optional[str] = None,
1008
+ data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]] = None,
1009
+ split: Optional[Union[str, Split]] = None,
1010
+ cache_dir: Optional[str] = None,
1011
+ features: Optional[Features] = None,
1012
+ download_config: Optional[DownloadConfig] = None,
1013
+ download_mode: Optional[GenerateMode] = None,
1014
+ ignore_verifications: bool = False,
1015
+ keep_in_memory: Optional[bool] = None,
1016
+ save_infos: bool = False,
1017
+ revision: Optional[Union[str, Version]] = None,
1018
+ use_auth_token: Optional[Union[bool, str]] = None,
1019
+ task: Optional[Union[str, TaskTemplate]] = None,
1020
+ streaming: bool = False,
1021
+ script_version="deprecated",
1022
+ **config_kwargs,
1023
+ ) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
1024
+ """Load a dataset.
1025
+
1026
+ This method does the following under the hood:
1027
+
1028
+ 1. Download and import in the library the dataset loading script from ``path`` if it's not already cached inside the library.
1029
+
1030
+ Processing scripts are small python scripts that define the citation, info and format of the dataset,
1031
+ contain the URL to the original data files and the code to load examples from the original data files.
1032
+
1033
+ You can find some of the scripts here: https://github.com/huggingface/datasets/datasets
1034
+ and easily upload yours to share them using the CLI ``huggingface-cli``.
1035
+ You can find the complete list of datasets in the Datasets Hub at https://huggingface.co/datasets
1036
+
1037
+ 2. Run the dataset loading script which will:
1038
+
1039
+ * Download the dataset file from the original URL (see the script) if it's not already downloaded and cached.
1040
+ * Process and cache the dataset in typed Arrow tables for caching.
1041
+
1042
+ Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python standard types.
1043
+ They can be directly accessed from drive, loaded in RAM or even streamed over the web.
1044
+
1045
+ 3. Return a dataset built from the requested splits in ``split`` (default: all).
1046
+
1047
+ It also allows you to load a dataset from a local directory or a dataset repository on the Hugging Face Hub without a dataset script.
1048
+ In this case, it automatically loads all the data files from the directory or the dataset repository.
1049
+
1050
+ Args:
1051
+
1052
+ path (:obj:`str`): Path or name of the dataset.
1053
+ Depending on ``path``, the dataset builder that is returned is either a generic dataset builder (csv, json, text etc.) or a dataset builder defined in a dataset script (a python file).
1054
+
1055
+ For local datasets:
1056
+
1057
+ - if ``path`` is a local directory (but doesn't contain a dataset script)
1058
+ -> load a generic dataset builder (csv, json, text etc.) based on the content of the directory
1059
+ e.g. ``'./path/to/directory/with/my/csv/data'``.
1060
+ - if ``path`` is a local dataset script or a directory containing a local dataset script (if the script has the same name as the directory):
1061
+ -> load the dataset builder from the dataset script
1062
+ e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
1063
+
1064
+ For datasets on the Hugging Face Hub (list all available datasets and ids with ``datasets.list_datasets()``)
1065
+
1066
+ - if ``path`` is a canonical dataset on the HF Hub (ex: `glue`, `squad`)
1067
+ -> load the dataset builder from the dataset script in the github repository at huggingface/datasets
1068
+ e.g. ``'squad'`` or ``'glue'``.
1069
+ - if ``path`` is a dataset repository on the HF hub (without a dataset script)
1070
+ -> load a generic dataset builder (csv, text etc.) based on the content of the repository
1071
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing your data files.
1072
+ - if ``path`` is a dataset repository on the HF hub with a dataset script (if the script has the same name as the directory)
1073
+ -> load the dataset builder from the dataset script in the dataset repository
1074
+ e.g. ``'username/dataset_name'``, a dataset repository on the HF hub containing a dataset script `'dataset_name.py'`.
1075
+
1076
+ name (:obj:`str`, optional): Defining the name of the dataset configuration.
1077
+ data_dir (:obj:`str`, optional): Defining the data_dir of the dataset configuration.
1078
+ data_files (:obj:`str` or :obj:`Sequence` or :obj:`Mapping`, optional): Path(s) to source data file(s).
1079
+ split (:class:`Split` or :obj:`str`): Which split of the data to load.
1080
+ If None, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
1081
+ If given, will return a single Dataset.
1082
+ Splits can be combined and specified like in tensorflow-datasets.
1083
+ cache_dir (:obj:`str`, optional): Directory to read/write data. Defaults to "~/.cache/huggingface/datasets".
1084
+ features (:class:`Features`, optional): Set the features type to use for this dataset.
1085
+ download_config (:class:`~utils.DownloadConfig`, optional): Specific download configuration parameters.
1086
+ download_mode (:class:`GenerateMode`, default ``REUSE_DATASET_IF_EXISTS``): Download/generate mode.
1087
+ ignore_verifications (:obj:`bool`, default ``False``): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
1088
+ keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset
1089
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
1090
+ nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section.
1091
+ save_infos (:obj:`bool`, default ``False``): Save the dataset information (checksums/size/splits/...).
1092
+ revision (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load:
1093
+
1094
+ - For canonical datasets in the `huggingface/datasets` library like "squad", the default version of the module is the local version of the lib.
1095
+ You can specify a different version from your local version of the lib (e.g. "master" or "1.2.0") but it might cause compatibility issues.
1096
+ - For community provided datasets like "lhoestq/squad" that have their own git repository on the Datasets Hub, the default version "main" corresponds to the "main" branch.
1097
+ You can specify a different version than the default "main" by using a commit sha or a git tag of the dataset repository.
1098
+ use_auth_token (``str`` or ``bool``, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
1099
+ If True, will get token from `"~/.huggingface"`.
1100
+ task (``str``): The task to prepare the dataset for during training and evaluation. Casts the dataset's :class:`Features` to standardized column names and types as detailed in :py:mod:`datasets.tasks`.
1101
+ streaming (``bool``, default ``False``): If set to True, don't download the data files. Instead, it streams the data progressively while
1102
+ iterating on the dataset. An IterableDataset or IterableDatasetDict is returned instead in this case.
1103
+
1104
+ Note that streaming works for datasets that use data formats that support being iterated over, such as txt, csv and jsonl.
1105
+ JSON files may be downloaded completely. Streaming from remote zip or gzip files is also supported, but other compressed formats
1106
+ like rar and xz are not yet supported. The tgz format doesn't allow streaming.
1107
+ script_version:
1108
+ .. deprecated:: 1.13
1109
+ 'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.
1110
+ **config_kwargs: Keyword arguments to be passed to the :class:`BuilderConfig` and used in the :class:`DatasetBuilder`.
1111
+
1112
+ Returns:
1113
+ :class:`Dataset` or :class:`DatasetDict`:
1114
+ - if `split` is not None: the dataset requested,
1115
+ - if `split` is None, a ``datasets.DatasetDict`` with each split.
1116
+
1117
+ or :class:`IterableDataset` or :class:`IterableDatasetDict`: if streaming=True
1118
+
1119
+ - if `split` is not None: the dataset requested,
1120
+ - if `split` is None, a ``datasets.streaming.IterableDatasetDict`` with each split.
1121
+
1122
+ """
1123
+ if script_version != "deprecated":
1124
+ warnings.warn(
1125
+ "'script_version' was renamed to 'revision' in version 1.13 and will be removed in 1.15.", FutureWarning
1126
+ )
1127
+ revision = script_version
1128
+ ignore_verifications = ignore_verifications or save_infos
1129
+
1130
+ # Create a dataset builder
1131
+ builder_instance = load_dataset_builder(
1132
+ path=path,
1133
+ name=name,
1134
+ data_dir=data_dir,
1135
+ data_files=data_files,
1136
+ cache_dir=cache_dir,
1137
+ features=features,
1138
+ download_config=download_config,
1139
+ download_mode=download_mode,
1140
+ revision=revision,
1141
+ use_auth_token=use_auth_token,
1142
+ **config_kwargs,
1143
+ )
1144
+
1145
+ # Return iterable dataset in case of streaming
1146
+ if streaming:
1147
+ # this extends the open and os.path.join functions for data streaming
1148
+ extend_module_for_streaming(builder_instance.__module__, use_auth_token=use_auth_token)
1149
+ return builder_instance.as_streaming_dataset(
1150
+ split=split,
1151
+ use_auth_token=use_auth_token,
1152
+ )
1153
+
1154
+ # Some datasets are already processed on the HF google storage
1155
+ # Don't try downloading from google storage for the packaged datasets as text, json, csv or pandas
1156
+ try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
1157
+
1158
+ # Download and prepare data
1159
+ builder_instance.download_and_prepare(
1160
+ download_config=download_config,
1161
+ download_mode=download_mode,
1162
+ ignore_verifications=ignore_verifications,
1163
+ try_from_hf_gcs=try_from_hf_gcs,
1164
+ use_auth_token=use_auth_token,
1165
+ )
1166
+
1167
+ # Build dataset for splits
1168
+ keep_in_memory = (
1169
+ keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
1170
+ )
1171
+ ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory)
1172
+ # Rename and cast features to match task schema
1173
+ if task is not None:
1174
+ ds = ds.prepare_for_task(task)
1175
+ if save_infos:
1176
+ builder_instance._save_infos()
1177
+
1178
+ return ds
1179
+
1180
+
1181
+
1182
+
1184
+ def load_from_disk(dataset_path: str, fs=None, keep_in_memory: Optional[bool] = None) -> Union[Dataset, DatasetDict]:
1185
+ """
1186
+ Loads a dataset that was previously saved using :meth:`Dataset.save_to_disk` from a dataset directory, or
1187
+ from a filesystem using either :class:`datasets.filesystems.S3FileSystem` or any implementation of
1188
+ ``fsspec.spec.AbstractFileSystem``.
1189
+
1190
+ Args:
1191
+ dataset_path (:obj:`str`): Path (e.g. `"dataset/train"`) or remote URI (e.g.
1192
+ `"s3://my-bucket/dataset/train"`) of the Dataset or DatasetDict directory where the dataset will be
1193
+ loaded from.
1194
+ fs (:class:`~filesystems.S3FileSystem` or ``fsspec.spec.AbstractFileSystem``, optional, default ``None``):
1195
+ Instance of the remote filesystem used to download the files from.
1196
+ keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset
1197
+ will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
1198
+ nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section.
1199
+
1200
+ Returns:
1201
+ :class:`Dataset` or :class:`DatasetDict`:
1202
+ - If `dataset_path` is a path of a dataset directory: the dataset requested.
1203
+ - If `dataset_path` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split.
1204
+ """
1205
+ # get the filesystem, either s3:// or file://, and the adjusted dataset_path
1206
+ if is_remote_filesystem(fs):
1207
+ dest_dataset_path = extract_path_from_uri(dataset_path)
1208
+ else:
1209
+ fs = fsspec.filesystem("file")
1210
+ dest_dataset_path = dataset_path
1211
+
1212
+ if not fs.exists(dest_dataset_path):
1213
+ raise FileNotFoundError("Directory {} not found".format(dataset_path))
1214
+ if fs.isfile(Path(dest_dataset_path, config.DATASET_INFO_FILENAME).as_posix()):
1215
+ return Dataset.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory)
1216
+ elif fs.isfile(Path(dest_dataset_path, config.DATASETDICT_JSON_FILENAME).as_posix()):
1217
+ return DatasetDict.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory)
1218
+ else:
1219
+ raise FileNotFoundError(
1220
+ "Directory {} is neither a dataset directory nor a dataset dict directory.".format(dataset_path)
1221
+ )
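
The file above mirrors the loading module of the `datasets` library (`prepare_module`, `load_dataset_builder`, `load_dataset`, `load_metric`, `load_from_disk`). As a minimal usage sketch, assuming this file is used in place of `datasets/load.py` inside a local checkout of the library (it relies on relative imports such as `from . import config`, so it cannot be imported standalone), and with a hypothetical local script path:

from datasets import load_dataset, load_metric

# Canonical dataset on the Hub: prepare_module() resolves 'squad' to its dataset script.
squad = load_dataset("squad", split="train[:100]")

# Local dataset script (hypothetical path): the script and its "# From:" imports are
# copied into the dynamic modules cache before the builder runs.
custom = load_dataset("./dataset/squad/squad.py")

# Metrics go through the same machinery, with dataset=False in prepare_module().
squad_metric = load_metric("squad")

Whether these calls hit GitHub, the Hugging Face Hub, or the local cache follows the resolution order documented in `prepare_module` above.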