nnilayy commited on
Commit
3ba4dea
·
verified ·
1 Parent(s): c3d41c8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. lib/python3.10/site-packages/audioread-3.0.1.dist-info/INSTALLER +1 -0
  3. lib/python3.10/site-packages/av/attachments/stream.cpython-310-x86_64-linux-gnu.so +3 -0
  4. lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so +3 -0
  5. lib/python3.10/site-packages/datasets/commands/__init__.py +13 -0
  6. lib/python3.10/site-packages/datasets/commands/convert.py +195 -0
  7. lib/python3.10/site-packages/datasets/commands/datasets_cli.py +43 -0
  8. lib/python3.10/site-packages/datasets/commands/dummy_data.py +468 -0
  9. lib/python3.10/site-packages/datasets/commands/env.py +39 -0
  10. lib/python3.10/site-packages/datasets/commands/run_beam.py +165 -0
  11. lib/python3.10/site-packages/datasets/commands/test.py +194 -0
  12. lib/python3.10/site-packages/datasets/io/__init__.py +0 -0
  13. lib/python3.10/site-packages/datasets/io/abc.py +53 -0
  14. lib/python3.10/site-packages/datasets/io/csv.py +144 -0
  15. lib/python3.10/site-packages/datasets/io/generator.py +58 -0
  16. lib/python3.10/site-packages/datasets/io/json.py +169 -0
  17. lib/python3.10/site-packages/datasets/io/parquet.py +157 -0
  18. lib/python3.10/site-packages/datasets/io/spark.py +57 -0
  19. lib/python3.10/site-packages/datasets/io/sql.py +127 -0
  20. lib/python3.10/site-packages/datasets/io/text.py +61 -0
  21. lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py +0 -0
  22. lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py +73 -0
  23. lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py +0 -0
  24. lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py +195 -0
  25. lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py +0 -0
  26. lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py +31 -0
  27. lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py +0 -0
  28. lib/python3.10/site-packages/datasets/packaged_modules/json/json.py +171 -0
  29. lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py +0 -0
  30. lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py +0 -0
  31. lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py +0 -0
  32. lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py +0 -0
  33. lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py +118 -0
  34. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/INSTALLER +1 -0
  35. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/LICENSE +7 -0
  36. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/METADATA +43 -0
  37. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/RECORD +10 -0
  38. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/REQUESTED +0 -0
  39. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/WHEEL +5 -0
  40. lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/top_level.txt +1 -0
  41. lib/python3.10/site-packages/importlib_resources/__init__.py +40 -0
  42. lib/python3.10/site-packages/importlib_resources/_adapters.py +168 -0
  43. lib/python3.10/site-packages/importlib_resources/_common.py +211 -0
  44. lib/python3.10/site-packages/importlib_resources/_functional.py +84 -0
  45. lib/python3.10/site-packages/importlib_resources/_itertools.py +38 -0
  46. lib/python3.10/site-packages/importlib_resources/abc.py +193 -0
  47. lib/python3.10/site-packages/importlib_resources/py.typed +0 -0
  48. lib/python3.10/site-packages/importlib_resources/readers.py +202 -0
  49. lib/python3.10/site-packages/importlib_resources/simple.py +106 -0
  50. lib/python3.10/site-packages/importlib_resources/tests/test_contents.py +39 -0
.gitattributes CHANGED
@@ -114,3 +114,5 @@ lib/python3.10/site-packages/av/filter/context.cpython-310-x86_64-linux-gnu.so f
114
  lib/python3.10/site-packages/av/filter/loudnorm.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
115
  lib/python3.10/site-packages/av/subtitles/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
116
  lib/python3.10/site-packages/av/subtitles/subtitle.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
114
  lib/python3.10/site-packages/av/filter/loudnorm.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
115
  lib/python3.10/site-packages/av/subtitles/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
116
  lib/python3.10/site-packages/av/subtitles/subtitle.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
117
+ lib/python3.10/site-packages/av/attachments/stream.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
118
+ lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/audioread-3.0.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ uv
lib/python3.10/site-packages/av/attachments/stream.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:553f03e1929b382b3f750d81c000d626ac95fad1a09255e1d22dee6113c9e679
3
+ size 338801
lib/python3.10/site-packages/av/codec/codec.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2cb8243da193ae6f4c48be60f2627606683549ee0df2f61548f6b93e0b372f32
3
+ size 889001
lib/python3.10/site-packages/datasets/commands/__init__.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from argparse import ArgumentParser
3
+
4
+
5
+ class BaseDatasetsCLICommand(ABC):
6
+ @staticmethod
7
+ @abstractmethod
8
+ def register_subcommand(parser: ArgumentParser):
9
+ raise NotImplementedError()
10
+
11
+ @abstractmethod
12
+ def run(self):
13
+ raise NotImplementedError()
lib/python3.10/site-packages/datasets/commands/convert.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import shutil
4
+ from argparse import ArgumentParser, Namespace
5
+
6
+ from datasets.commands import BaseDatasetsCLICommand
7
+ from datasets.utils.logging import get_logger
8
+
9
+
10
+ HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
11
+
12
+ HIGHLIGHT_MESSAGE_POST = """=======
13
+ >>>>>>>
14
+ """
15
+
16
+ TO_HIGHLIGHT = [
17
+ "TextEncoderConfig",
18
+ "ByteTextEncoder",
19
+ "SubwordTextEncoder",
20
+ "encoder_config",
21
+ "maybe_build_from_corpus",
22
+ "manual_dir",
23
+ ]
24
+
25
+ TO_CONVERT = [
26
+ # (pattern, replacement)
27
+ # Order is important here for some replacements
28
+ (r"tfds\.core", r"datasets"),
29
+ (r"tf\.io\.gfile\.GFile", r"open"),
30
+ (r"tf\.([\w\d]+)", r"datasets.Value('\1')"),
31
+ (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
32
+ (r"tfds\.features\.Text\(", r"datasets.Value('string'),"),
33
+ (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
34
+ (r"tfds\.features\.FeaturesDict\(", r"dict("),
35
+ (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
36
+ (r"tfds\.", r"datasets."),
37
+ (r"dl_manager\.manual_dir", r"self.config.data_dir"),
38
+ (r"self\.builder_config", r"self.config"),
39
+ ]
40
+
41
+
42
+ def convert_command_factory(args: Namespace):
43
+ """
44
+ Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
45
+
46
+ Returns: ConvertCommand
47
+ """
48
+ return ConvertCommand(args.tfds_path, args.datasets_directory)
49
+
50
+
51
+ class ConvertCommand(BaseDatasetsCLICommand):
52
+ @staticmethod
53
+ def register_subcommand(parser: ArgumentParser):
54
+ """
55
+ Register this command to argparse so it's available for the datasets-cli
56
+
57
+ Args:
58
+ parser: Root parser to register command-specific arguments
59
+ """
60
+ train_parser = parser.add_parser(
61
+ "convert",
62
+ help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
63
+ )
64
+ train_parser.add_argument(
65
+ "--tfds_path",
66
+ type=str,
67
+ required=True,
68
+ help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
69
+ )
70
+ train_parser.add_argument(
71
+ "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
72
+ )
73
+ train_parser.set_defaults(func=convert_command_factory)
74
+
75
+ def __init__(self, tfds_path: str, datasets_directory: str, *args):
76
+ self._logger = get_logger("datasets-cli/converting")
77
+
78
+ self._tfds_path = tfds_path
79
+ self._datasets_directory = datasets_directory
80
+
81
+ def run(self):
82
+ if os.path.isdir(self._tfds_path):
83
+ abs_tfds_path = os.path.abspath(self._tfds_path)
84
+ elif os.path.isfile(self._tfds_path):
85
+ abs_tfds_path = os.path.dirname(self._tfds_path)
86
+ else:
87
+ raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
88
+
89
+ abs_datasets_path = os.path.abspath(self._datasets_directory)
90
+
91
+ self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")
92
+
93
+ utils_files = []
94
+ with_manual_update = []
95
+ imports_to_builder_map = {}
96
+
97
+ if os.path.isdir(self._tfds_path):
98
+ file_names = os.listdir(abs_tfds_path)
99
+ else:
100
+ file_names = [os.path.basename(self._tfds_path)]
101
+
102
+ for f_name in file_names:
103
+ self._logger.info(f"Looking at file {f_name}")
104
+ input_file = os.path.join(abs_tfds_path, f_name)
105
+ output_file = os.path.join(abs_datasets_path, f_name)
106
+
107
+ if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
108
+ self._logger.info("Skipping file")
109
+ continue
110
+
111
+ with open(input_file, encoding="utf-8") as f:
112
+ lines = f.readlines()
113
+
114
+ out_lines = []
115
+ is_builder = False
116
+ needs_manual_update = False
117
+ tfds_imports = []
118
+ for line in lines:
119
+ out_line = line
120
+
121
+ # Convert imports
122
+ if "import tensorflow.compat.v2 as tf" in out_line:
123
+ continue
124
+ elif "@tfds.core" in out_line:
125
+ continue
126
+ elif "builder=self" in out_line:
127
+ continue
128
+ elif "import tensorflow_datasets.public_api as tfds" in out_line:
129
+ out_line = "import datasets\n"
130
+ elif "import tensorflow" in out_line:
131
+ # order is important here
132
+ out_line = ""
133
+ continue
134
+ elif "from absl import logging" in out_line:
135
+ out_line = "from datasets import logging\n"
136
+ elif "getLogger" in out_line:
137
+ out_line = out_line.replace("getLogger", "get_logger")
138
+ elif any(expression in out_line for expression in TO_HIGHLIGHT):
139
+ needs_manual_update = True
140
+ to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
141
+ out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
142
+ out_lines.append(out_line)
143
+ out_lines.append(HIGHLIGHT_MESSAGE_POST)
144
+ continue
145
+ else:
146
+ for pattern, replacement in TO_CONVERT:
147
+ out_line = re.sub(pattern, replacement, out_line)
148
+
149
+ # Take care of saving utilities (to later move them together with main script)
150
+ if "tensorflow_datasets" in out_line:
151
+ match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
152
+ tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
153
+ out_line = "from . import " + match.group(1)
154
+
155
+ # Check we have not forget anything
156
+ if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
157
+ raise ValueError(f"Error converting {out_line.strip()}")
158
+
159
+ if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
160
+ is_builder = True
161
+ out_lines.append(out_line)
162
+
163
+ if is_builder or "wmt" in f_name:
164
+ # We create a new directory for each dataset
165
+ dir_name = f_name.replace(".py", "")
166
+ output_dir = os.path.join(abs_datasets_path, dir_name)
167
+ output_file = os.path.join(output_dir, f_name)
168
+ os.makedirs(output_dir, exist_ok=True)
169
+ self._logger.info(f"Adding directory {output_dir}")
170
+ imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
171
+ else:
172
+ # Utilities will be moved at the end
173
+ utils_files.append(output_file)
174
+
175
+ if needs_manual_update:
176
+ with_manual_update.append(output_file)
177
+
178
+ with open(output_file, "w", encoding="utf-8") as f:
179
+ f.writelines(out_lines)
180
+ self._logger.info(f"Converted in {output_file}")
181
+
182
+ for utils_file in utils_files:
183
+ try:
184
+ f_name = os.path.basename(utils_file)
185
+ dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
186
+ self._logger.info(f"Moving {dest_folder} to {utils_file}")
187
+ shutil.copy(utils_file, dest_folder)
188
+ except KeyError:
189
+ self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")
190
+
191
+ if with_manual_update:
192
+ for file_path in with_manual_update:
193
+ self._logger.warning(
194
+ f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
195
+ )
lib/python3.10/site-packages/datasets/commands/datasets_cli.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+ from argparse import ArgumentParser
3
+
4
+ from datasets.commands.convert import ConvertCommand
5
+ from datasets.commands.dummy_data import DummyDataCommand
6
+ from datasets.commands.env import EnvironmentCommand
7
+ from datasets.commands.run_beam import RunBeamCommand
8
+ from datasets.commands.test import TestCommand
9
+ from datasets.utils.logging import set_verbosity_info
10
+
11
+
12
+ def parse_unknown_args(unknown_args):
13
+ return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
14
+
15
+
16
+ def main():
17
+ parser = ArgumentParser(
18
+ "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
19
+ )
20
+ commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
21
+ set_verbosity_info()
22
+
23
+ # Register commands
24
+ ConvertCommand.register_subcommand(commands_parser)
25
+ EnvironmentCommand.register_subcommand(commands_parser)
26
+ TestCommand.register_subcommand(commands_parser)
27
+ RunBeamCommand.register_subcommand(commands_parser)
28
+ DummyDataCommand.register_subcommand(commands_parser)
29
+
30
+ # Parse args
31
+ args, unknown_args = parser.parse_known_args()
32
+ if not hasattr(args, "func"):
33
+ parser.print_help()
34
+ exit(1)
35
+ kwargs = parse_unknown_args(unknown_args)
36
+
37
+ # Run
38
+ service = args.func(args, **kwargs)
39
+ service.run()
40
+
41
+
42
+ if __name__ == "__main__":
43
+ main()
lib/python3.10/site-packages/datasets/commands/dummy_data.py ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fnmatch
2
+ import json
3
+ import os
4
+ import shutil
5
+ import tempfile
6
+ import xml.etree.ElementTree as ET
7
+ from argparse import ArgumentParser
8
+ from pathlib import Path
9
+ from typing import Optional
10
+
11
+ from datasets import config
12
+ from datasets.commands import BaseDatasetsCLICommand
13
+ from datasets.download.download_config import DownloadConfig
14
+ from datasets.download.download_manager import DownloadManager
15
+ from datasets.download.mock_download_manager import MockDownloadManager
16
+ from datasets.load import dataset_module_factory, import_main_class
17
+ from datasets.utils.deprecation_utils import deprecated
18
+ from datasets.utils.logging import get_logger, set_verbosity_warning
19
+ from datasets.utils.py_utils import map_nested
20
+
21
+
22
+ logger = get_logger(__name__)
23
+
24
+ DEFAULT_ENCODING = "utf-8"
25
+
26
+
27
+ def dummy_data_command_factory(args):
28
+ return DummyDataCommand(
29
+ args.path_to_dataset,
30
+ args.auto_generate,
31
+ args.n_lines,
32
+ args.json_field,
33
+ args.xml_tag,
34
+ args.match_text_files,
35
+ args.keep_uncompressed,
36
+ args.cache_dir,
37
+ args.encoding,
38
+ )
39
+
40
+
41
+ class DummyDataGeneratorDownloadManager(DownloadManager):
42
+ def __init__(self, mock_download_manager, *args, **kwargs):
43
+ super().__init__(*args, **kwargs)
44
+ self.mock_download_manager = mock_download_manager
45
+ self.downloaded_dummy_paths = []
46
+ self.expected_dummy_paths = []
47
+
48
+ def download(self, url_or_urls):
49
+ output = super().download(url_or_urls)
50
+ dummy_output = self.mock_download_manager.download(url_or_urls)
51
+ map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
52
+ map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
53
+ return output
54
+
55
+ def download_and_extract(self, url_or_urls):
56
+ output = super().extract(super().download(url_or_urls))
57
+ dummy_output = self.mock_download_manager.download(url_or_urls)
58
+ map_nested(self.downloaded_dummy_paths.append, output, map_tuple=True)
59
+ map_nested(self.expected_dummy_paths.append, dummy_output, map_tuple=True)
60
+ return output
61
+
62
+ def auto_generate_dummy_data_folder(
63
+ self,
64
+ n_lines: int = 5,
65
+ json_field: Optional[str] = None,
66
+ xml_tag: Optional[str] = None,
67
+ match_text_files: Optional[str] = None,
68
+ encoding: Optional[str] = None,
69
+ ) -> bool:
70
+ os.makedirs(
71
+ os.path.join(
72
+ self.mock_download_manager.datasets_scripts_dir,
73
+ self.mock_download_manager.dataset_name,
74
+ self.mock_download_manager.dummy_data_folder,
75
+ "dummy_data",
76
+ ),
77
+ exist_ok=True,
78
+ )
79
+ total = 0
80
+ self.mock_download_manager.load_existing_dummy_data = False
81
+ for src_path, relative_dst_path in zip(self.downloaded_dummy_paths, self.expected_dummy_paths):
82
+ dst_path = os.path.join(
83
+ self.mock_download_manager.datasets_scripts_dir,
84
+ self.mock_download_manager.dataset_name,
85
+ self.mock_download_manager.dummy_data_folder,
86
+ relative_dst_path,
87
+ )
88
+ total += self._create_dummy_data(
89
+ src_path,
90
+ dst_path,
91
+ n_lines=n_lines,
92
+ json_field=json_field,
93
+ xml_tag=xml_tag,
94
+ match_text_files=match_text_files,
95
+ encoding=encoding,
96
+ )
97
+ if total == 0:
98
+ logger.error(
99
+ "Dummy data generation failed: no dummy files were created. "
100
+ "Make sure the data files format is supported by the auto-generation."
101
+ )
102
+ return total > 0
103
+
104
+ def _create_dummy_data(
105
+ self,
106
+ src_path: str,
107
+ dst_path: str,
108
+ n_lines: int,
109
+ json_field: Optional[str] = None,
110
+ xml_tag: Optional[str] = None,
111
+ match_text_files: Optional[str] = None,
112
+ encoding: Optional[str] = None,
113
+ ) -> int:
114
+ encoding = encoding or DEFAULT_ENCODING
115
+ if os.path.isfile(src_path):
116
+ logger.debug(f"Trying to generate dummy data file {dst_path}")
117
+ dst_path_extensions = Path(dst_path).suffixes
118
+ line_by_line_extensions = [".txt", ".csv", ".jsonl", ".tsv"]
119
+ is_line_by_line_text_file = any(extension in dst_path_extensions for extension in line_by_line_extensions)
120
+ if match_text_files is not None:
121
+ file_name = os.path.basename(dst_path)
122
+ for pattern in match_text_files.split(","):
123
+ is_line_by_line_text_file |= fnmatch.fnmatch(file_name, pattern)
124
+ # Line by line text file (txt, csv etc.)
125
+ if is_line_by_line_text_file:
126
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
127
+ with open(src_path, encoding=encoding) as src_file:
128
+ with open(dst_path, "w", encoding=encoding) as dst_file:
129
+ first_lines = []
130
+ for i, line in enumerate(src_file):
131
+ if i >= n_lines:
132
+ break
133
+ first_lines.append(line)
134
+ dst_file.write("".join(first_lines).strip())
135
+ return 1
136
+ # json file
137
+ elif ".json" in dst_path_extensions:
138
+ with open(src_path, encoding=encoding) as src_file:
139
+ json_data = json.load(src_file)
140
+ if json_field is not None:
141
+ json_data = json_data[json_field]
142
+ if isinstance(json_data, dict):
143
+ if not all(isinstance(v, list) for v in json_data.values()):
144
+ raise ValueError(
145
+ f"Couldn't parse columns {list(json_data.keys())}. "
146
+ "Maybe specify which json field must be used "
147
+ "to read the data with --json_field <my_field>."
148
+ )
149
+ first_json_data = {k: v[:n_lines] for k, v in json_data.items()}
150
+ else:
151
+ first_json_data = json_data[:n_lines]
152
+ if json_field is not None:
153
+ first_json_data = {json_field: first_json_data}
154
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
155
+ with open(dst_path, "w", encoding=encoding) as dst_file:
156
+ json.dump(first_json_data, dst_file)
157
+ return 1
158
+ # xml file
159
+ elif any(extension in dst_path_extensions for extension in [".xml", ".txm"]):
160
+ if xml_tag is None:
161
+ logger.warning("Found xml file but 'xml_tag' is set to None. Please provide --xml_tag")
162
+ else:
163
+ self._create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=n_lines, encoding=encoding)
164
+ return 1
165
+ logger.warning(
166
+ f"Couldn't generate dummy file '{dst_path}'. " "Ignore that if this file is not useful for dummy data."
167
+ )
168
+ return 0
169
+ # directory, iterate through all files
170
+ elif os.path.isdir(src_path):
171
+ total = 0
172
+ for path, _, files in os.walk(src_path):
173
+ for name in files:
174
+ if not name.startswith("."): # ignore files like .DS_Store etc.
175
+ src_file_path = os.path.join(path, name)
176
+ dst_file_path = os.path.join(dst_path, Path(src_file_path).relative_to(src_path))
177
+ total += self._create_dummy_data(
178
+ src_file_path,
179
+ dst_file_path,
180
+ n_lines=n_lines,
181
+ json_field=json_field,
182
+ xml_tag=xml_tag,
183
+ match_text_files=match_text_files,
184
+ encoding=encoding,
185
+ )
186
+ return total
187
+
188
+ @staticmethod
189
+ def _create_xml_dummy_data(src_path, dst_path, xml_tag, n_lines=5, encoding=DEFAULT_ENCODING):
190
+ Path(dst_path).parent.mkdir(exist_ok=True, parents=True)
191
+ with open(src_path, encoding=encoding) as src_file:
192
+ n_line = 0
193
+ parents = []
194
+ for event, elem in ET.iterparse(src_file, events=("start", "end")):
195
+ if event == "start":
196
+ parents.append(elem)
197
+ else:
198
+ _ = parents.pop()
199
+ if elem.tag == xml_tag:
200
+ if n_line < n_lines:
201
+ n_line += 1
202
+ else:
203
+ if parents:
204
+ parents[-1].remove(elem)
205
+ ET.ElementTree(element=elem).write(dst_path, encoding=encoding)
206
+
207
+ def compress_autogenerated_dummy_data(self, path_to_dataset):
208
+ root_dir = os.path.join(path_to_dataset, self.mock_download_manager.dummy_data_folder)
209
+ base_name = os.path.join(root_dir, "dummy_data")
210
+ base_dir = "dummy_data"
211
+ logger.info(f"Compressing dummy data folder to '{base_name}.zip'")
212
+ shutil.make_archive(base_name, "zip", root_dir, base_dir)
213
+ shutil.rmtree(base_name)
214
+
215
+
216
+ @deprecated(
217
+ "The `datasets` repository does not host the dataset scripts anymore. Therefore, dummy data is no longer needed to test their loading with CI."
218
+ )
219
+ class DummyDataCommand(BaseDatasetsCLICommand):
220
+ @staticmethod
221
+ def register_subcommand(parser: ArgumentParser):
222
+ test_parser = parser.add_parser("dummy_data", help="Generate dummy data.")
223
+ test_parser.add_argument("--auto_generate", action="store_true", help="Automatically generate dummy data")
224
+ test_parser.add_argument(
225
+ "--n_lines", type=int, default=5, help="Number of lines or samples to keep when auto-generating dummy data"
226
+ )
227
+ test_parser.add_argument(
228
+ "--json_field",
229
+ type=str,
230
+ default=None,
231
+ help="Optional, json field to read the data from when auto-generating dummy data. In the json data files, this field must point to a list of samples as json objects (ex: the 'data' field for squad-like files)",
232
+ )
233
+ test_parser.add_argument(
234
+ "--xml_tag",
235
+ type=str,
236
+ default=None,
237
+ help="Optional, xml tag name of the samples inside the xml files when auto-generating dummy data.",
238
+ )
239
+ test_parser.add_argument(
240
+ "--match_text_files",
241
+ type=str,
242
+ default=None,
243
+ help="Optional, a comma separated list of file patterns that looks for line-by-line text files other than *.txt or *.csv. Example: --match_text_files *.label",
244
+ )
245
+ test_parser.add_argument(
246
+ "--keep_uncompressed",
247
+ action="store_true",
248
+ help="Whether to leave the dummy data folders uncompressed when auto-generating dummy data. Useful for debugging for to do manual adjustements before compressing.",
249
+ )
250
+ test_parser.add_argument(
251
+ "--cache_dir",
252
+ type=str,
253
+ default=None,
254
+ help="Cache directory to download and cache files when auto-generating dummy data",
255
+ )
256
+ test_parser.add_argument(
257
+ "--encoding",
258
+ type=str,
259
+ default=None,
260
+ help=f"Encoding to use when auto-generating dummy data. Defaults to {DEFAULT_ENCODING}",
261
+ )
262
+ test_parser.add_argument("path_to_dataset", type=str, help="Path to the dataset (example: ./datasets/squad)")
263
+ test_parser.set_defaults(func=dummy_data_command_factory)
264
+
265
+ def __init__(
266
+ self,
267
+ path_to_dataset: str,
268
+ auto_generate: bool,
269
+ n_lines: int,
270
+ json_field: Optional[str],
271
+ xml_tag: Optional[str],
272
+ match_text_files: Optional[str],
273
+ keep_uncompressed: bool,
274
+ cache_dir: Optional[str],
275
+ encoding: Optional[str],
276
+ ):
277
+ self._path_to_dataset = path_to_dataset
278
+ if os.path.isdir(path_to_dataset):
279
+ self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-1]
280
+ else:
281
+ self._dataset_name = path_to_dataset.replace(os.sep, "/").split("/")[-2]
282
+ cache_dir = os.path.expanduser(cache_dir or config.HF_DATASETS_CACHE)
283
+ self._auto_generate = auto_generate
284
+ self._n_lines = n_lines
285
+ self._json_field = json_field
286
+ self._xml_tag = xml_tag
287
+ self._match_text_files = match_text_files
288
+ self._keep_uncompressed = keep_uncompressed
289
+ self._cache_dir = cache_dir
290
+ self._encoding = encoding
291
+
292
+ def run(self):
293
+ set_verbosity_warning()
294
+ dataset_module = dataset_module_factory(self._path_to_dataset)
295
+ builder_cls = import_main_class(dataset_module.module_path)
296
+
297
+ # use `None` as config if no configs
298
+ builder_configs = builder_cls.BUILDER_CONFIGS or [None]
299
+ auto_generate_results = []
300
+ with tempfile.TemporaryDirectory() as tmp_dir:
301
+ for builder_config in builder_configs:
302
+ config_name = builder_config.name if builder_config else None
303
+ dataset_builder = builder_cls(config_name=config_name, hash=dataset_module.hash, cache_dir=tmp_dir)
304
+ version = builder_config.version if builder_config else dataset_builder.config.version
305
+ mock_dl_manager = MockDownloadManager(
306
+ dataset_name=self._dataset_name,
307
+ config=builder_config,
308
+ version=version,
309
+ use_local_dummy_data=True,
310
+ load_existing_dummy_data=False,
311
+ )
312
+
313
+ if self._auto_generate:
314
+ auto_generate_results.append(
315
+ self._autogenerate_dummy_data(
316
+ dataset_builder=dataset_builder,
317
+ mock_dl_manager=mock_dl_manager,
318
+ keep_uncompressed=self._keep_uncompressed,
319
+ )
320
+ )
321
+ else:
322
+ self._print_dummy_data_instructions(
323
+ dataset_builder=dataset_builder, mock_dl_manager=mock_dl_manager
324
+ )
325
+ if self._auto_generate and not self._keep_uncompressed:
326
+ if all(auto_generate_results):
327
+ print(f"Automatic dummy data generation succeeded for all configs of '{self._path_to_dataset}'")
328
+ else:
329
+ print(f"Automatic dummy data generation failed for some configs of '{self._path_to_dataset}'")
330
+
331
+ def _autogenerate_dummy_data(self, dataset_builder, mock_dl_manager, keep_uncompressed) -> Optional[bool]:
332
+ dl_cache_dir = (
333
+ os.path.join(self._cache_dir, config.DOWNLOADED_DATASETS_DIR)
334
+ if self._cache_dir
335
+ else config.DOWNLOADED_DATASETS_PATH
336
+ )
337
+ download_config = DownloadConfig(cache_dir=dl_cache_dir)
338
+ dl_manager = DummyDataGeneratorDownloadManager(
339
+ dataset_name=self._dataset_name, mock_download_manager=mock_dl_manager, download_config=download_config
340
+ )
341
+ dataset_builder._split_generators(dl_manager)
342
+ mock_dl_manager.load_existing_dummy_data = False # don't use real dummy data
343
+ dl_manager.auto_generate_dummy_data_folder(
344
+ n_lines=self._n_lines,
345
+ json_field=self._json_field,
346
+ xml_tag=self._xml_tag,
347
+ match_text_files=self._match_text_files,
348
+ encoding=self._encoding,
349
+ )
350
+ if not keep_uncompressed:
351
+ path_do_dataset = os.path.join(mock_dl_manager.datasets_scripts_dir, mock_dl_manager.dataset_name)
352
+ dl_manager.compress_autogenerated_dummy_data(path_do_dataset)
353
+ # now test that the dummy_data.zip file actually works
354
+ mock_dl_manager.load_existing_dummy_data = True # use real dummy data
355
+ n_examples_per_split = {}
356
+ os.makedirs(dataset_builder._cache_dir, exist_ok=True)
357
+ try:
358
+ split_generators = dataset_builder._split_generators(mock_dl_manager)
359
+ for split_generator in split_generators:
360
+ dataset_builder._prepare_split(split_generator, check_duplicate_keys=False)
361
+ n_examples_per_split[split_generator.name] = split_generator.split_info.num_examples
362
+ except OSError as e:
363
+ logger.error(
364
+ f"Failed to load dummy data for config '{dataset_builder.config.name}''.\nOriginal error:\n"
365
+ + str(e)
366
+ )
367
+ return False
368
+ else:
369
+ if all(n_examples > 0 for n_examples in n_examples_per_split.values()):
370
+ logger.warning(
371
+ f"Dummy data generation done and dummy data test succeeded for config '{dataset_builder.config.name}''."
372
+ )
373
+ return True
374
+ else:
375
+ empty_splits = [
376
+ split_name for split_name in n_examples_per_split if n_examples_per_split[split_name] == 0
377
+ ]
378
+ logger.warning(
379
+ f"Dummy data generation done but dummy data test failed since splits {empty_splits} have 0 examples for config '{dataset_builder.config.name}''."
380
+ )
381
+ return False
382
+ else:
383
+ generated_dummy_data_dir = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
384
+ logger.info(
385
+ f"Dummy data generated in directory '{generated_dummy_data_dir}' but kept uncompressed. "
386
+ "Please compress this directory into a zip file to use it for dummy data tests."
387
+ )
388
+
389
    def _print_dummy_data_instructions(self, dataset_builder, mock_dl_manager):
        """Print step-by-step instructions for creating dummy data by hand.

        Runs the builder's split generators and example generator against the
        mock download manager; every `FileNotFoundError` raised tells us which
        dummy file the user must create. The collected guidance is printed at
        the end as one formatted block.
        """
        dummy_data_folder = os.path.join(self._path_to_dataset, mock_dl_manager.dummy_data_folder)
        logger.info(f"Creating dummy folder structure for {dummy_data_folder}... ")
        os.makedirs(dummy_data_folder, exist_ok=True)

        try:
            generator_splits = dataset_builder._split_generators(mock_dl_manager)
        except FileNotFoundError as e:
            # Best effort: warn and continue with whatever guidance we can give.
            print(
                f"Dataset {self._dataset_name} with config {mock_dl_manager.config} seems to already open files in the method `_split_generators(...)`. You might consider to instead only open files in the method `_generate_examples(...)` instead. If this is not possible the dummy data has to be created with less guidance. Make sure you create the file {e.filename}."
            )

        # NOTE(review): if `_split_generators` raised above, `generator_splits`
        # is unbound here and the loop below raises NameError — confirm intended.
        files_to_create = set()
        split_names = []
        dummy_file_name = mock_dl_manager.dummy_file_name

        for split in generator_splits:
            logger.info(f"Collecting dummy data file paths to create for {split.name}")
            split_names.append(split.name)
            gen_kwargs = split.gen_kwargs
            generator = dataset_builder._generate_examples(**gen_kwargs)

            try:
                # The guidance header is rebuilt for each split; only the last
                # iteration's text survives unless an exception is raised.
                dummy_data_guidance_print = "\n" + 30 * "=" + "DUMMY DATA INSTRUCTIONS" + 30 * "=" + "\n"
                config_string = (
                    f"config {mock_dl_manager.config.name} of " if mock_dl_manager.config is not None else ""
                )
                dummy_data_guidance_print += (
                    "- In order to create the dummy data for "
                    + config_string
                    + f"{self._dataset_name}, please go into the folder '{dummy_data_folder}' with `cd {dummy_data_folder}` . \n\n"
                )

                # trigger generate function
                for key, record in generator:
                    pass

                dummy_data_guidance_print += f"- It appears that the function `_generate_examples(...)` expects one or more files in the folder {dummy_file_name} using the function `glob.glob(...)`. In this case, please refer to the `_generate_examples(...)` method to see under which filename the dummy data files should be created. \n\n"

            except FileNotFoundError as e:
                # Each missing file encountered while generating examples is a
                # dummy file the user needs to create.
                files_to_create.add(e.filename)

        split_names = ", ".join(split_names)
        if len(files_to_create) > 0:
            # no glob.glob(...) in `_generate_examples(...)`
            if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
                dummy_data_guidance_print += f"- Please create a single dummy data file called '{next(iter(files_to_create))}' from the folder '{dummy_data_folder}'. Make sure that the dummy data file provides at least one example for the split(s) '{split_names}' \n\n"
                files_string = dummy_file_name
            else:
                files_string = ", ".join(files_to_create)
                dummy_data_guidance_print += f"- Please create the following dummy data files '{files_string}' from the folder '{dummy_data_folder}'\n\n"

                dummy_data_guidance_print += f"- For each of the splits '{split_names}', make sure that one or more of the dummy data files provide at least one example \n\n"

            dummy_data_guidance_print += f"- If the method `_generate_examples(...)` includes multiple `open()` statements, you might have to create other files in addition to '{files_string}'. In this case please refer to the `_generate_examples(...)` method \n\n"

        # Single-file case: the zip wraps one file; otherwise zip the folder.
        if len(files_to_create) == 1 and next(iter(files_to_create)) == dummy_file_name:
            dummy_data_guidance_print += f"- After the dummy data file is created, it should be zipped to '{dummy_file_name}.zip' with the command `zip {dummy_file_name}.zip {dummy_file_name}` \n\n"

            dummy_data_guidance_print += (
                f"- You can now delete the file '{dummy_file_name}' with the command `rm {dummy_file_name}` \n\n"
            )

            dummy_data_guidance_print += f"- To get the file '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"
        else:
            dummy_data_guidance_print += f"- After all dummy data files are created, they should be zipped recursively to '{dummy_file_name}.zip' with the command `zip -r {dummy_file_name}.zip {dummy_file_name}/` \n\n"

            dummy_data_guidance_print += (
                f"- You can now delete the folder '{dummy_file_name}' with the command `rm -r {dummy_file_name}` \n\n"
            )

            dummy_data_guidance_print += f"- To get the folder '{dummy_file_name}' back for further changes to the dummy data, simply unzip {dummy_file_name}.zip with the command `unzip {dummy_file_name}.zip` \n\n"

        dummy_data_guidance_print += (
            f"- Make sure you have created the file '{dummy_file_name}.zip' in '{dummy_data_folder}' \n"
        )

        dummy_data_guidance_print += 83 * "=" + "\n"

        print(dummy_data_guidance_print)
lib/python3.10/site-packages/datasets/commands/env.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import platform
2
+ from argparse import ArgumentParser
3
+
4
+ import huggingface_hub
5
+ import pandas
6
+ import pyarrow
7
+
8
+ from datasets import __version__ as version
9
+ from datasets.commands import BaseDatasetsCLICommand
10
+
11
+
12
def info_command_factory(_args):
    """Factory wired into argparse via ``set_defaults(func=...)``.

    The parsed arguments are ignored because the ``env`` command takes
    no options.
    """
    return EnvironmentCommand()
14
+
15
+
16
class EnvironmentCommand(BaseDatasetsCLICommand):
    """CLI command that prints version/platform info for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Attach the `env` subcommand and route it to the factory above.
        env_parser = parser.add_parser("env", help="Print relevant system environment info.")
        env_parser.set_defaults(func=info_command_factory)

    def run(self):
        """Collect environment details, print them issue-ready, return them."""
        info = {
            "`datasets` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "PyArrow version": pyarrow.__version__,
            "Pandas version": pandas.__version__,
        }

        print("\nCopy-and-paste the text below in your GitHub issue.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render a mapping as a newline-terminated bulleted list."""
        rendered = "\n".join(f"- {prop}: {val}" for prop, val in d.items())
        return rendered + "\n"
lib/python3.10/site-packages/datasets/commands/run_beam.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from argparse import ArgumentParser
3
+ from pathlib import Path
4
+ from shutil import copyfile
5
+ from typing import List
6
+
7
+ from datasets import config
8
+ from datasets.builder import DatasetBuilder
9
+ from datasets.commands import BaseDatasetsCLICommand
10
+ from datasets.download.download_config import DownloadConfig
11
+ from datasets.download.download_manager import DownloadMode
12
+ from datasets.load import dataset_module_factory, import_main_class
13
+ from datasets.utils.info_utils import VerificationMode
14
+
15
+
16
def run_beam_command_factory(args, **kwargs):
    """Build a `RunBeamCommand` from parsed CLI arguments.

    The deprecated `--save_infos` alias is OR-ed into `--save_info` so
    either flag enables saving the dataset infos.
    """
    wants_save_info = args.save_info or args.save_infos
    return RunBeamCommand(
        args.dataset,
        args.name,
        args.cache_dir,
        args.beam_pipeline_options,
        args.data_dir,
        args.all_configs,
        wants_save_info,
        args.ignore_verifications,
        args.force_redownload,
        **kwargs,
    )
29
+
30
+
31
class RunBeamCommand(BaseDatasetsCLICommand):
    """`datasets-cli run_beam`: run an Apache Beam dataset preparation pipeline."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `run_beam` subcommand and its CLI options."""
        run_beam_parser = parser.add_parser("run_beam", help="Run a Beam dataset processing pipeline")
        run_beam_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
        run_beam_parser.add_argument("--name", type=str, default=None, help="Dataset config name")
        run_beam_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory where the datasets are stored",
        )
        run_beam_parser.add_argument(
            "--beam_pipeline_options",
            type=str,
            default="",
            help="Beam pipeline options, separated by commas. Example:: `--beam_pipeline_options=job_name=my-job,project=my-project`",
        )
        run_beam_parser.add_argument(
            "--data_dir",
            type=str,
            default=None,
            help="Can be used to specify a manual directory to get the files from",
        )
        run_beam_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
        run_beam_parser.add_argument("--save_info", action="store_true", help="Save the dataset infos file")
        run_beam_parser.add_argument(
            "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks"
        )
        run_beam_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
        # aliases
        run_beam_parser.add_argument("--save_infos", action="store_true", help="alias for save_info")
        run_beam_parser.set_defaults(func=run_beam_command_factory)

    def __init__(
        self,
        dataset: str,
        name: str,
        cache_dir: str,
        beam_pipeline_options: str,
        data_dir: str,
        all_configs: bool,
        save_infos: bool,
        ignore_verifications: bool,
        force_redownload: bool,
        **config_kwargs,
    ):
        # All arguments are simply stored; the actual work happens in run().
        self._dataset = dataset
        self._name = name
        self._cache_dir = cache_dir
        self._beam_pipeline_options = beam_pipeline_options
        self._data_dir = data_dir
        self._all_configs = all_configs
        self._save_infos = save_infos
        self._ignore_verifications = ignore_verifications
        self._force_redownload = force_redownload
        self._config_kwargs = config_kwargs

    def run(self):
        """Build the selected config(s) with Beam and optionally save infos."""
        # Imported lazily so the CLI works without apache_beam installed.
        import apache_beam as beam

        if self._name is not None and self._all_configs:
            print("Both parameters `name` and `all_configs` can't be used at once.")
            exit(1)
        path, config_name = self._dataset, self._name
        dataset_module = dataset_module_factory(path)
        builder_cls = import_main_class(dataset_module.module_path)
        builders: List[DatasetBuilder] = []
        # Comma-separated options are converted into `--key=value` Beam flags.
        if self._beam_pipeline_options:
            beam_options = beam.options.pipeline_options.PipelineOptions(
                flags=[f"--{opt.strip()}" for opt in self._beam_pipeline_options.split(",") if opt]
            )
        else:
            beam_options = None
        if self._all_configs and len(builder_cls.BUILDER_CONFIGS) > 0:
            # One builder per declared config.
            # NOTE(review): this branch passes `hash=` but not `**self._config_kwargs`,
            # unlike the single-config branch below — confirm intended.
            for builder_config in builder_cls.BUILDER_CONFIGS:
                builders.append(
                    builder_cls(
                        config_name=builder_config.name,
                        data_dir=self._data_dir,
                        hash=dataset_module.hash,
                        beam_options=beam_options,
                        cache_dir=self._cache_dir,
                        base_path=dataset_module.builder_kwargs.get("base_path"),
                    )
                )
        else:
            builders.append(
                builder_cls(
                    config_name=config_name,
                    data_dir=self._data_dir,
                    beam_options=beam_options,
                    cache_dir=self._cache_dir,
                    base_path=dataset_module.builder_kwargs.get("base_path"),
                    **self._config_kwargs,
                )
            )

        for builder in builders:
            builder.download_and_prepare(
                download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
                if not self._force_redownload
                else DownloadMode.FORCE_REDOWNLOAD,
                download_config=DownloadConfig(cache_dir=config.DOWNLOADED_DATASETS_PATH),
                verification_mode=VerificationMode.NO_CHECKS
                if self._ignore_verifications
                else VerificationMode.ALL_CHECKS,
                try_from_hf_gcs=False,
            )
            if self._save_infos:
                builder._save_infos()

        print("Apache beam run successful.")

        # If save_infos=True, the dataset infos file is created next to the loaded module file.
        # Let's move it to the original directory of the dataset script, to allow the user to
        # upload them on S3 at the same time afterwards.
        if self._save_infos:
            dataset_infos_path = os.path.join(builder_cls.get_imported_module_dir(), config.DATASETDICT_INFOS_FILENAME)

            name = Path(path).name + ".py"

            combined_path = os.path.join(path, name)
            if os.path.isfile(path):
                dataset_dir = os.path.dirname(path)
            elif os.path.isfile(combined_path):
                dataset_dir = path
            else:  # in case of a remote dataset
                print(f"Dataset Infos file saved at {dataset_infos_path}")
                exit(1)

            # Move datasetinfo back to the user
            user_dataset_infos_path = os.path.join(dataset_dir, config.DATASETDICT_INFOS_FILENAME)
            copyfile(dataset_infos_path, user_dataset_infos_path)
            print(f"Dataset Infos file saved at {user_dataset_infos_path}")
lib/python3.10/site-packages/datasets/commands/test.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from argparse import ArgumentParser
3
+ from pathlib import Path
4
+ from shutil import copyfile, rmtree
5
+ from typing import Generator
6
+
7
+ import datasets.config
8
+ from datasets.builder import DatasetBuilder
9
+ from datasets.commands import BaseDatasetsCLICommand
10
+ from datasets.download.download_manager import DownloadMode
11
+ from datasets.load import dataset_module_factory, import_main_class
12
+ from datasets.utils.filelock import logger as fl_logger
13
+ from datasets.utils.info_utils import VerificationMode
14
+ from datasets.utils.logging import ERROR, get_logger
15
+
16
+
17
+ logger = get_logger(__name__)
18
+
19
+
20
def _test_command_factory(args):
    """Translate parsed CLI arguments into a `TestCommand` instance.

    The deprecated `--save_infos` alias is folded into `--save_info`.
    """
    wants_save_info = args.save_info or args.save_infos
    return TestCommand(
        args.dataset,
        args.name,
        args.cache_dir,
        args.data_dir,
        args.all_configs,
        wants_save_info,
        args.ignore_verifications,
        args.force_redownload,
        args.clear_cache,
    )
32
+
33
+
34
class TestCommand(BaseDatasetsCLICommand):
    """`datasets-cli test`: download-and-prepare a dataset script to check it works."""

    __test__ = False  # to tell pytest it's not a test class

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `test` subcommand and its CLI options."""
        test_parser = parser.add_parser("test", help="Test dataset implementation.")
        test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
        test_parser.add_argument(
            "--cache_dir",
            type=str,
            default=None,
            help="Cache directory where the datasets are stored.",
        )
        test_parser.add_argument(
            "--data_dir",
            type=str,
            default=None,
            help="Can be used to specify a manual directory to get the files from.",
        )
        test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
        test_parser.add_argument(
            "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
        )
        test_parser.add_argument(
            "--ignore_verifications",
            action="store_true",
            help="Run the test without checksums and splits checks.",
        )
        test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
        test_parser.add_argument(
            "--clear_cache",
            action="store_true",
            help="Remove downloaded files and cached datasets after each config test",
        )
        # aliases
        test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
        test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
        test_parser.set_defaults(func=_test_command_factory)

    def __init__(
        self,
        dataset: str,
        name: str,
        cache_dir: str,
        data_dir: str,
        all_configs: bool,
        save_infos: bool,
        ignore_verifications: bool,
        force_redownload: bool,
        clear_cache: bool,
    ):
        self._dataset = dataset
        self._name = name
        self._cache_dir = cache_dir
        self._data_dir = data_dir
        self._all_configs = all_configs
        self._save_infos = save_infos
        self._ignore_verifications = ignore_verifications
        self._force_redownload = force_redownload
        self._clear_cache = clear_cache
        # --clear_cache wipes directories after each config, so an explicit
        # cache_dir is required to avoid deleting the user's default cache.
        if clear_cache and not cache_dir:
            print(
                "When --clear_cache is used, specifying a cache directory is mandatory.\n"
                "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
                "Please provide a --cache_dir that will be used to test the dataset script."
            )
            exit(1)
        # Saving infos implies regenerating them, so verifications are skipped.
        if save_infos:
            self._ignore_verifications = True

    def run(self):
        """Build every selected config, optionally save infos and clear caches."""
        fl_logger().setLevel(ERROR)
        if self._name is not None and self._all_configs:
            # NOTE(review): message says `config` but the CLI flag is `--name`.
            print("Both parameters `config` and `all_configs` can't be used at once.")
            exit(1)
        path, config_name = self._dataset, self._name
        module = dataset_module_factory(path)
        builder_cls = import_main_class(module.module_path)
        n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1

        def get_builders() -> Generator[DatasetBuilder, None, None]:
            """Yield one builder per config to test (lazily, to limit memory)."""
            if self._all_configs and builder_cls.BUILDER_CONFIGS:
                for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
                    # A config_name in builder_kwargs takes precedence over the
                    # enumerated configs.
                    if "config_name" in module.builder_kwargs:
                        yield builder_cls(
                            cache_dir=self._cache_dir,
                            data_dir=self._data_dir,
                            **module.builder_kwargs,
                        )
                    else:
                        yield builder_cls(
                            config_name=config.name,
                            cache_dir=self._cache_dir,
                            data_dir=self._data_dir,
                            **module.builder_kwargs,
                        )
            else:
                if "config_name" in module.builder_kwargs:
                    yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
                else:
                    yield builder_cls(
                        config_name=config_name,
                        cache_dir=self._cache_dir,
                        data_dir=self._data_dir,
                        **module.builder_kwargs,
                    )

        for j, builder in enumerate(get_builders()):
            print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
            builder._record_infos = os.path.exists(
                os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
            )  # record checksums only if we need to update a (deprecated) dataset_infos.json
            builder.download_and_prepare(
                download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
                if not self._force_redownload
                else DownloadMode.FORCE_REDOWNLOAD,
                verification_mode=VerificationMode.NO_CHECKS
                if self._ignore_verifications
                else VerificationMode.ALL_CHECKS,
                try_from_hf_gcs=False,
            )
            # Loading the prepared dataset exercises the arrow files end-to-end.
            builder.as_dataset()
            if self._save_infos:
                builder._save_infos()

            # If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
            # The dataset_infos are saved in the YAML part of the README.md

            # Let's move it to the original directory of the dataset script, to allow the user to
            # upload them on S3 at the same time afterwards.
            if self._save_infos:
                dataset_readme_path = os.path.join(builder_cls.get_imported_module_dir(), "README.md")
                name = Path(path).name + ".py"
                combined_path = os.path.join(path, name)
                if os.path.isfile(path):
                    dataset_dir = os.path.dirname(path)
                elif os.path.isfile(combined_path):
                    dataset_dir = path
                elif os.path.isdir(path):  # for local directories containing only data files
                    dataset_dir = path
                else:  # in case of a remote dataset
                    dataset_dir = None
                    print(f"Dataset card saved at {dataset_readme_path}")

                # Move dataset_info back to the user
                if dataset_dir is not None:
                    user_dataset_readme_path = os.path.join(dataset_dir, "README.md")
                    copyfile(dataset_readme_path, user_dataset_readme_path)
                    print(f"Dataset card saved at {user_dataset_readme_path}")

            # If clear_cache=True, the download folder and the dataset builder cache directory are deleted
            if self._clear_cache:
                if os.path.isdir(builder._cache_dir):
                    logger.warning(f"Clearing cache at {builder._cache_dir}")
                    rmtree(builder._cache_dir)
                download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
                if os.path.isdir(download_dir):
                    logger.warning(f"Clearing cache at {download_dir}")
                    rmtree(download_dir)

        print("Test successful.")
lib/python3.10/site-packages/datasets/io/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/io/abc.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from typing import Optional, Union
3
+
4
+ from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
5
+ from ..utils.typing import NestedDataStructureLike, PathLike
6
+
7
+
8
class AbstractDatasetReader(ABC):
    """Base class for readers that build a dataset from files.

    Concrete subclasses (csv, json, parquet, ...) wrap a packaged dataset
    builder and implement `read()`.
    """

    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # When paths come as a dict, the dict keys name the splits; otherwise
        # everything defaults to a single "train" split.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        """Return the dataset; streaming subclasses return iterable variants."""
        pass
32
+
33
+
34
class AbstractDatasetInputStream(ABC):
    """Base class for readers whose source is not a file path (e.g. a generator)."""

    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        """Return the dataset; streaming subclasses return an IterableDataset."""
        pass
lib/python3.10/site-packages/datasets/io/csv.py ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import multiprocessing
2
+ import os
3
+ from typing import BinaryIO, Optional, Union
4
+
5
+ from .. import Dataset, Features, NamedSplit, config
6
+ from ..formatting import query_table
7
+ from ..packaged_modules.csv.csv import Csv
8
+ from ..utils import logging
9
+ from ..utils.typing import NestedDataStructureLike, PathLike
10
+ from .abc import AbstractDatasetReader
11
+
12
+
13
class CsvDatasetReader(AbstractDatasetReader):
    """Reads CSV files into a `Dataset` (or an iterable dataset when streaming)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to the {split_name: paths} mapping the builder expects.
        if isinstance(path_or_paths, dict):
            data_files = path_or_paths
        else:
            data_files = {self.split: path_or_paths}
        self.builder = Csv(
            cache_dir=cache_dir,
            data_files=data_files,
            features=features,
            **kwargs,
        )

    def read(self):
        """Return the dataset, streaming it lazily or preparing it in the cache."""
        # Streaming mode needs no download/prepare step.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)

        # Map-style mode: materialize the dataset in the cache first.
        # Kept as explicit placeholders to document the prepare knobs.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
            num_proc=self.num_proc,
        )
        return self.builder.as_dataset(
            split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
66
+
67
+
68
class CsvDatasetWriter:
    """Writes a `Dataset` to CSV in batches, optionally with multiprocessing."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_csv_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        # Output is written as bytes, so the pandas CSV string is encoded here.
        self.encoding = "utf-8"
        self.to_csv_kwargs = to_csv_kwargs

    def write(self) -> int:
        """Write the whole dataset as CSV and return the number of bytes written."""
        # `path_or_buf` is controlled by this class, not forwarded to pandas.
        _ = self.to_csv_kwargs.pop("path_or_buf", None)
        header = self.to_csv_kwargs.pop("header", True)
        index = self.to_csv_kwargs.pop("index", False)

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            # Binary mode because batches are pre-encoded to bytes.
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs)
        return written

    def _batch_csv(self, args):
        """Convert one slice of the dataset to encoded CSV bytes.

        `args` is a tuple so the method can be used with `Pool.imap`.
        """
        offset, header, index, to_csv_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        # Only the first batch carries the header row.
        csv_str = batch.to_pandas().to_csv(
            path_or_buf=None, header=header if (offset == 0) else False, index=index, **to_csv_kwargs
        )
        return csv_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int:
        """Writes the pyarrow table as CSV to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            # Sequential path: encode and write batch by batch.
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating CSV from Arrow format",
            ):
                csv_str = self._batch_csv((offset, header, index, to_csv_kwargs))
                written += file_obj.write(csv_str)

        else:
            # Parallel path: workers encode batches; `imap` preserves order so
            # the output file stays in row order.
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for csv_str in logging.tqdm(
                    pool.imap(
                        self._batch_csv,
                        [(offset, header, index, to_csv_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating CSV from Arrow format",
                ):
                    written += file_obj.write(csv_str)

        return written
lib/python3.10/site-packages/datasets/io/generator.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Callable, Optional
2
+
3
+ from .. import Features
4
+ from ..packaged_modules.generator.generator import Generator
5
+ from .abc import AbstractDatasetInputStream
6
+
7
+
8
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    """Builds a dataset from a user-supplied example generator callable."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Return the dataset; generator-backed data always uses a "train" split."""
        # Streaming mode needs no download/prepare step.
        if self.streaming:
            return self.builder.as_streaming_dataset(split="train")

        # Map-style mode: materialize the dataset in the cache first.
        # Kept as explicit placeholders to document the prepare knobs.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
            num_proc=self.num_proc,
        )
        return self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
lib/python3.10/site-packages/datasets/io/json.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import multiprocessing
2
+ import os
3
+ from typing import BinaryIO, Optional, Union
4
+
5
+ import fsspec
6
+
7
+ from .. import Dataset, Features, NamedSplit, config
8
+ from ..formatting import query_table
9
+ from ..packaged_modules.json.json import Json
10
+ from ..utils import logging
11
+ from ..utils.typing import NestedDataStructureLike, PathLike
12
+ from .abc import AbstractDatasetReader
13
+
14
+
15
class JsonDatasetReader(AbstractDatasetReader):
    """Reads JSON / JSON Lines files into a `Dataset` (or iterable dataset when streaming)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Optional top-level key holding the records inside each JSON document.
        self.field = field
        # Normalize to the {split_name: paths} mapping the builder expects.
        if isinstance(path_or_paths, dict):
            data_files = path_or_paths
        else:
            data_files = {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=data_files,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Return the dataset, streaming it lazily or preparing it in the cache."""
        # Streaming mode needs no download/prepare step.
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)

        # Map-style mode: materialize the dataset in the cache first.
        # Kept as explicit placeholders to document the prepare knobs.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
            num_proc=self.num_proc,
        )
        return self.builder.as_dataset(
            split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
71
+
72
+
73
class JsonDatasetWriter:
    """Writes a `Dataset` to JSON (Lines) in batches, optionally with multiprocessing."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        # Output is written as bytes, so the pandas JSON string is encoded here.
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Write the whole dataset as JSON and return the number of bytes written."""
        # `path_or_buf` is controlled by this class, not forwarded to pandas.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        # JSON Lines by default only for the "records" orientation.
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            # fsspec applies the compression transparently on write.
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        """Convert one slice of the dataset to encoded JSON bytes.

        `args` is a tuple so the method can be used with `Pool.imap`.
        """
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        # Ensure each batch ends with a newline so concatenated batches stay valid JSON Lines.
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle.

        Caller is responsible for opening and closing the handle.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            # Sequential path: encode and write batch by batch.
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            # Parallel path: workers encode batches; `imap` preserves order so
            # the output file stays in row order.
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
lib/python3.10/site-packages/datasets/io/parquet.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import BinaryIO, Optional, Union
3
+
4
+ import numpy as np
5
+ import pyarrow.parquet as pq
6
+
7
+ from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
8
+ from ..features.features import FeatureType, _visit
9
+ from ..formatting import query_table
10
+ from ..packaged_modules import _PACKAGED_DATASETS_MODULES
11
+ from ..packaged_modules.parquet.parquet import Parquet
12
+ from ..utils import logging
13
+ from ..utils.typing import NestedDataStructureLike, PathLike
14
+ from .abc import AbstractDatasetReader
15
+
16
+
17
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is 1,000 but we lower it to 100 for image datasets.
    This allows to optimize random access to a parquet file, since accessing 1 row requires
    reading its entire row group.

    This can be improved to get an optimized size for querying/iterating
    but at least it matches the dataset viewer expectations on HF.

    Args:
        features (`datasets.Features`):
            Features of the dataset used to pick a suitable row group size.
    Returns:
        writer_batch_size (`Optional[int]`):
            Writer batch size to pass to a dataset builder.
            If `None`, then it will use the `datasets` default.
    """

    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        # Shrink the row group size for features whose cells can be large blobs.
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
50
+
51
+
52
class ParquetDatasetReader(AbstractDatasetReader):
    """Reads parquet files into a `Dataset` (or an iterable dataset when streaming)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping for the builder.
        if not isinstance(path_or_paths, dict):
            path_or_paths = {self.split: path_or_paths}
        module_hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=module_hash,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming if requested, else prepared from the cache."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Map-style path: materialize the cache first, then load from it.
        verification_mode = None
        self.builder.download_and_prepare(
            download_config=None,
            download_mode=None,
            verification_mode=verification_mode,
            base_path=None,
            num_proc=self.num_proc,
        )
        return self.builder.as_dataset(
            split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
107
+
108
+
109
class ParquetDatasetWriter:
    """Writes a `Dataset` to a parquet file path or binary file object."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Default row group size depends on the features (smaller for blob-like columns).
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the dataset, opening the target path if one was given.

        Returns:
            Sum of the in-memory Arrow sizes of the written batches (not the on-disk size).
        """
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATch_SIZE if False else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Writes the pyarrow table as Parquet to a binary file handle.

        Caller is responsible for opening and closing the handle; this method
        owns (and always closes) the intermediate `ParquetWriter`.
        """
        written = 0
        # `path_or_buf` is handled by `write`; never forward it to ParquetWriter.
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        try:
            for offset in logging.tqdm(
                range(0, len(self.dataset), batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating parquet from Arrow format",
            ):
                batch = query_table(
                    table=self.dataset._data,
                    key=slice(offset, offset + batch_size),
                    indices=self.dataset._indices,
                )
                writer.write_table(batch)
                written += batch.nbytes
        finally:
            # Close even on failure so the writer's resources are released and
            # the parquet footer is flushed for completed data.
            writer.close()
        return written
lib/python3.10/site-packages/datasets/io/spark.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import pyspark
4
+
5
+ from .. import Features, NamedSplit
6
+ from ..download import DownloadMode
7
+ from ..packaged_modules.spark.spark import Spark
8
+ from .abc import AbstractDatasetReader
9
+
10
+
11
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame.

    When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
    provided. Streaming is not currently supported.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        # The Spark packaged builder drives materialization of the dataframe into the cache.
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return a streaming dataset, or materialize the cache and load a map-style dataset."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        if self._load_from_cache_file:
            download_mode = None  # reuse an existing cache when present
        else:
            download_mode = DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
lib/python3.10/site-packages/datasets/io/sql.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import multiprocessing
2
+ from typing import TYPE_CHECKING, Optional, Union
3
+
4
+ from .. import Dataset, Features, config
5
+ from ..formatting import query_table
6
+ from ..packaged_modules.sql.sql import Sql
7
+ from ..utils import logging
8
+ from .abc import AbstractDatasetInputStream
9
+
10
+
11
+ if TYPE_CHECKING:
12
+ import sqlite3
13
+
14
+ import sqlalchemy
15
+
16
+
17
class SqlDatasetReader(AbstractDatasetInputStream):
    """Builds a map-style `Dataset` from a SQL query or table name."""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        # The Sql packaged builder does the actual fetching and arrow conversion.
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Prepare the cache and return the dataset as a single "train" split."""
        verification_mode = None
        self.builder.download_and_prepare(
            download_config=None,
            download_mode=None,
            verification_mode=verification_mode,
            base_path=None,
        )
        return self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
55
+
56
+
57
class SqlDatasetWriter:
    """Writes the rows of a `Dataset` to a SQL table via `pandas.DataFrame.to_sql`."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name  # target table name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the dataset and return the number of rows written.

        `sql` and `con` kwargs are discarded since they are fixed by this writer;
        `index` defaults to False (the pandas index column is not written).
        """
        # Work on a filtered copy instead of popping from self.to_sql_kwargs,
        # so repeated write() calls see the same configuration.
        to_sql_kwargs = {k: v for k, v in self.to_sql_kwargs.items() if k not in ("sql", "con")}
        index = to_sql_kwargs.pop("index", False)

        return self._write(index=index, **to_sql_kwargs)

    def _batch_sql(self, args):
        """Write rows [offset, offset + batch_size) to SQL and return the row count."""
        offset, index, to_sql_kwargs = args
        # Every batch after the first must append to the table created by the first.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        # `to_sql` may return None depending on the backend; fall back to the frame length.
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database.

        Caller is responsible for opening and closing the SQL connection.
        """
        written = 0
        num_rows, batch_size = len(self.dataset), self.batch_size
        # Number of batches, rounding up for a trailing partial batch.
        num_batches = (num_rows + batch_size - 1) // batch_size

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, num_rows, batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            with multiprocessing.Pool(self.num_proc) as pool:
                # Use a distinct loop variable: the original shadowed `num_rows` here,
                # which made `total` and the job list needlessly fragile.
                for batch_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=num_batches,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += batch_rows

        return written
lib/python3.10/site-packages/datasets/io/text.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from .. import Features, NamedSplit
4
+ from ..packaged_modules.text.text import Text
5
+ from ..utils.typing import NestedDataStructureLike, PathLike
6
+ from .abc import AbstractDatasetReader
7
+
8
+
9
class TextDatasetReader(AbstractDatasetReader):
    """Reads plain-text files into a `Dataset` (or an iterable dataset when streaming)."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping for the builder.
        if not isinstance(path_or_paths, dict):
            path_or_paths = {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Return the dataset: streaming if requested, else prepared from the cache."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Map-style path: materialize the cache first, then load from it.
        verification_mode = None
        self.builder.download_and_prepare(
            download_config=None,
            download_mode=None,
            verification_mode=verification_mode,
            base_path=None,
            num_proc=self.num_proc,
        )
        return self.builder.as_dataset(
            split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
lib/python3.10/site-packages/datasets/packaged_modules/arrow/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/arrow/arrow.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ from dataclasses import dataclass
3
+ from typing import Optional
4
+
5
+ import pyarrow as pa
6
+
7
+ import datasets
8
+ from datasets.table import table_cast
9
+
10
+
11
+ logger = datasets.utils.logging.get_logger(__name__)
12
+
13
+
14
@dataclass
class ArrowConfig(datasets.BuilderConfig):
    """BuilderConfig for Arrow."""

    # Optional explicit features; when None, the builder infers them
    # from the arrow schema of the first data file (see Arrow._split_generators).
    features: Optional[datasets.Features] = None
19
+
20
+
21
class Arrow(datasets.ArrowBasedBuilder):
    """Builder that reads Arrow IPC stream files directly into tables."""

    BUILDER_CONFIG_CLASS = ArrowConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # No split mapping given: everything goes into a single "train" split.
            files = [data_files] if isinstance(data_files, str) else data_files
            # `iter_files` skips hidden files inside extracted archives.
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, split_files in data_files.items():
            if isinstance(split_files, str):
                split_files = [split_files]
            split_files = [dl_manager.iter_files(file) for file in split_files]
            # Infer features if they are stored in the arrow schema of the first file.
            if self.info.features is None:
                for file in itertools.chain.from_iterable(split_files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema)
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": split_files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # Full cast: supports nested features with keys in a different order,
            # and conversions like str <-> int/float or str -> Audio.
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                try:
                    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
                        pa_table = pa.Table.from_batches([record_batch])
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
lib/python3.10/site-packages/datasets/packaged_modules/csv/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/csv/csv.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ from dataclasses import dataclass
3
+ from typing import Any, Callable, Dict, List, Optional, Union
4
+
5
+ import pandas as pd
6
+ import pyarrow as pa
7
+
8
+ import datasets
9
+ import datasets.config
10
+ from datasets.features.features import require_storage_cast
11
+ from datasets.table import table_cast
12
+ from datasets.utils.py_utils import Literal
13
+
14
+
15
+ logger = datasets.utils.logging.get_logger(__name__)
16
+
17
+ _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
18
+ _PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
19
+ _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
20
+ _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
21
+
22
+
23
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV.

    Most fields mirror `pandas.read_csv` parameters; `delimiter` and
    `column_names` are aliases for `sep` and `names`.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are user-facing aliases; fold them into
        # the canonical pandas parameter names.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Kwargs dict to forward to `pandas.read_csv`, adjusted for the installed pandas version."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        default_config = CsvConfig()  # hoisted: one instance instead of one per parameter
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(default_config, pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        pandas_version = (datasets.config.PANDAS_VERSION.major, datasets.config.PANDAS_VERSION.minor)

        # Remove 2.0 new arguments
        if pandas_version < (2, 0):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments. Tuple comparison: the old `major >= 1 and minor >= 3`
        # check wrongly stripped these parameters on pandas 2.0 (minor == 0).
        if pandas_version < (1, 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
135
+
136
+
137
class Csv(datasets.ArrowBasedBuilder):
    """Builder that streams CSV files into Arrow tables via pandas."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # No split mapping given: everything goes into a single "train" split.
            files = [data_files] if isinstance(data_files, str) else data_files
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, split_files in data_files.items():
            if isinstance(split_files, str):
                split_files = [split_files]
            split_files = [dl_manager.iter_files(file) for file in split_files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": split_files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        if schema is not None:
            dtype = {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
        else:
            dtype = None
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
lib/python3.10/site-packages/datasets/packaged_modules/generator/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/generator/generator.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import dataclass
2
+ from typing import Callable, Optional
3
+
4
+ import datasets
5
+
6
+
7
@dataclass
class GeneratorConfig(datasets.BuilderConfig):
    """BuilderConfig for datasets built from a Python generator callable."""

    # Callable producing example dicts; required.
    generator: Optional[Callable] = None
    # Keyword arguments forwarded to `generator` at generation time.
    gen_kwargs: Optional[dict] = None
    features: Optional[datasets.Features] = None

    def __post_init__(self):
        # Raise instead of assert: asserts are stripped under `python -O`,
        # and sibling configs (e.g. SqlConfig) validate with ValueError too.
        if self.generator is None:
            raise ValueError("generator must be specified")

        if self.gen_kwargs is None:
            self.gen_kwargs = {}
18
+
19
+
20
class Generator(datasets.GeneratorBasedBuilder):
    """Builder that wraps a user-supplied generator callable as a single-split dataset."""

    BUILDER_CONFIG_CLASS = GeneratorConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        # Everything produced by the generator lands in one "train" split.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs=self.config.gen_kwargs)]

    def _generate_examples(self, **gen_kwargs):
        # Enumerate to assign a stable integer key to each yielded example.
        for idx, example in enumerate(self.config.generator(**gen_kwargs)):
            yield idx, example
lib/python3.10/site-packages/datasets/packaged_modules/json/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/json/json.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import itertools
3
+ import json
4
+ from dataclasses import dataclass
5
+ from typing import Optional
6
+
7
+ import pyarrow as pa
8
+ import pyarrow.json as paj
9
+
10
+ import datasets
11
+ from datasets.table import table_cast
12
+ from datasets.utils.file_utils import readline
13
+
14
+
15
+ logger = datasets.utils.logging.get_logger(__name__)
16
+
17
+
18
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    # If set, read the records under this top-level key of a single JSON object
    # instead of treating the file as JSON lines.
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
30
+
31
+
32
class Json(datasets.ArrowBasedBuilder):
    """Builder that reads JSON / JSON-lines files into Arrow tables."""

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate/translate deprecated config options and return the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        # A bare str/list/tuple means no split mapping: use a single "train" split.
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast the table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, table) pairs from each file, with pyarrow fast path and JSON fallback."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    # Union of all row keys: tolerates records with missing fields (filled with None).
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            # Some file-like objects don't expose readline; fall back to the utils helper.
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    # Only the "straddling object" ArrowInvalid error is retryable
                                    # by doubling block_size; anything else (or a block already
                                    # bigger than the batch) is a real parse failure.
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            # Fallback: the file may be a single JSON document rather than JSON lines.
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/spark/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/sql/__init__.py ADDED
File without changes
lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
4
+
5
+ import pandas as pd
6
+ import pyarrow as pa
7
+
8
+ import datasets
9
+ import datasets.config
10
+ from datasets.features.features import require_storage_cast
11
+ from datasets.table import table_cast
12
+
13
+
14
+ if TYPE_CHECKING:
15
+ import sqlite3
16
+
17
+ import sqlalchemy
18
+
19
+
20
+ logger = datasets.utils.logging.get_logger(__name__)
21
+
22
+
23
+ @dataclass
24
+ class SqlConfig(datasets.BuilderConfig):
25
+ """BuilderConfig for SQL."""
26
+
27
+ sql: Union[str, "sqlalchemy.sql.Selectable"] = None
28
+ con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
29
+ index_col: Optional[Union[str, List[str]]] = None
30
+ coerce_float: bool = True
31
+ params: Optional[Union[List, Tuple, Dict]] = None
32
+ parse_dates: Optional[Union[List, Dict]] = None
33
+ columns: Optional[List[str]] = None
34
+ chunksize: Optional[int] = 10_000
35
+ features: Optional[datasets.Features] = None
36
+
37
+ def __post_init__(self):
38
+ if self.sql is None:
39
+ raise ValueError("sql must be specified")
40
+ if self.con is None:
41
+ raise ValueError("con must be specified")
42
+
43
+ def create_config_id(
44
+ self,
45
+ config_kwargs: dict,
46
+ custom_features: Optional[datasets.Features] = None,
47
+ ) -> str:
48
+ config_kwargs = config_kwargs.copy()
49
+ # We need to stringify the Selectable object to make its hash deterministic
50
+
51
+ # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
52
+ sql = config_kwargs["sql"]
53
+ if not isinstance(sql, str):
54
+ if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
55
+ import sqlalchemy
56
+
57
+ if isinstance(sql, sqlalchemy.sql.Selectable):
58
+ engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
59
+ sql_str = str(sql.compile(dialect=engine.dialect))
60
+ config_kwargs["sql"] = sql_str
61
+ else:
62
+ raise TypeError(
63
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
64
+ )
65
+ else:
66
+ raise TypeError(
67
+ f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
68
+ )
69
+ con = config_kwargs["con"]
70
+ if not isinstance(con, str):
71
+ config_kwargs["con"] = id(con)
72
+ logger.info(
73
+ f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
74
+ )
75
+
76
+ return super().create_config_id(config_kwargs, custom_features=custom_features)
77
+
78
+ @property
79
+ def pd_read_sql_kwargs(self):
80
+ pd_read_sql_kwargs = {
81
+ "index_col": self.index_col,
82
+ "columns": self.columns,
83
+ "params": self.params,
84
+ "coerce_float": self.coerce_float,
85
+ "parse_dates": self.parse_dates,
86
+ }
87
+ return pd_read_sql_kwargs
88
+
89
+
90
class Sql(datasets.ArrowBasedBuilder):
    """ArrowBasedBuilder that materializes a SQL query as a dataset."""

    BUILDER_CONFIG_CLASS = SqlConfig

    def _info(self):
        """Expose the (optional) user-provided features as dataset info."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """A SQL source has nothing to download: one TRAIN split."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast an Arrow table to the configured feature schema, if any."""
        features = self.config.features
        if features is None:
            return pa_table
        schema = features.arrow_schema
        if any(require_storage_cast(feature) for feature in features.values()):
            # more expensive cast; allows str <-> int/float or str to Audio for example
            return table_cast(pa_table, schema)
        # cheaper cast
        return pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)

    def _generate_tables(self):
        """Yield (chunk_idx, Arrow table) pairs read from the SQL query."""
        chunksize = self.config.chunksize
        result = pd.read_sql(
            self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
        )
        # With chunksize=None pandas returns a single DataFrame, not an iterator.
        chunks = [result] if chunksize is None else result
        for chunk_idx, frame in enumerate(chunks):
            yield chunk_idx, self._cast_table(pa.Table.from_pandas(frame))
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ uv
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/LICENSE ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ Copyright 2019 Catalysts GmbH, a Cloudflight Company <space@catalysts.cc>
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
+
5
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/METADATA ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: dlinfo
3
+ Version: 2.0.0
4
+ Summary: Python wrapper for libc's dlinfo and dyld_find on Mac
5
+ Home-page: https://github.com/fphammerle/python-dlinfo
6
+ Maintainer: Fabian Peter Hammerle
7
+ Maintainer-email: fabian.dlinfo@hammerle.me
8
+ License: MIT
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Programming Language :: Python :: 3.9
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Operating System :: MacOS :: MacOS X
16
+ Classifier: Operating System :: POSIX :: Linux
17
+ Requires-Python: >=3.9
18
+ License-File: LICENSE
19
+
20
+ python-dlinfo
21
+ =============
22
+
23
+ Python wrapper for libc's dlinfo
24
+
25
+ Install
26
+ -------
27
+
28
+ .. code:: sh
29
+
30
+ pip install dlinfo
31
+ # or
32
+ pipenv install dlinfo
33
+
34
+ Usage
35
+ -----
36
+
37
+ .. code:: python
38
+
39
+ >>> import ctypes, ctypes.util
+ >>> from dlinfo import DLInfo
40
+ >>> lib = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
41
+ >>> dlinfo = DLInfo(lib)
42
+ >>> dlinfo.path
43
+ '/lib/x86_64-linux-gnu/libc.so.6'
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/RECORD ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ dlinfo-2.0.0.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
2
+ dlinfo-2.0.0.dist-info/LICENSE,sha256=H9hqUp3q1-1XMLNmCAu9EGZdWIkT_S2c5Q6zoWEJX7k,1098
3
+ dlinfo-2.0.0.dist-info/METADATA,sha256=OAw2-t1rNPAdqmpKtRN5zSVjtH-f-_QPfHALsKaN8Tg,1070
4
+ dlinfo-2.0.0.dist-info/RECORD,,
5
+ dlinfo-2.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ dlinfo-2.0.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
7
+ dlinfo-2.0.0.dist-info/top_level.txt,sha256=JZ0mAh2JRPwKUx0iZgbGA9bPo3zUXdZ7JLWl4TAUxiE,7
8
+ dlinfo/__init__.py,sha256=muJlk1jFQBz5e70hI5N8ntlaKybPu4890HoRu1MKZ_M,192
9
+ dlinfo/_glibc.py,sha256=PYEb2ZLL29xAodLRHaCcu2Bg3F7lLMyxI1Fqw93dKqo,1278
10
+ dlinfo/_macosx.py,sha256=h8lOieLm7-hoIYhzipyFHuj52JGewYVAvGafwcYoAWI,523
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/REQUESTED ADDED
File without changes
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.38.4)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
lib/python3.10/site-packages/dlinfo-2.0.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ dlinfo
lib/python3.10/site-packages/importlib_resources/__init__.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Read resources contained within a package.
3
+
4
+ This codebase is shared between importlib.resources in the stdlib
5
+ and importlib_resources in PyPI. See
6
+ https://github.com/python/importlib_metadata/wiki/Development-Methodology
7
+ for more detail.
8
+ """
9
+
10
+ from ._common import (
11
+ Anchor,
12
+ Package,
13
+ as_file,
14
+ files,
15
+ )
16
+ from ._functional import (
17
+ contents,
18
+ is_resource,
19
+ open_binary,
20
+ open_text,
21
+ path,
22
+ read_binary,
23
+ read_text,
24
+ )
25
+ from .abc import ResourceReader
26
+
27
+ __all__ = [
28
+ 'Package',
29
+ 'Anchor',
30
+ 'ResourceReader',
31
+ 'as_file',
32
+ 'files',
33
+ 'contents',
34
+ 'is_resource',
35
+ 'open_binary',
36
+ 'open_text',
37
+ 'path',
38
+ 'read_binary',
39
+ 'read_text',
40
+ ]
lib/python3.10/site-packages/importlib_resources/_adapters.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import suppress
2
+ from io import TextIOWrapper
3
+
4
+ from . import abc
5
+
6
+
7
class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.

    ``loader`` is replaced by whatever ``adapter`` derives from the spec;
    every other attribute falls through to the wrapped spec.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        self.spec = spec
        self.loader = adapter(self.spec)

    def __getattr__(self, name):
        # Only invoked for names not set on self (i.e. not spec/loader).
        return getattr(self.spec, name)
18
+
19
+
20
class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.

    Always answers with a CompatibilityFiles adapter, reduced to the
    native reader whenever that reader already supports files().
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        compat = CompatibilityFiles(self.spec)
        return compat._native()
30
+
31
+
32
+ def _io_wrapper(file, mode='r', *args, **kwargs):
33
+ if mode == 'r':
34
+ return TextIOWrapper(file, *args, **kwargs)
35
+ elif mode == 'rb':
36
+ return file
37
+ raise ValueError(f"Invalid mode value '{mode}', only 'r' and 'rb' are supported")
38
+
39
+
40
class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.
        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            # `reader` may be None when the loader provides no resource
            # reader; every method below degrades gracefully in that case.
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            if not self._reader:
                return iter(())
            return iter(
                CompatibilityFiles.ChildPath(self._reader, path)
                for path in self._reader.contents()
            )

        def is_file(self):
            return False

        # A spec path is a synthetic node: neither a real file nor dir.
        is_dir = is_file

        def joinpath(self, other):
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            # Delegates to the reader's package-level resource (None key).
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.
        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            # Readers can't traverse deeper, so the join is an orphan.
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(
                self._reader.open_resource(self.name), mode, *args, **kwargs
            )

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.
        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        # Implicitly returns None when the loader lacks get_resource_reader.
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def _native(self):
        """
        Return the native reader if it supports files().
        """
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        # Fall through to the underlying reader for the ResourceReader API.
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)
161
+
162
+
163
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.
    """
    spec = package.__spec__
    return SpecLoaderAdapter(spec, TraversableResourcesLoader)
lib/python3.10/site-packages/importlib_resources/_common.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import functools
3
+ import importlib
4
+ import inspect
5
+ import itertools
6
+ import os
7
+ import pathlib
8
+ import tempfile
9
+ import types
10
+ import warnings
11
+ from typing import Optional, Union, cast
12
+
13
+ from .abc import ResourceReader, Traversable
14
+
15
+ Package = Union[types.ModuleType, str]
16
+ Anchor = Package
17
+
18
+
19
def package_to_anchor(func):
    """
    Replace 'package' parameter as 'anchor' and warn about the change.

    Other errors should fall through.

    >>> files('a', 'b')
    Traceback (most recent call last):
    TypeError: files() takes from 0 to 1 positional arguments but 2 were given

    Remove this compatibility in Python 3.14.
    """
    undefined = object()

    @functools.wraps(func)
    def wrapper(anchor=undefined, package=undefined):
        if package is undefined:
            # Modern call shape: zero or one 'anchor' argument.
            return func() if anchor is undefined else func(anchor)
        if anchor is not undefined:
            # Both given: let func raise its own TypeError.
            return func(anchor, package)
        warnings.warn(
            "First parameter to files is renamed to 'anchor'",
            DeprecationWarning,
            stacklevel=2,
        )
        return func(package)

    return wrapper
49
+
50
+
51
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
    """
    Get a Traversable resource for an anchor.
    """
    module = resolve(anchor)
    return from_package(module)
57
+
58
+
59
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
    """
    Return the package's loader if it's a ResourceReader.
    """
    # We can't use
    # a issubclass() check here because apparently abc.'s __subclasscheck__()
    # hook wants to create a weak reference to the object, but
    # zipimport.zipimporter does not support weak references, resulting in a
    # TypeError. That seems terrible.
    spec = package.__spec__
    factory = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore[union-attr]
    return None if factory is None else factory(spec.name)  # type: ignore[union-attr]
73
+
74
+
75
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
    # Fallback: anything that isn't str/None is assumed to already be a module.
    return cast(types.ModuleType, cand)


@resolve.register
def _(cand: str) -> types.ModuleType:
    # A dotted module name resolves to the imported module.
    return importlib.import_module(cand)


@resolve.register
def _(cand: None) -> types.ModuleType:
    # No anchor given: infer the calling module from the stack.
    return resolve(_infer_caller().f_globals['__name__'])
88
+
89
+
90
+ def _infer_caller():
91
+ """
92
+ Walk the stack and find the frame of the first caller not in this module.
93
+ """
94
+
95
+ def is_this_file(frame_info):
96
+ return frame_info.filename == stack[0].filename
97
+
98
+ def is_wrapper(frame_info):
99
+ return frame_info.function == 'wrapper'
100
+
101
+ stack = inspect.stack()
102
+ not_this_file = itertools.filterfalse(is_this_file, stack)
103
+ # also exclude 'wrapper' due to singledispatch in the call stack
104
+ callers = itertools.filterfalse(is_wrapper, not_this_file)
105
+ return next(callers).frame
106
+
107
+
108
def from_package(package: types.ModuleType):
    """
    Return a Traversable object for the given package.

    """
    # deferred for performance (python/cpython#109829)
    from .future.adapters import wrap_spec

    adapted = wrap_spec(package)
    reader = adapted.loader.get_resource_reader(adapted.name)
    return reader.files()
119
+
120
+
121
@contextlib.contextmanager
def _tempfile(
    reader,
    suffix='',
    # gh-93353: Keep a reference to call os.remove() in late Python
    # finalization.
    *,
    _os_remove=os.remove,
):
    """Yield a real filesystem path holding the bytes produced by *reader*.

    *reader* is a zero-argument callable (e.g. ``path.read_bytes``); it is
    dropped as soon as the bytes are written so it can't pin its backing
    resource for the lifetime of the context.
    """
    # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
    # blocks due to the need to close the temporary file to work on Windows
    # properly.
    fd, raw_path = tempfile.mkstemp(suffix=suffix)
    try:
        try:
            os.write(fd, reader())
        finally:
            # Always close the descriptor, even when reader() raises.
            os.close(fd)
        del reader
        yield pathlib.Path(raw_path)
    finally:
        try:
            _os_remove(raw_path)
        except FileNotFoundError:
            # The caller may have moved/removed the file; that's fine.
            pass
147
+
148
def _temp_file(path):
    # Materialize a single Traversable file as a temporary file on disk,
    # preserving the original file name as the temp-file suffix.
    return _tempfile(path.read_bytes, suffix=path.name)
150
+
151
+
152
+ def _is_present_dir(path: Traversable) -> bool:
153
+ """
154
+ Some Traversables implement ``is_dir()`` to raise an
155
+ exception (i.e. ``FileNotFoundError``) when the
156
+ directory doesn't exist. This function wraps that call
157
+ to always return a boolean and only return True
158
+ if there's a dir and it exists.
159
+ """
160
+ with contextlib.suppress(FileNotFoundError):
161
+ return path.is_dir()
162
+ return False
163
+
164
+
165
@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    if _is_present_dir(path):
        return _temp_dir(path)
    return _temp_file(path)


@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
    """
    Degenerate behavior for pathlib.Path objects.
    """
    yield path
181
+
182
+
183
+ @contextlib.contextmanager
184
+ def _temp_path(dir: tempfile.TemporaryDirectory):
185
+ """
186
+ Wrap tempfile.TemporaryDirectory to return a pathlib object.
187
+ """
188
+ with dir as result:
189
+ yield pathlib.Path(result)
190
+
191
+
192
@contextlib.contextmanager
def _temp_dir(path):
    """
    Given a traversable dir, recursively replicate the whole tree
    to the file system in a context manager.
    """
    assert path.is_dir()
    with _temp_path(tempfile.TemporaryDirectory()) as staging:
        yield _write_contents(staging, path)
201
+
202
+
203
+ def _write_contents(target, source):
204
+ child = target.joinpath(source.name)
205
+ if source.is_dir():
206
+ child.mkdir()
207
+ for item in source.iterdir():
208
+ _write_contents(child, item)
209
+ else:
210
+ child.write_bytes(source.read_bytes())
211
+ return child
lib/python3.10/site-packages/importlib_resources/_functional.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Simplified function-based API for importlib.resources"""
2
+
3
+ import warnings
4
+
5
+ from ._common import as_file, files
6
+ from .abc import TraversalError
7
+
8
+ _MISSING = object()
9
+
10
+
11
+ def open_binary(anchor, *path_names):
12
+ """Open for binary reading the *resource* within *package*."""
13
+ return _get_resource(anchor, path_names).open('rb')
14
+
15
+
16
def open_text(anchor, *path_names, encoding=_MISSING, errors='strict'):
    """Open for text reading the *resource* within *package*."""
    resolved_encoding = _get_encoding_arg(path_names, encoding)
    return _get_resource(anchor, path_names).open(
        'r', encoding=resolved_encoding, errors=errors
    )
21
+
22
+
23
def read_binary(anchor, *path_names):
    """Read and return contents of *resource* within *package* as bytes."""
    resource = _get_resource(anchor, path_names)
    return resource.read_bytes()
26
+
27
+
28
def read_text(anchor, *path_names, encoding=_MISSING, errors='strict'):
    """Read and return contents of *resource* within *package* as str."""
    resolved_encoding = _get_encoding_arg(path_names, encoding)
    return _get_resource(anchor, path_names).read_text(
        encoding=resolved_encoding, errors=errors
    )
33
+
34
+
35
def path(anchor, *path_names):
    """Return the path to the *resource* as an actual file system path."""
    resource = _get_resource(anchor, path_names)
    return as_file(resource)
38
+
39
+
40
def is_resource(anchor, *path_names):
    """Return ``True`` if there is a resource named *name* in the package,

    Otherwise returns ``False``.
    """
    try:
        resource = _get_resource(anchor, path_names)
        return resource.is_file()
    except TraversalError:
        # The path does not resolve inside the package.
        return False
49
+
50
+
51
def contents(anchor, *path_names):
    """Return an iterable over the named resources within the package.

    The iterable returns :class:`str` resources (e.g. files).
    The iterable does not recurse into subdirectories.
    """
    warnings.warn(
        "importlib.resources.contents is deprecated. "
        "Use files(anchor).iterdir() instead.",
        DeprecationWarning,
        stacklevel=1,
    )
    entries = _get_resource(anchor, path_names).iterdir()
    return (entry.name for entry in entries)
64
+
65
+
66
def _get_encoding_arg(path_names, encoding):
    # For compatibility with versions where *encoding* was a positional
    # argument, it needs to be given explicitly when there are multiple
    # *path_names*.
    # This limitation can be removed in Python 3.15.
    if encoding is not _MISSING:
        return encoding
    if len(path_names) > 1:
        raise TypeError(
            "'encoding' argument required with multiple path names",
        )
    return 'utf-8'
79
+
80
+
81
+ def _get_resource(anchor, path_names):
82
+ if anchor is None:
83
+ raise TypeError("anchor must be module or string, got None")
84
+ return files(anchor).joinpath(*path_names)
lib/python3.10/site-packages/importlib_resources/_itertools.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# from more_itertools 9.0
def only(iterable, default=None, too_long=None):
    """If *iterable* has only one item, return it.
    If it has zero items, return *default*.
    If it has more than one item, raise the exception given by *too_long*,
    which is ``ValueError`` by default.
    >>> only([], default='missing')
    'missing'
    >>> only([1])
    1
    >>> only([1, 2])  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ValueError: Expected exactly one item in iterable, but got 1, 2,
    and perhaps more.'
    >>> only([1, 2], too_long=TypeError)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError
    Note that :func:`only` attempts to advance *iterable* twice to ensure there
    is only one item. See :func:`spy` or :func:`peekable` to check
    iterable contents less destructively.
    """
    it = iter(iterable)
    first_value = next(it, default)

    # A sentinel distinguishes "no second item" from any real value.
    _sentinel = object()
    second_value = next(it, _sentinel)
    if second_value is not _sentinel:
        msg = (
            'Expected exactly one item in iterable, but got {!r}, {!r}, '
            'and perhaps more.'.format(first_value, second_value)
        )
        raise too_long or ValueError(msg)

    return first_value
lib/python3.10/site-packages/importlib_resources/abc.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import itertools
3
+ import os
4
+ import pathlib
5
+ from typing import (
6
+ Any,
7
+ BinaryIO,
8
+ Iterable,
9
+ Iterator,
10
+ NoReturn,
11
+ Literal,
12
+ Optional,
13
+ Protocol,
14
+ Text,
15
+ TextIO,
16
+ Union,
17
+ overload,
18
+ runtime_checkable,
19
+ )
20
+
21
+ StrPath = Union[str, os.PathLike[str]]
22
+
23
+ __all__ = ["ResourceReader", "Traversable", "TraversableResources"]
24
+
25
+
26
class ResourceReader(metaclass=abc.ABCMeta):
    """Abstract base class for loaders to provide resource reading support."""

    @abc.abstractmethod
    def open_resource(self, resource: Text) -> BinaryIO:
        """Return an opened, file-like object for binary reading.

        The 'resource' argument is expected to represent only a file name.
        If the resource cannot be found, FileNotFoundError is raised.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def resource_path(self, resource: Text) -> Text:
        """Return the file system path to the specified resource.

        The 'resource' argument is expected to represent only a file name.
        If the resource does not exist on the file system, raise
        FileNotFoundError.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def is_resource(self, path: Text) -> bool:
        """Return True if the named 'path' is a resource.

        Files are resources, directories are not.
        """
        # Same rationale: a missing override behaves like a missing
        # resource rather than a programming error.
        raise FileNotFoundError

    @abc.abstractmethod
    def contents(self) -> Iterable[str]:
        """Return an iterable of entries in `package`."""
        raise FileNotFoundError
66
+
67
+
68
class TraversalError(Exception):
    """Raised by Traversable.joinpath when a named descendant is not found."""
70
+
71
+
72
@runtime_checkable
class Traversable(Protocol):
    """
    An object with a subset of pathlib.Path methods suitable for
    traversing directories and opening files.

    Any exceptions that occur when accessing the backing resource
    may propagate unaltered.
    """

    @abc.abstractmethod
    def iterdir(self) -> Iterator["Traversable"]:
        """
        Yield Traversable objects in self
        """

    def read_bytes(self) -> bytes:
        """
        Read contents of self as bytes
        """
        with self.open('rb') as strm:
            return strm.read()

    def read_text(
        self, encoding: Optional[str] = None, errors: Optional[str] = None
    ) -> str:
        """
        Read contents of self as text
        """
        with self.open(encoding=encoding, errors=errors) as strm:
            return strm.read()

    @abc.abstractmethod
    def is_dir(self) -> bool:
        """
        Return True if self is a directory
        """

    @abc.abstractmethod
    def is_file(self) -> bool:
        """
        Return True if self is a file
        """

    def joinpath(self, *descendants: StrPath) -> "Traversable":
        """
        Return Traversable resolved with any descendants applied.

        Each descendant should be a path segment relative to self
        and each may contain multiple levels separated by
        ``posixpath.sep`` (``/``).

        Raises TraversalError when a segment can't be found among
        the children of the current node.
        """
        if not descendants:
            return self
        # Flatten all descendants into one lazy stream of path segments.
        names = itertools.chain.from_iterable(
            path.parts for path in map(pathlib.PurePosixPath, descendants)
        )
        target = next(names)
        matches = (
            traversable for traversable in self.iterdir() if traversable.name == target
        )
        try:
            match = next(matches)
        except StopIteration:
            # `names` has already been advanced past `target`, so the
            # remaining segments reported here are the unresolved tail.
            raise TraversalError(
                "Target not found during traversal.", target, list(names)
            )
        # Recurse with the remaining (lazily consumed) segments.
        return match.joinpath(*names)

    def __truediv__(self, child: StrPath) -> "Traversable":
        """
        Return Traversable child in self
        """
        return self.joinpath(child)

    @overload
    def open(self, mode: Literal['r'] = 'r', *args: Any, **kwargs: Any) -> TextIO: ...

    @overload
    def open(self, mode: Literal['rb'], *args: Any, **kwargs: Any) -> BinaryIO: ...

    @abc.abstractmethod
    def open(
        self, mode: str = 'r', *args: Any, **kwargs: Any
    ) -> Union[TextIO, BinaryIO]:
        """
        mode may be 'r' or 'rb' to open as text or binary. Return a handle
        suitable for reading (same as pathlib.Path.open).

        When opening as text, accepts encoding parameters such as those
        accepted by io.TextIOWrapper.
        """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """
        The base name of this object without any parent references.
        """
170
+ """
171
+
172
+
173
class TraversableResources(ResourceReader):
    """
    The required interface for providing traversable
    resources.

    Concrete subclasses implement only ``files()``; the rest of the
    ResourceReader API is derived from it.
    """

    @abc.abstractmethod
    def files(self) -> "Traversable":
        """Return a Traversable object for the loaded package."""

    def open_resource(self, resource: StrPath) -> BinaryIO:
        target = self.files().joinpath(resource)
        return target.open('rb')

    def resource_path(self, resource: Any) -> NoReturn:
        # Traversable resources have no guaranteed file-system path.
        raise FileNotFoundError(resource)

    def is_resource(self, path: StrPath) -> bool:
        target = self.files().joinpath(path)
        return target.is_file()

    def contents(self) -> Iterator[str]:
        return (item.name for item in self.files().iterdir())
lib/python3.10/site-packages/importlib_resources/py.typed ADDED
File without changes
lib/python3.10/site-packages/importlib_resources/readers.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ import contextlib
5
+ import itertools
6
+ import operator
7
+ import pathlib
8
+ import re
9
+ import warnings
10
+ from collections.abc import Iterator
11
+
12
+ from . import abc
13
+ from ._itertools import only
14
+ from .compat.py39 import ZipPath
15
+
16
+
17
def remove_duplicates(items):
    """Yield the unique items, preserving first-seen order."""
    # dict preserves insertion order (Python 3.7+), so fromkeys
    # deduplicates while keeping the original ordering.
    return iter(dict.fromkeys(items))
19
+
20
+
21
class FileReader(abc.TraversableResources):
    """Resource reader for a package that lives in a plain directory."""

    def __init__(self, loader):
        # The package directory is the parent of the loader's module file.
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path / resource)

    def files(self):
        return self.path
35
+
36
+
37
class ZipReader(abc.TraversableResources):
    """
    Resource reader for packages loaded from a zip archive.

    Assumes a zipimport-style loader exposing ``prefix``, ``archive``
    and ``is_package`` — TODO confirm against actual loader usage.
    """

    def __init__(self, loader, module):
        # Normalize the loader prefix to forward slashes, as used by zipfile.
        self.prefix = loader.prefix.replace('\\', '/')
        if loader.is_package(module):
            # For packages, resources live under the package's own directory.
            _, _, name = module.rpartition('.')
            self.prefix += name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            # zipfile raises KeyError for missing members; translate it to
            # the FileNotFoundError the ResourceReader contract implies.
            # Chain with `from exc` so the original cause is preserved in
            # tracebacks (the original raise discarded it).
            raise FileNotFoundError(exc.args[0]) from exc

    def is_resource(self, path):
        """
        Workaround for `zipfile.Path.is_file` returning true
        for non-existent paths.
        """
        target = self.files().joinpath(path)
        return target.is_file() and target.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)
61
+
62
+
63
class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """

    def __init__(self, *paths):
        # Deduplicate while preserving order, coercing legacy strings.
        self._paths = [_ensure_traversable(p) for p in remove_duplicates(paths)]
        if not self._paths:
            message = 'MultiplexedPath must contain at least one path'
            raise FileNotFoundError(message)
        if any(not p.is_dir() for p in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')

    def iterdir(self):
        # Merge the children of every underlying path, grouping entries
        # that share a name so they can be followed as one.
        name_of = operator.attrgetter('name')
        merged = itertools.chain.from_iterable(p.iterdir() for p in self._paths)
        for _, group in itertools.groupby(sorted(merged, key=name_of), key=name_of):
            yield self._follow(group)

    def read_bytes(self):
        raise FileNotFoundError(f'{self} is not a file')

    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def joinpath(self, *descendants):
        try:
            return super().joinpath(*descendants)
        except abc.TraversalError:
            # One of the paths did not resolve (a directory does not exist).
            # Just return something that will not exist.
            return self._paths[0].joinpath(*descendants)

    @classmethod
    def _follow(cls, children):
        """
        Construct a MultiplexedPath if needed.

        If children contains a sole element, return it.
        Otherwise, return a MultiplexedPath of the items.
        Unless one of the items is not a Directory, then return the first.
        """
        # Three independent passes over the same iterator of candidates.
        as_dirs, as_single, as_first = itertools.tee(children, 3)

        try:
            return only(as_single)
        except ValueError:
            try:
                return cls(*as_dirs)
            except NotADirectoryError:
                return next(as_first)

    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    @property
    def name(self):
        # All members share a name; report the first's.
        return self._paths[0].name

    def __repr__(self):
        inner = ', '.join(f"'{p}'" for p in self._paths)
        return f'MultiplexedPath({inner})'
134
+
135
+
136
class NamespaceReader(abc.TraversableResources):
    """Resource reader for namespace packages (multihomed directories)."""

    def __init__(self, namespace_path):
        # Only genuine namespace paths are supported.
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        resolved = (self._resolve(item) for item in namespace_path)
        self.path = MultiplexedPath(*(trav for trav in resolved if trav))

    @classmethod
    def _resolve(cls, path_str) -> abc.Traversable | None:
        r"""
        Given an item from a namespace path, resolve it to a Traversable.

        path_str might be a directory on the filesystem or a path to a
        zipfile plus the path within the zipfile, e.g. ``/foo/bar`` or
        ``/foo/baz.zip/inner_dir`` or ``foo\baz.zip\inner_dir\sub``.

        path_str might also be a sentinel used by editable packages to
        trigger other behaviors (see python/importlib_resources#311).
        In that case, return None.
        """
        return next(
            (cand for cand in cls._candidate_paths(path_str) if cand.is_dir()),
            None,
        )

    @classmethod
    def _candidate_paths(cls, path_str: str) -> Iterator[abc.Traversable]:
        # Plain directory first, then any zip interpretations.
        yield pathlib.Path(path_str)
        yield from cls._resolve_zip_path(path_str)

    @staticmethod
    def _resolve_zip_path(path_str: str):
        # Try each separator, rightmost first, as a potential boundary
        # between the zip archive and the path inside it.
        for sep in reversed(list(re.finditer(r'[\\/]', path_str))):
            archive, inner_raw = path_str[: sep.start()], path_str[sep.end() :]
            with contextlib.suppress(
                FileNotFoundError,
                IsADirectoryError,
                NotADirectoryError,
                PermissionError,
            ):
                inner = inner_raw.replace('\\', '/') + '/'
                yield ZipPath(archive, inner.lstrip('/'))

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
185
+
186
+
187
def _ensure_traversable(path):
    """
    Convert deprecated string arguments to traversables (pathlib.Path).

    Remove with Python 3.15.
    """
    if isinstance(path, str):
        warnings.warn(
            "String arguments are deprecated. Pass a Traversable instead.",
            DeprecationWarning,
            stacklevel=3,
        )
        return pathlib.Path(path)

    # Anything else is assumed to already satisfy the Traversable protocol.
    return path
lib/python3.10/site-packages/importlib_resources/simple.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Interface adapters for low-level readers.
3
+ """
4
+
5
+ import abc
6
+ import io
7
+ import itertools
8
+ from typing import BinaryIO, List
9
+
10
+ from .abc import Traversable, TraversableResources
11
+
12
+
13
class SimpleReader(abc.ABC):
    """
    The minimum, low-level interface required from a resource
    provider.
    """

    @property
    @abc.abstractmethod
    def package(self) -> str:
        """
        The name of the package for which this reader loads resources.
        """

    @abc.abstractmethod
    def children(self) -> List['SimpleReader']:
        """
        Obtain an iterable of SimpleReader for available
        child containers (e.g. directories).
        """

    @abc.abstractmethod
    def resources(self) -> List[str]:
        """
        Obtain available named resources for this virtual package.
        """

    @abc.abstractmethod
    def open_binary(self, resource: str) -> BinaryIO:
        """
        Obtain a File-like for a named resource.
        """

    @property
    def name(self):
        # The unqualified (final) segment of the dotted package name.
        return self.package.rpartition('.')[2]
48
+
49
+
50
class ResourceContainer(Traversable):
    """
    Traversable container for a package's resources via its reader.
    """

    def __init__(self, reader: SimpleReader):
        self.reader = reader  # the SimpleReader supplying names and children

    def is_dir(self):
        # A container always presents itself as a directory.
        return True

    def is_file(self):
        return False

    def iterdir(self):
        # Yield ResourceHandle objects for each named resource, followed by
        # nested ResourceContainer objects for each child reader.
        # NOTE(review): `self.reader.resources` is declared as an abstract
        # *method* on SimpleReader, yet it is iterated here without being
        # called (contrast `self.reader.children()` below). This only works
        # if concrete readers expose `resources` as a property or list —
        # confirm against the intended SimpleReader implementations.
        files = (ResourceHandle(self, name) for name in self.reader.resources)
        dirs = map(ResourceContainer, self.reader.children())
        return itertools.chain(files, dirs)

    def open(self, *args, **kwargs):
        # Directories cannot be opened for reading.
        raise IsADirectoryError()
71
+
72
+
73
class ResourceHandle(Traversable):
    """
    Handle to a named resource in a ResourceReader.
    """

    def __init__(self, parent: ResourceContainer, name: str):
        self.parent = parent
        self.name = name  # type: ignore[misc]

    def is_file(self):
        return True

    def is_dir(self):
        return False

    def open(self, mode='r', *args, **kwargs):
        # The reader only provides binary streams; wrap for text modes.
        raw = self.parent.reader.open_binary(self.name)
        if 'b' in mode:
            return raw
        return io.TextIOWrapper(raw, *args, **kwargs)

    def joinpath(self, name):
        raise RuntimeError("Cannot traverse into a resource")
96
+
97
+
98
class TraversableReader(TraversableResources, SimpleReader):
    """
    A TraversableResources based on SimpleReader. Resource providers
    may derive from this class to provide the TraversableResources
    interface by supplying the SimpleReader interface.
    """

    def files(self):
        # This reader itself acts as the root traversable container.
        return ResourceContainer(reader=self)
lib/python3.10/site-packages/importlib_resources/tests/test_contents.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import unittest
2
+
3
+ import importlib_resources as resources
4
+
5
+ from . import util
6
+
7
+
8
class ContentsTests:
    """Shared assertions about the data package's directory listing."""

    # Entries every variant of the data package must expose.
    expected = {
        '__init__.py',
        'binary.file',
        'subdirectory',
        'utf-16.file',
        'utf-8.file',
    }

    def test_contents(self):
        listing = {entry.name for entry in resources.files(self.data).iterdir()}
        assert self.expected.issubset(listing)
20
+
21
+
22
class ContentsDiskTests(ContentsTests, util.DiskSetup, unittest.TestCase):
    # Run the shared contents assertions against an on-disk package layout.
    pass
24
+
25
+
26
class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
    # Run the shared contents assertions against a zipped package layout.
    pass
28
+
29
+
30
class ContentsNamespaceTests(ContentsTests, util.DiskSetup, unittest.TestCase):
    # Same assertions, but the data is loaded as a namespace package.
    MODULE = 'namespacedata01'

    expected = {
        # no __init__ because of namespace design
        'binary.file',
        'subdirectory',
        'utf-16.file',
        'utf-8.file',
    }