ZTWHHH committed on
Commit
2a235e9
·
verified ·
1 Parent(s): 3758c17

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. evalkit_tf437/lib/python3.10/site-packages/aiosignal/__init__.pyi +12 -0
  3. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc +0 -0
  4. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc +0 -0
  5. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc +0 -0
  6. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc +0 -0
  7. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc +0 -0
  8. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc +0 -0
  9. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc +0 -0
  10. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc +0 -0
  11. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc +0 -0
  12. evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc +0 -0
  13. evalkit_tf437/lib/python3.10/site-packages/datasets/combine.py +215 -0
  14. evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc +0 -0
  15. evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc +0 -0
  16. evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc +0 -0
  17. evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc +0 -0
  18. evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc +0 -0
  19. evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc +0 -0
  20. evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc +0 -0
  21. evalkit_tf437/lib/python3.10/site-packages/datasets/features/audio.py +277 -0
  22. evalkit_tf437/lib/python3.10/site-packages/datasets/features/features.py +2167 -0
  23. evalkit_tf437/lib/python3.10/site-packages/datasets/features/image.py +376 -0
  24. evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc +0 -0
  25. evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc +0 -0
  26. evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/np_formatter.py +106 -0
  27. evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py +115 -0
  28. evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py +111 -0
  29. evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc +0 -0
  30. evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc +0 -0
  31. evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc +0 -0
  32. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc +0 -0
  33. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py +0 -0
  34. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py +406 -0
  35. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc +0 -0
  36. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc +0 -0
  37. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc +0 -0
  38. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py +0 -0
  39. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc +0 -0
  40. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py +0 -0
  41. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc +0 -0
  42. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py +99 -0
  43. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc +0 -0
  44. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc +0 -0
  45. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py +118 -0
  46. evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc +0 -0
  47. evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__init__.py +1 -0
  48. evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc +0 -0
  49. evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/parallel.py +113 -0
  50. evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__init__.py +46 -0
.gitattributes CHANGED
@@ -332,3 +332,4 @@ evalkit_tf437/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux
332
  evalkit_tf437/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
333
  evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
334
  evalkit_tf437/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
332
  evalkit_tf437/lib/python3.10/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
333
  evalkit_tf437/lib/python3.10/site-packages/fontTools/ttLib/tables/__pycache__/otData.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
334
  evalkit_tf437/lib/python3.10/site-packages/pandas/io/__pycache__/stata.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
335
+ evalkit_tf437/lib/python3.10/site-packages/pandas/io/formats/__pycache__/style.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_tf437/lib/python3.10/site-packages/aiosignal/__init__.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any, Generic, TypeVar

from frozenlist import FrozenList

__all__ = ("Signal",)

_T = TypeVar("_T")


class Signal(FrozenList[_T], Generic[_T]):
    """Type stub for ``aiosignal.Signal``: a frozen list of callbacks.

    ``send`` awaits each registered callback with the given arguments.
    """

    def __init__(self, owner: Any) -> None: ...
    def __repr__(self) -> str: ...
    async def send(self, *args: Any, **kwargs: Any) -> None: ...
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_reader.cpython-310.pyc ADDED
Binary file (23.3 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/arrow_writer.cpython-310.pyc ADDED
Binary file (24.4 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/combine.cpython-310.pyc ADDED
Binary file (9.11 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/data_files.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/dataset_dict.cpython-310.pyc ADDED
Binary file (98.9 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (3.54 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/info.cpython-310.pyc ADDED
Binary file (22.8 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/inspect.cpython-310.pyc ADDED
Binary file (23.4 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/naming.cpython-310.pyc ADDED
Binary file (2.85 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/__pycache__/splits.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/combine.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, TypeVar
2
+
3
+ from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
4
+ from .dataset_dict import DatasetDict, IterableDatasetDict
5
+ from .info import DatasetInfo
6
+ from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
7
+ from .splits import NamedSplit
8
+ from .utils import logging
9
+ from .utils.py_utils import Literal
10
+
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+
15
+ DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
16
+
17
+
18
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """
    Interleave several datasets (sources) into a single dataset, alternating
    between the sources to pick each example.

    Works on a list of [`Dataset`] objects or a list of [`IterableDataset`]
    objects (not a mix of both).

    - If `probabilities` is `None` (default), sources are cycled in order.
    - Otherwise, each example is drawn from a random source according to the
      provided probabilities.

    With `stopping_strategy="first_exhausted"` the result ends as soon as one
    source runs out of examples; with `"all_exhausted"` it ends once every
    source has been fully consumed at least once (oversampling).

    Note for iterable datasets: in a distributed setup or in PyTorch DataLoader
    workers, the stopping strategy is applied per process, so `"first_exhausted"`
    on a sharded iterable dataset can generate fewer samples in total (up to one
    missing sample per subdataset per worker).

    Args:
        datasets (`List[Dataset]` or `List[IterableDataset]`):
            List of datasets to interleave.
        probabilities (`List[float]`, *optional*, defaults to `None`):
            If specified, examples are sampled from one source at a time
            according to these probabilities.
        seed (`int`, *optional*, defaults to `None`):
            The random seed used to choose a source for each example.
        info ([`DatasetInfo`], *optional*):
            Dataset information, like description, citation, etc.
            <Added version="2.4.0"/>
        split ([`NamedSplit`], *optional*):
            Name of the dataset split.
            <Added version="2.4.0"/>
        stopping_strategy (`str`, defaults to `first_exhausted`):
            Either `first_exhausted` (undersampling: stop when one dataset is
            exhausted) or `all_exhausted` (oversampling: stop once every sample
            of every dataset has been added at least once). With
            `all_exhausted` the interleaved dataset can get very large:
            - with no probabilities, it has `max_length_datasets*nb_dataset` samples;
            - with probabilities, it is even larger if some datasets have a very
              low probability of being visited.

    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type matches the input
        `datasets` — `Dataset` for a list of `Dataset`, `IterableDataset` for a
        list of `IterableDataset`.

    Example:

    ```python
    >>> from datasets import Dataset, interleave_datasets
    >>> d1 = Dataset.from_dict({"a": [0, 1, 2]})
    >>> d2 = Dataset.from_dict({"a": [10, 11, 12]})
    >>> d3 = Dataset.from_dict({"a": [20, 21, 22]})
    >>> dataset = interleave_datasets([d1, d2, d3])
    >>> dataset["a"]
    [0, 10, 20, 1, 11, 21, 2, 12, 22]
    >>> dataset = interleave_datasets([d1, d2, d3], probabilities=[0.7, 0.2, 0.1], seed=42)
    >>> dataset["a"]
    [10, 0, 11, 1, 2]
    ```
    """
    # Imported locally to avoid a circular import at module load time.
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    dataset_type = other_type = None
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            # Give a targeted message when a (possibly empty) dict of splits was passed.
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        # The first element fixes which concrete type the whole list must have.
        if i == 0:
            if isinstance(dataset, Dataset):
                dataset_type, other_type = Dataset, IterableDataset
            else:
                dataset_type, other_type = IterableDataset, Dataset
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    # Dispatch to the map-style or iterable implementation.
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    return _interleave_iterable_datasets(
        datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
    )
156
+
157
+
158
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """
    Converts a list of [`Dataset`] with the same schema into a single [`Dataset`].

    Args:
        dsets (`List[datasets.Dataset]`):
            List of Datasets to concatenate. All elements must be of the same
            concrete type: all `Dataset` or all `IterableDataset`.
        info (`DatasetInfo`, *optional*):
            Dataset information, like description, citation, etc.
        split (`NamedSplit`, *optional*):
            Name of the dataset split.
        axis (`{0, 1}`, defaults to `0`):
            Axis to concatenate over, where `0` means over rows (vertically) and `1` means over columns
            (horizontally).

            <Added version="1.6.0"/>

    Returns:
        [`Dataset`] or [`IterableDataset`]: Return type matches the input
        `dsets` elements.

    Raises:
        ValueError: If `dsets` is empty, contains a non-dataset object, or
            mixes `Dataset` and `IterableDataset` objects.

    Example:

    ```py
    >>> ds3 = concatenate_datasets([ds1, ds2])
    ```
    """

    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            # Targeted message when a (possibly empty) dict of splits was passed.
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                # Fixed copy-paste from interleave_datasets: this function concatenates.
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        # The first element fixes which concrete type the whole list must have.
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            # Fixed copy-paste from interleave_datasets: this function concatenates.
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    # Dispatch to the map-style or iterable implementation.
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (803 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/convert.cpython-310.pyc ADDED
Binary file (6.06 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/datasets_cli.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/commands/__pycache__/dummy_data.cpython-310.pyc ADDED
Binary file (16.5 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (425 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/mock_download_manager.cpython-310.pyc ADDED
Binary file (8.02 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/download/__pycache__/streaming_download_manager.cpython-310.pyc ADDED
Binary file (37.7 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/features/audio.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dataclasses import dataclass, field
3
+ from io import BytesIO
4
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
5
+
6
+ import numpy as np
7
+ import pyarrow as pa
8
+
9
+ from .. import config
10
+ from ..download.download_config import DownloadConfig
11
+ from ..download.streaming_download_manager import xopen, xsplitext
12
+ from ..table import array_cast
13
+ from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
14
+
15
+
16
+ if TYPE_CHECKING:
17
+ from .features import FeatureType
18
+
19
+
20
@dataclass
class Audio:
    """Audio [`Feature`] to extract audio data from an audio file.

    Input: The Audio feature accepts as input:
    - A `str`: Absolute path to the audio file (i.e. random access is allowed).
    - A `dict` with the keys `path` (relative path of the audio file inside an
      archive) and `bytes` (bytes content of the audio file) — useful for
      archived files with sequential access.
    - A `dict` with the keys `path`, `array` (the audio samples) and
      `sampling_rate` — useful for archived files with sequential access.

    Args:
        sampling_rate (`int`, *optional*):
            Target sampling rate. If `None`, the native sampling rate is used.
        mono (`bool`, defaults to `True`):
            Whether to convert the audio signal to mono by averaging samples
            across channels.
        decode (`bool`, defaults to `True`):
            Whether to decode the audio data. If `False`, returns the
            underlying dictionary in the format
            `{"path": audio_path, "bytes": audio_bytes}`.

    Example:

    ```py
    >>> from datasets import load_dataset, Audio
    >>> ds = load_dataset("PolyAI/minds14", name="en-US", split="train")
    >>> ds = ds.cast_column("audio", Audio(sampling_rate=16000))
    >>> ds[0]["audio"]
    {'array': array([...], dtype=float32),
     'path': '.../602ba55abb1e6d0fbce92065.wav',
     'sampling_rate': 16000}
    ```
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        # The Arrow storage type backing this feature.
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow.

        Args:
            value (`str`, `bytes` or `dict`):
                Data passed as input to Audio feature.

        Returns:
            `dict`: `{"bytes": ..., "path": ...}` suitable for Arrow storage.

        Raises:
            ImportError: If `soundfile` is not installed.
            KeyError: If a PCM file is given without a `sampling_rate`.
            ValueError: If neither `path` nor `bytes` is provided.
        """
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            # A bare path: defer reading the bytes until decode/embed time.
            return {"bytes": None, "path": value}
        if isinstance(value, bytes):
            return {"bytes": value, "path": None}
        if "array" in value:
            # convert the audio array to wav bytes
            wav_buffer = BytesIO()
            sf.write(wav_buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": wav_buffer.getvalue(), "path": None}
        if value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    pcm_samples = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    pcm_samples = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                wav_buffer = BytesIO(bytes())
                sf.write(wav_buffer, pcm_samples, value["sampling_rate"], format="wav")
                return {"bytes": wav_buffer.getvalue(), "path": None}
            return {"bytes": None, "path": value.get("path")}
        if value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        raise ValueError(
            f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
        )

    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        """Decode example audio file into audio data.

        Args:
            value (`dict`):
                A dictionary with keys:

                - `path`: String with relative audio file path.
                - `bytes`: Bytes of the audio file.
            token_per_repo_id (`dict`, *optional*):
                To access and decode audio files from private repositories on
                the Hub, you can pass a dictionary
                repo_id (`str`) -> token (`bool` or `str`).

        Returns:
            `dict`: `{"path": ..., "array": ..., "sampling_rate": ...}`.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path = value["path"]
        stream = BytesIO(value["bytes"]) if value["bytes"] is not None else None
        if path is None and stream is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        # Formats that require a recent libsndfile are rejected early with a clear message.
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if stream is None:
            # No in-memory bytes: open the (possibly remote) path, resolving a
            # Hub token for private repositories when one was provided.
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            pattern = (
                config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL
            )
            try:
                repo_id = string_to_dict(source_url, pattern)["repo_id"]
                token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                # Not a Hub URL, or no token registered for this repo.
                token = None

            download_config = DownloadConfig(token=token)
            with xopen(path, "rb", download_config=download_config) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(stream)

        # soundfile returns (frames, channels); transpose so channels come first for librosa.
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type.

        The Arrow types that can be converted to the Audio pyarrow storage type are:

        - `pa.string()` - it must contain the "path" data
        - `pa.binary()` - it must contain the audio bytes
        - `pa.struct({"bytes": pa.binary()})`
        - `pa.struct({"path": pa.string()})`
        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter

        Args:
            storage (`Union[pa.StringArray, pa.StructArray]`):
                PyArrow array to cast.

        Returns:
            `pa.StructArray`: Array in the Audio arrow storage type, that is
            `pa.struct({"bytes": pa.binary(), "path": pa.string()})`
        """
        if pa.types.is_string(storage.type):
            # Strings are paths: pair them with all-null bytes.
            null_bytes = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([null_bytes, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            # Binary values are audio bytes: pair them with all-null paths.
            null_paths = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, null_paths], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            # Structs carrying raw samples are re-encoded to WAV bytes per element.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            # Normalize to exactly the {"bytes", "path"} field pair.
            if storage.type.get_field_index("bytes") >= 0:
                bytes_field = storage.field("bytes")
            else:
                bytes_field = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_field = storage.field("path")
            else:
                path_field = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_field, path_field], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed audio files into the Arrow array.

        Args:
            storage (`pa.StructArray`):
                PyArrow array to embed.

        Returns:
            `pa.StructArray`: Array in the Audio arrow storage type, that is
            `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
        """

        # Reads a file's bytes; passes None through unchanged (null entries).
        @no_op_if_value_is_null
        def read_audio_bytes(path):
            with xopen(path, "rb") as f:
                file_bytes = f.read()
            return file_bytes

        bytes_array = pa.array(
            [
                (read_audio_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        # Keep only the basename once the bytes are embedded.
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
evalkit_tf437/lib/python3.10/site-packages/datasets/features/features.py ADDED
@@ -0,0 +1,2167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ """This class handle features definition in datasets and some utilities to display table type."""
17
+
18
+ import copy
19
+ import json
20
+ import re
21
+ import sys
22
+ from collections.abc import Iterable, Mapping
23
+ from collections.abc import Sequence as SequenceABC
24
+ from dataclasses import InitVar, dataclass, field, fields
25
+ from functools import reduce, wraps
26
+ from operator import mul
27
+ from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union
28
+ from typing import Sequence as Sequence_
29
+
30
+ import numpy as np
31
+ import pandas as pd
32
+ import pyarrow as pa
33
+ import pyarrow.compute as pc
34
+ import pyarrow.types
35
+ import pyarrow_hotfix # noqa: F401 # to fix vulnerability on pyarrow<14.0.1
36
+ from pandas.api.extensions import ExtensionArray as PandasExtensionArray
37
+ from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype
38
+
39
+ from .. import config
40
+ from ..naming import camelcase_to_snakecase, snakecase_to_camelcase
41
+ from ..table import array_cast
42
+ from ..utils import logging
43
+ from ..utils.py_utils import asdict, first_non_null_value, zip_dict
44
+ from .audio import Audio
45
+ from .image import Image, encode_pil_image
46
+ from .translation import Translation, TranslationVariableLanguages
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str:
    """
    _arrow_to_datasets_dtype takes a pyarrow.DataType and converts it to a datasets string dtype.
    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`
    """
    # Dtypes with a fixed string name, checked in order.
    # The predicates are mutually exclusive, so this is equivalent to the elif chain it replaces.
    simple_dtypes = (
        (pyarrow.types.is_null, "null"),
        (pyarrow.types.is_boolean, "bool"),
        (pyarrow.types.is_int8, "int8"),
        (pyarrow.types.is_int16, "int16"),
        (pyarrow.types.is_int32, "int32"),
        (pyarrow.types.is_int64, "int64"),
        (pyarrow.types.is_uint8, "uint8"),
        (pyarrow.types.is_uint16, "uint16"),
        (pyarrow.types.is_uint32, "uint32"),
        (pyarrow.types.is_uint64, "uint64"),
        (pyarrow.types.is_float16, "float16"),  # pyarrow dtype is "halffloat"
        (pyarrow.types.is_float32, "float32"),  # pyarrow dtype is "float"
        (pyarrow.types.is_float64, "float64"),  # pyarrow dtype is "double"
        (pyarrow.types.is_date32, "date32"),  # pyarrow dtype is "date32[day]"
        (pyarrow.types.is_date64, "date64"),  # pyarrow dtype is "date64[ms]"
        (pyarrow.types.is_binary, "binary"),
        (pyarrow.types.is_large_binary, "large_binary"),
        (pyarrow.types.is_string, "string"),
        (pyarrow.types.is_large_string, "large_string"),
    )
    for predicate, dtype in simple_dtypes:
        if predicate(arrow_type):
            return dtype
    # Parametrized dtypes embed their unit / precision / timezone in the string representation.
    if pyarrow.types.is_time32(arrow_type):
        return f"time32[{pa.type_for_alias(str(arrow_type)).unit}]"
    if pyarrow.types.is_time64(arrow_type):
        return f"time64[{pa.type_for_alias(str(arrow_type)).unit}]"
    if pyarrow.types.is_timestamp(arrow_type):
        if arrow_type.tz is None:
            return f"timestamp[{arrow_type.unit}]"
        if arrow_type.tz:
            return f"timestamp[{arrow_type.unit}, tz={arrow_type.tz}]"
        raise ValueError(f"Unexpected timestamp object {arrow_type}.")
    if pyarrow.types.is_duration(arrow_type):
        return f"duration[{arrow_type.unit}]"
    if pyarrow.types.is_decimal128(arrow_type):
        return f"decimal128({arrow_type.precision}, {arrow_type.scale})"
    if pyarrow.types.is_decimal256(arrow_type):
        return f"decimal256({arrow_type.precision}, {arrow_type.scale})"
    raise ValueError(f"Arrow type {arrow_type} does not have a datasets dtype equivalent.")
114
+
115
+
116
def string_to_arrow(datasets_dtype: str) -> pa.DataType:
    """
    string_to_arrow takes a datasets string dtype and converts it to a pyarrow.DataType.

    In effect, `dt == string_to_arrow(_arrow_to_datasets_dtype(dt))`

    This is necessary because the datasets.Value() primitive type is constructed using a string dtype

        Value(dtype=str)

    But Features.type (via `get_nested_type()` expects to resolve Features into a pyarrow Schema,
    which means that each Value() must be able to resolve into a corresponding pyarrow.DataType, which is the
    purpose of this function.

    Raises:
        ValueError: if `datasets_dtype` cannot be mapped to any pyarrow type factory.
    """

    def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None):
        # Build a helpful error message, optionally listing valid examples and documentation links.
        msg = f"{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type."
        if examples:
            examples = ", ".join(examples[:-1]) + " or " + examples[-1] if len(examples) > 1 else examples[0]
            msg += f"\nValid examples include: {examples}."
        if urls:
            urls = ", ".join(urls[:-1]) + " and " + urls[-1] if len(urls) > 1 else urls[0]
            # Fixed typo: "insformation" -> "information"
            msg += f"\nFor more information, see: {urls}."
        return msg

    # Simple dtypes map directly to a zero-argument pyarrow factory function,
    # e.g. "int64" -> pa.int64() or "bool" -> pa.bool_() (note the trailing underscore).
    if datasets_dtype in pa.__dict__:
        return pa.__dict__[datasets_dtype]()

    if (datasets_dtype + "_") in pa.__dict__:
        return pa.__dict__[datasets_dtype + "_"]()

    # timestamp[(s|ms|us|ns)] or timestamp[(s|ms|us|ns), tz=<tzstring>]
    timestamp_matches = re.search(r"^timestamp\[(.*)\]$", datasets_dtype)
    if timestamp_matches:
        timestamp_internals = timestamp_matches.group(1)
        internals_matches = re.search(r"^(s|ms|us|ns),\s*tz=([a-zA-Z0-9/_+\-:]*)$", timestamp_internals)
        if timestamp_internals in ["s", "ms", "us", "ns"]:
            return pa.timestamp(timestamp_internals)
        elif internals_matches:
            return pa.timestamp(internals_matches.group(1), internals_matches.group(2))
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "timestamp",
                    # Fixed example: the closing bracket was missing.
                    examples=["timestamp[us]", "timestamp[us, tz=America/New_York]"],
                    urls=["https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html"],
                )
            )

    # duration[(s|ms|us|ns)]
    duration_matches = re.search(r"^duration\[(.*)\]$", datasets_dtype)
    if duration_matches:
        duration_internals = duration_matches.group(1)
        if duration_internals in ["s", "ms", "us", "ns"]:
            return pa.duration(duration_internals)
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "duration",
                    examples=["duration[s]", "duration[us]"],
                    urls=["https://arrow.apache.org/docs/python/generated/pyarrow.duration.html"],
                )
            )

    # time32[(s|ms)] or time64[(us|ns)] -- the bit width constrains the valid units
    time_matches = re.search(r"^time(.*)\[(.*)\]$", datasets_dtype)
    if time_matches:
        time_internals_bits = time_matches.group(1)
        if time_internals_bits == "32":
            time_internals_unit = time_matches.group(2)
            if time_internals_unit in ["s", "ms"]:
                return pa.time32(time_internals_unit)
            else:
                raise ValueError(
                    f"{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond)."
                )
        elif time_internals_bits == "64":
            time_internals_unit = time_matches.group(2)
            if time_internals_unit in ["us", "ns"]:
                return pa.time64(time_internals_unit)
            else:
                raise ValueError(
                    f"{time_internals_unit} is not a valid unit for the pyarrow time64 type. Supported units: us (microsecond) and ns (nanosecond)."
                )
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "time",
                    examples=["time32[s]", "time64[us]"],
                    urls=[
                        "https://arrow.apache.org/docs/python/generated/pyarrow.time32.html",
                        "https://arrow.apache.org/docs/python/generated/pyarrow.time64.html",
                    ],
                )
            )

    # decimal128(precision, scale) or decimal256(precision, scale); scale may be negative
    decimal_matches = re.search(r"^decimal(.*)\((.*)\)$", datasets_dtype)
    if decimal_matches:
        decimal_internals_bits = decimal_matches.group(1)
        if decimal_internals_bits == "128":
            decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
            if decimal_internals_precision_and_scale:
                precision = decimal_internals_precision_and_scale.group(1)
                scale = decimal_internals_precision_and_scale.group(2)
                return pa.decimal128(int(precision), int(scale))
            else:
                raise ValueError(
                    _dtype_error_msg(
                        datasets_dtype,
                        "decimal128",
                        examples=["decimal128(10, 2)", "decimal128(4, -2)"],
                        urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html"],
                    )
                )
        elif decimal_internals_bits == "256":
            decimal_internals_precision_and_scale = re.search(r"^(\d+),\s*(-?\d+)$", decimal_matches.group(2))
            if decimal_internals_precision_and_scale:
                precision = decimal_internals_precision_and_scale.group(1)
                scale = decimal_internals_precision_and_scale.group(2)
                return pa.decimal256(int(precision), int(scale))
            else:
                raise ValueError(
                    _dtype_error_msg(
                        datasets_dtype,
                        "decimal256",
                        examples=["decimal256(30, 2)", "decimal256(38, -4)"],
                        urls=["https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html"],
                    )
                )
        else:
            raise ValueError(
                _dtype_error_msg(
                    datasets_dtype,
                    "decimal",
                    examples=["decimal128(12, 3)", "decimal256(40, 6)"],
                    urls=[
                        "https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html",
                        "https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html",
                    ],
                )
            )

    raise ValueError(
        f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. "
        f"Please make sure to use a correct data type, see: "
        f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
    )
263
+
264
+
265
def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]:
    """
    Cast pytorch/tensorflow/pandas objects to python numpy array/lists.
    It works recursively.

    If `optimize_list_casting` is True, to avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.

    Args:
        obj: the object (nested struct) to cast.
        only_1d_for_numpy (bool): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only support converting 1-dimensional array values.
        optimize_list_casting (bool): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
        has_changed (bool): True if the object has been changed, False if it is identical
    """

    # Only import optional backends that are available AND already loaded by the caller,
    # so this function never triggers a heavy import by itself.
    if config.TF_AVAILABLE and "tensorflow" in sys.modules:
        import tensorflow as tf

    if config.TORCH_AVAILABLE and "torch" in sys.modules:
        import torch

    if config.JAX_AVAILABLE and "jax" in sys.modules:
        import jax.numpy as jnp

    if config.PIL_AVAILABLE and "PIL" in sys.modules:
        import PIL.Image

    if isinstance(obj, np.ndarray):
        if obj.ndim == 0:
            # 0-d array: unwrap to a python/numpy scalar
            return obj[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj, False
        else:
            # multi-dim array: convert to nested lists of 1-d arrays
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj
                ],
                True,
            )
    elif config.TORCH_AVAILABLE and "torch" in sys.modules and isinstance(obj, torch.Tensor):
        # torch tensors are detached and moved to CPU before the numpy conversion
        if obj.ndim == 0:
            return obj.detach().cpu().numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.detach().cpu().numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.detach().cpu().numpy()
                ],
                True,
            )
    elif config.TF_AVAILABLE and "tensorflow" in sys.modules and isinstance(obj, tf.Tensor):
        if obj.ndim == 0:
            return obj.numpy()[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return obj.numpy(), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in obj.numpy()
                ],
                True,
            )
    elif config.JAX_AVAILABLE and "jax" in sys.modules and isinstance(obj, jnp.ndarray):
        if obj.ndim == 0:
            return np.asarray(obj)[()], True
        elif not only_1d_for_numpy or obj.ndim == 1:
            return np.asarray(obj), True
        else:
            return (
                [
                    _cast_to_python_objects(
                        x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                    )[0]
                    for x in np.asarray(obj)
                ],
                True,
            )
    elif config.PIL_AVAILABLE and "PIL" in sys.modules and isinstance(obj, PIL.Image.Image):
        return encode_pil_image(obj), True
    elif isinstance(obj, pd.Series):
        return (
            _cast_to_python_objects(
                obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, pd.DataFrame):
        # DataFrame becomes a dict of column name -> casted column values
        return (
            {
                key: _cast_to_python_objects(
                    value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                )[0]
                for key, value in obj.to_dict("series").items()
            },
            True,
        )
    elif isinstance(obj, pd.Timestamp):
        return obj.to_pydatetime(), True
    elif isinstance(obj, pd.Timedelta):
        return obj.to_pytimedelta(), True
    elif isinstance(obj, Mapping):
        # non-dict mappings are materialized into plain dicts
        has_changed = not isinstance(obj, dict)
        output = {}
        for k, v in obj.items():
            casted_v, has_changed_v = _cast_to_python_objects(
                v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            has_changed |= has_changed_v
            output[k] = casted_v
        return output if has_changed else obj, has_changed
    elif hasattr(obj, "__array__"):
        # anything array-like (implementing the numpy array protocol) is converted first
        return (
            _cast_to_python_objects(
                obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )[0],
            True,
        )
    elif isinstance(obj, (list, tuple)):
        if len(obj) > 0:
            # find the first non-null, non-empty element (falls back to the last element)
            for first_elmt in obj:
                if _check_non_null_non_empty_recursive(first_elmt):
                    break
            casted_first_elmt, has_changed_first_elmt = _cast_to_python_objects(
                first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
            )
            if has_changed_first_elmt or not optimize_list_casting:
                # the probe element changed (or optimization is off): cast every element
                return (
                    [
                        _cast_to_python_objects(
                            elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
                        )[0]
                        for elmt in obj
                    ],
                    True,
                )
            else:
                if isinstance(obj, (list, tuple)):
                    # always True here given the enclosing elif; kept for safety
                    return obj, False
                else:
                    return list(obj), True
        else:
            return obj, False
    else:
        return obj, False
426
+
427
+
428
def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any:
    """
    Cast numpy/pytorch/tensorflow/pandas objects to python lists.
    It works recursively.

    If `optimize_list_casting` is True, To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be casted.
    If the first element needs to be casted, then all the elements of the list will be casted, otherwise they'll stay the same.
    This trick allows to cast objects that contain tokenizers outputs without iterating over every single token for example.

    Args:
        obj: the object (nested struct) to cast
        only_1d_for_numpy (bool, default ``False``): whether to keep the full multi-dim tensors as multi-dim numpy arrays, or convert them to
            nested lists of 1-dimensional numpy arrays. This can be useful to keep only 1-d arrays to instantiate Arrow arrays.
            Indeed Arrow only support converting 1-dimensional array values.
        optimize_list_casting (bool, default ``True``): whether to optimize list casting by checking the first non-null element to see if it needs to be casted
            and if it doesn't, not checking the rest of the list elements.

    Returns:
        casted_obj: the casted object
    """
    # Thin public wrapper: drop the `has_changed` flag returned by the recursive helper.
    casted_obj, _ = _cast_to_python_objects(
        obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting
    )
    return casted_obj
451
+
452
+
453
@dataclass
class Value:
    """
    The `Value` dtypes are as follows:

    - `null`
    - `bool`
    - `int8`
    - `int16`
    - `int32`
    - `int64`
    - `uint8`
    - `uint16`
    - `uint32`
    - `uint64`
    - `float16`
    - `float32` (alias float)
    - `float64` (alias double)
    - `time32[(s|ms)]`
    - `time64[(us|ns)]`
    - `timestamp[(s|ms|us|ns)]`
    - `timestamp[(s|ms|us|ns), tz=(tzstring)]`
    - `date32`
    - `date64`
    - `duration[(s|ms|us|ns)]`
    - `decimal128(precision, scale)`
    - `decimal256(precision, scale)`
    - `binary`
    - `large_binary`
    - `string`
    - `large_string`

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'stars': Value(dtype='int32')})
    >>> features
    {'stars': Value(dtype='int32', id=None)}
    ```
    """

    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Value", init=False, repr=False)

    def __post_init__(self):
        # Normalize dtype aliases that may have been inferred from user data,
        # then resolve the pyarrow type once and cache it on the instance.
        _dtype_aliases = {"double": "float64", "float": "float32"}
        self.dtype = _dtype_aliases.get(self.dtype, self.dtype)
        self.pa_type = string_to_arrow(self.dtype)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value):
        # Coerce the example value to the python type matching this feature's pyarrow type.
        pa_type = self.pa_type
        if pa.types.is_boolean(pa_type):
            return bool(value)
        if pa.types.is_integer(pa_type):
            return int(value)
        if pa.types.is_floating(pa_type):
            return float(value)
        if pa.types.is_string(pa_type):
            return str(value)
        return value
522
+
523
+
524
+ class _ArrayXD:
525
+ def __post_init__(self):
526
+ self.shape = tuple(self.shape)
527
+
528
+ def __call__(self):
529
+ pa_type = globals()[self.__class__.__name__ + "ExtensionType"](self.shape, self.dtype)
530
+ return pa_type
531
+
532
+ def encode_example(self, value):
533
+ return value
534
+
535
+
536
@dataclass
class Array2D(_ArrayXD):
    """Create a two-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array2D(shape=(1, 3), dtype='int32')})
    ```
    """

    # Size of each of the 2 dimensions (only the first dimension may be dynamic, i.e. None).
    shape: tuple
    # Scalar dtype name of the array values, e.g. "int32" (resolved via `string_to_arrow`).
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array2D", init=False, repr=False)
559
+
560
+
561
@dataclass
class Array3D(_ArrayXD):
    """Create a three-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array3D(shape=(1, 2, 3), dtype='int32')})
    ```
    """

    # Size of each of the 3 dimensions (only the first dimension may be dynamic, i.e. None).
    shape: tuple
    # Scalar dtype name of the array values, e.g. "int32" (resolved via `string_to_arrow`).
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array3D", init=False, repr=False)
584
+
585
+
586
@dataclass
class Array4D(_ArrayXD):
    """Create a four-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array4D(shape=(1, 2, 2, 3), dtype='int32')})
    ```
    """

    # Size of each of the 4 dimensions (only the first dimension may be dynamic, i.e. None).
    shape: tuple
    # Scalar dtype name of the array values, e.g. "int32" (resolved via `string_to_arrow`).
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array4D", init=False, repr=False)
609
+
610
+
611
@dataclass
class Array5D(_ArrayXD):
    """Create a five-dimensional array.

    Args:
        shape (`tuple`):
            The size of each dimension.
        dtype (`str`):
            The value of the data type.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'x': Array5D(shape=(1, 2, 2, 3, 3), dtype='int32')})
    ```
    """

    # Size of each of the 5 dimensions (only the first dimension may be dynamic, i.e. None).
    shape: tuple
    # Scalar dtype name of the array values, e.g. "int32" (resolved via `string_to_arrow`).
    dtype: str
    id: Optional[str] = None
    # Automatically constructed
    _type: str = field(default="Array5D", init=False, repr=False)
634
+
635
+
636
class _ArrayXDExtensionType(pa.ExtensionType):
    """Base pyarrow extension type for the fixed-dimensionality ArrayND features.

    Concrete subclasses (Array2DExtensionType ... Array5DExtensionType) only set `ndims`.
    Storage is nested (variable-size) lists of the scalar dtype, one list level per dimension.
    """

    # Number of dimensions; set by each concrete subclass, must be > 1.
    ndims: Optional[int] = None

    def __init__(self, shape: tuple, dtype: str):
        if self.ndims is None or self.ndims <= 1:
            raise ValueError("You must instantiate an array type with a value for dim that is > 1")
        if len(shape) != self.ndims:
            raise ValueError(f"shape={shape} and ndims={self.ndims} don't match")
        # Only the first dimension may be dynamic (None); all others must be fixed.
        for dim in range(1, self.ndims):
            if shape[dim] is None:
                raise ValueError(f"Support only dynamic size on first dimension. Got: {shape}")
        self.shape = tuple(shape)
        self.value_type = dtype
        self.storage_dtype = self._generate_dtype(self.value_type)
        pa.ExtensionType.__init__(self, self.storage_dtype, f"{self.__class__.__module__}.{self.__class__.__name__}")

    def __arrow_ext_serialize__(self):
        # Serialize the type parameters as JSON; consumed by __arrow_ext_deserialize__.
        return json.dumps((self.shape, self.value_type)).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        args = json.loads(serialized)
        return cls(*args)

    # This was added to pa.ExtensionType in pyarrow >= 13.0.0
    def __reduce__(self):
        return self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())

    def __hash__(self):
        return hash((self.__class__, self.shape, self.value_type))

    def __arrow_ext_class__(self):
        # Arrays of this type are materialized as ArrayExtensionArray.
        return ArrayExtensionArray

    def _generate_dtype(self, dtype):
        # Build the nested list storage type: one pa.list_ wrapper per dimension.
        dtype = string_to_arrow(dtype)
        for d in reversed(self.shape):
            dtype = pa.list_(dtype)
            # Don't specify the size of the list, since fixed length list arrays have issues
            # being validated after slicing in pyarrow 0.17.1
        return dtype

    def to_pandas_dtype(self):
        return PandasArrayExtensionDtype(self.value_type)
680
+
681
+
682
class Array2DExtensionType(_ArrayXDExtensionType):
    # Arrow extension type backing the `Array2D` feature.
    ndims = 2
684
+
685
+
686
class Array3DExtensionType(_ArrayXDExtensionType):
    # Arrow extension type backing the `Array3D` feature.
    ndims = 3
688
+
689
+
690
class Array4DExtensionType(_ArrayXDExtensionType):
    # Arrow extension type backing the `Array4D` feature.
    ndims = 4
692
+
693
+
694
class Array5DExtensionType(_ArrayXDExtensionType):
    # Arrow extension type backing the `Array5D` feature.
    ndims = 5
696
+
697
+
698
# Register the extension types for deserialization.
# The shape/dtype arguments here are placeholders: actual parameters are reconstructed
# from the serialized metadata via `__arrow_ext_deserialize__`.
pa.register_extension_type(Array2DExtensionType((1, 2), "int64"))
pa.register_extension_type(Array3DExtensionType((1, 2, 3), "int64"))
pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), "int64"))
pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), "int64"))
703
+
704
+
705
def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool = False) -> bool:
    """
    When converting a pyarrow array to a numpy array, we must know whether this could be done in zero-copy or not.
    This function returns the value of the ``zero_copy_only`` parameter to pass to ``.to_numpy()``, given the type of the pyarrow array.

    Zero copy is available for all primitive types except booleans and temporal types
    (date, time, timestamp or duration): primitive types are types whose physical
    representation is the same in arrow and in numpy.
    See https://arrow.apache.org/docs/python/generated/pyarrow.Array.html#pyarrow.Array.to_numpy
    and https://issues.apache.org/jira/browse/ARROW-2871?jql=text%20~%20%22boolean%20to_numpy%22
    and https://github.com/wesm/arrow/blob/c07b9b48cf3e0bbbab493992a492ae47e5b04cad/python/pyarrow/types.pxi#L821
    """
    if unnest:
        # Drill down through (possibly nested) list types to the innermost value type.
        while pa.types.is_list(pa_type):
            pa_type = pa_type.value_type
    if not pa.types.is_primitive(pa_type):
        return False
    return not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))
725
+
726
+
727
class ArrayExtensionArray(pa.ExtensionArray):
    """Extension array for the ArrayND extension types, with numpy-oriented accessors."""

    def __array__(self):
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        return self.to_numpy(zero_copy_only=zero_copy_only)

    def __getitem__(self, i):
        # Index into the underlying nested-list storage directly.
        return self.storage[i]

    def to_numpy(self, zero_copy_only=True):
        """Convert to a numpy array; null entries become NaN (fixed shape) or np.nan placeholders."""
        storage: pa.ListArray = self.storage
        null_mask = storage.is_null().to_numpy(zero_copy_only=False)

        if self.type.shape[0] is not None:
            # Fixed first dimension: flatten all list levels, then reshape in one go.
            size = 1
            # Positions where nulls must be re-inserted after the (null-free) reshape.
            null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask))

            for i in range(self.type.ndims):
                size *= self.type.shape[i]
                storage = storage.flatten()
            numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only)
            numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape)

            if len(null_indices):
                # np.insert requires a float dtype to hold the NaN fill values.
                numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0)

        else:
            # Dynamic first dimension: convert element by element, using the list offsets
            # to recover each element's first-dimension length.
            shape = self.type.shape
            ndims = self.type.ndims
            arrays = []
            first_dim_offsets = np.array([off.as_py() for off in storage.offsets])
            for i, is_null in enumerate(null_mask):
                if is_null:
                    arrays.append(np.nan)
                else:
                    storage_el = storage[i : i + 1]
                    first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i]
                    # flatten storage
                    for _ in range(ndims):
                        storage_el = storage_el.flatten()

                    numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only)
                    arrays.append(numpy_arr.reshape(first_dim, *shape[1:]))

            if len(np.unique(np.diff(first_dim_offsets))) > 1:
                # ragged: elements have different first-dimension lengths, so use an object array
                numpy_arr = np.empty(len(arrays), dtype=object)
                numpy_arr[:] = arrays
            else:
                numpy_arr = np.array(arrays)

        return numpy_arr

    def to_pylist(self):
        """Convert to a list of (nested) python lists."""
        zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True)
        numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only)
        if self.type.shape[0] is None and numpy_arr.dtype == object:
            # ragged object array: each entry is itself an ndarray to convert
            return [arr.tolist() for arr in numpy_arr.tolist()]
        else:
            return numpy_arr.tolist()
786
+
787
+
788
class PandasArrayExtensionDtype(PandasExtensionDtype):
    """Pandas extension dtype wrapping an ArrayXD value type, so ArrayXD columns
    round-trip between Arrow and pandas with NumPy arrays as cell values."""

    # NOTE(review): pandas convention is a *tuple* of attribute names; a bare
    # string happens to iterate per-character — confirm this is intended here.
    _metadata = "value_type"

    def __init__(self, value_type: Union["PandasArrayExtensionDtype", np.dtype]):
        self._value_type = value_type

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        # Chunked input is merged first: the extension type is re-wrapped around
        # the concatenated storage before the NumPy conversion.
        if isinstance(array, pa.ChunkedArray):
            array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks]))
        zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True)
        numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only)
        return PandasArrayExtensionArray(numpy_arr)

    @classmethod
    def construct_array_type(cls):
        # Required by the pandas ExtensionDtype API.
        return PandasArrayExtensionArray

    @property
    def type(self) -> type:
        # Scalars of this dtype are plain NumPy arrays.
        return np.ndarray

    @property
    def kind(self) -> str:
        # "O" = object kind (pandas dtype character code).
        return "O"

    @property
    def name(self) -> str:
        return f"array[{self.value_type}]"

    @property
    def value_type(self) -> np.dtype:
        return self._value_type
820
+
821
+
822
class PandasArrayExtensionArray(PandasExtensionArray):
    """Pandas extension array whose elements are NumPy arrays (ArrayXD features)."""

    def __init__(self, data: np.ndarray, copy: bool = False):
        self._data = data if not copy else np.array(data)
        self._dtype = PandasArrayExtensionDtype(data.dtype)

    def __array__(self, dtype=None):
        """
        Convert to NumPy Array.
        Note that Pandas expects a 1D array when dtype is set to object.
        But for other dtypes, the returned shape is the same as the one of ``data``.

        More info about pandas 1D requirement for PandasExtensionArray here:
        https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.api.extensions.ExtensionArray.html#pandas.api.extensions.ExtensionArray

        """
        if dtype == object:
            # Build a 1D object array whose cells are the (possibly >1-D) rows.
            out = np.empty(len(self._data), dtype=object)
            for i in range(len(self._data)):
                out[i] = self._data[i]
            return out
        if dtype is None:
            return self._data
        else:
            return self._data.astype(dtype)

    def copy(self, deep: bool = False) -> "PandasArrayExtensionArray":
        # Always copies the underlying buffer, regardless of `deep`.
        return PandasArrayExtensionArray(self._data, copy=True)

    @classmethod
    def _from_sequence(
        cls, scalars, dtype: Optional[PandasArrayExtensionDtype] = None, copy: bool = False
    ) -> "PandasArrayExtensionArray":
        # Homogeneous ndarrays stack into one contiguous array; otherwise fall
        # back to a 1D object array of per-row arrays (ragged case).
        if len(scalars) > 1 and all(
            isinstance(x, np.ndarray) and x.shape == scalars[0].shape and x.dtype == scalars[0].dtype for x in scalars
        ):
            data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy)
        else:
            data = np.empty(len(scalars), dtype=object)
            data[:] = scalars
        return cls(data, copy=copy)

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence_["PandasArrayExtensionArray"]) -> "PandasArrayExtensionArray":
        # Same shape and dtype everywhere -> contiguous vstack; otherwise keep
        # each chunk's data as an object cell (ragged case).
        if len(to_concat) > 1 and all(
            va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype
            for va in to_concat
        ):
            data = np.vstack([va._data for va in to_concat])
        else:
            data = np.empty(len(to_concat), dtype=object)
            data[:] = [va._data for va in to_concat]
        return cls(data, copy=False)

    @property
    def dtype(self) -> PandasArrayExtensionDtype:
        return self._dtype

    @property
    def nbytes(self) -> int:
        return self._data.nbytes

    def isna(self) -> np.ndarray:
        # A row counts as missing if *any* of its values is NA.
        return np.array([pd.isna(arr).any() for arr in self._data])

    def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None:
        # This extension array is read-only.
        raise NotImplementedError()

    def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, "PandasArrayExtensionArray"]:
        if isinstance(item, int):
            # Scalar access returns the raw ndarray cell.
            return self._data[item]
        return PandasArrayExtensionArray(self._data[item], copy=False)

    def take(
        self, indices: Sequence_[int], allow_fill: bool = False, fill_value: bool = None
    ) -> "PandasArrayExtensionArray":
        # Pandas `take` contract: with allow_fill=True, -1 indices are filled
        # with `fill_value` (default: the dtype's NA value).
        indices: np.ndarray = np.asarray(indices, dtype=int)
        if allow_fill:
            fill_value = (
                self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type)
            )
            mask = indices == -1
            if (indices < -1).any():
                raise ValueError("Invalid value in `indices`, must be all >= -1 for `allow_fill` is True")
            elif len(self) > 0:
                pass
            elif not np.all(mask):
                raise IndexError("Invalid take for empty PandasArrayExtensionArray, must be all -1.")
            else:
                # Empty source: every requested position becomes a fill value.
                data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type)
                return PandasArrayExtensionArray(data, copy=False)
        took = self._data.take(indices, axis=0)
        if allow_fill and mask.any():
            took[mask] = [fill_value] * np.sum(mask)
        return PandasArrayExtensionArray(took, copy=False)

    def __len__(self) -> int:
        return len(self._data)

    def __eq__(self, other) -> np.ndarray:
        if not isinstance(other, PandasArrayExtensionArray):
            raise NotImplementedError(f"Invalid type to compare to: {type(other)}")
        # NOTE(review): `.all()` reduces to a single bool-like scalar, so despite
        # the `np.ndarray` annotation this does not return an element-wise array
        # — confirm whether callers rely on the scalar behavior.
        return (self._data == other._data).all()
924
+
925
+
926
def pandas_types_mapper(dtype):
    """Map an Arrow ArrayXD extension type to its pandas extension dtype.

    Returns a `PandasArrayExtensionDtype` wrapping the value type for ArrayXD
    extension types, and `None` for every other Arrow dtype.
    """
    if not isinstance(dtype, _ArrayXDExtensionType):
        return None
    return PandasArrayExtensionDtype(dtype.value_type)
929
+
930
+
931
@dataclass
class ClassLabel:
    """Feature type for integer class labels.

    There are 3 ways to define a `ClassLabel`, which correspond to the 3 arguments:

    * `num_classes`: Create 0 to (num_classes-1) labels.
    * `names`: List of label strings.
    * `names_file`: File containing the list of labels.

    Under the hood the labels are stored as integers.
    You can use negative integers to represent unknown/missing labels.

    Args:
        num_classes (`int`, *optional*):
            Number of classes. All labels must be < `num_classes`.
        names (`list` of `str`, *optional*):
            String names for the integer classes.
            The order in which the names are provided is kept.
        names_file (`str`, *optional*):
            Path to a file with names for the integer classes, one per line.

    Example:

    ```py
    >>> from datasets import Features
    >>> features = Features({'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'])})
    >>> features
    {'label': ClassLabel(num_classes=3, names=['bad', 'ok', 'good'], id=None)}
    ```
    """

    num_classes: InitVar[Optional[int]] = None  # Pseudo-field: ignored by asdict/fields when converting to/from dict
    names: List[str] = None
    names_file: InitVar[Optional[str]] = None  # Pseudo-field: ignored by asdict/fields when converting to/from dict
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "int64"
    pa_type: ClassVar[Any] = pa.int64()
    # Mappings built in __post_init__: label name -> index and index -> name.
    _str2int: ClassVar[Dict[str, int]] = None
    _int2str: ClassVar[List[str]] = None  # annotation fixed: built as a list of str, not a dict
    _type: str = field(default="ClassLabel", init=False, repr=False)

    def __post_init__(self, num_classes, names_file):
        # Re-attach the InitVars as regular attributes so they survive on the instance.
        self.num_classes = num_classes
        self.names_file = names_file
        if self.names_file is not None and self.names is not None:
            raise ValueError("Please provide either names or names_file but not both.")
        # Set self.names
        if self.names is None:
            if self.names_file is not None:
                self.names = self._load_names_from_file(self.names_file)
            elif self.num_classes is not None:
                # Fall back to stringified indices as names.
                self.names = [str(i) for i in range(self.num_classes)]
            else:
                raise ValueError("Please provide either num_classes, names or names_file.")
        elif not isinstance(self.names, SequenceABC):
            raise TypeError(f"Please provide names as a list, is {type(self.names)}")
        # Set self.num_classes
        if self.num_classes is None:
            self.num_classes = len(self.names)
        elif self.num_classes != len(self.names):
            raise ValueError(
                "ClassLabel number of names do not match the defined num_classes. "
                f"Got {len(self.names)} names VS {self.num_classes} num_classes"
            )
        # Prepare mappings
        self._int2str = [str(name) for name in self.names]
        self._str2int = {name: i for i, name in enumerate(self._int2str)}
        if len(self._int2str) != len(self._str2int):
            raise ValueError("Some label names are duplicated. Each label name should be unique.")

    def __call__(self):
        # Returns the underlying pyarrow storage type (used by get_nested_type).
        return self.pa_type

    def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]:
        """Conversion class name `string` => `integer`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train")
        >>> ds.features["label"].str2int('neg')
        0
        ```
        """
        if not isinstance(values, str) and not isinstance(values, Iterable):
            raise ValueError(
                f"Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
            )
        return_list = True
        if isinstance(values, str):
            # A lone string is converted and returned as a scalar.
            values = [values]
            return_list = False

        output = [self._strval2int(value) for value in values]
        return output if return_list else output[0]

    def _strval2int(self, value: str) -> int:
        # Resolve a label string to its integer index, trying, in order:
        # exact name match, whitespace-stripped match, then literal int parse.
        failed_parse = False
        value = str(value)
        # first attempt - raw string value
        int_value = self._str2int.get(value)
        if int_value is None:
            # second attempt - strip whitespace
            int_value = self._str2int.get(value.strip())
            if int_value is None:
                # third attempt - convert str to int
                try:
                    int_value = int(value)
                except ValueError:
                    failed_parse = True
                else:
                    # -1 is allowed (missing label); anything below, or >= num_classes, is invalid.
                    if int_value < -1 or int_value >= self.num_classes:
                        failed_parse = True
        if failed_parse:
            raise ValueError(f"Invalid string class label {value}")
        return int_value

    def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]:
        """Conversion `integer` => class name `string`.

        Regarding unknown/missing labels: passing negative integers raises `ValueError`.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train")
        >>> ds.features["label"].int2str(0)
        'neg'
        ```
        """
        if not isinstance(values, int) and not isinstance(values, Iterable):
            raise ValueError(
                f"Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)"
            )
        return_list = True
        if isinstance(values, int):
            values = [values]
            return_list = False

        # Validate all indices before converting any of them.
        for v in values:
            if not 0 <= v < self.num_classes:
                raise ValueError(f"Invalid integer class label {v:d}")

        output = [self._int2str[int(v)] for v in values]
        return output if return_list else output[0]

    def encode_example(self, example_data):
        """Validate (and, for strings, convert) a single example into its integer label."""
        if self.num_classes is None:
            raise ValueError(
                "Trying to use ClassLabel feature with undefined number of class. "
                "Please set ClassLabel.names or num_classes."
            )

        # If a string is given, convert to associated integer
        if isinstance(example_data, str):
            example_data = self.str2int(example_data)

        # Allowing -1 to mean no label.
        if not -1 <= example_data < self.num_classes:
            raise ValueError(f"Class label {example_data:d} greater than configured num_classes {self.num_classes}")
        return example_data

    def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array:
        """Cast an Arrow array to the `ClassLabel` arrow storage type.
        The Arrow types that can be converted to the `ClassLabel` pyarrow storage type are:

        - `pa.string()`
        - `pa.int()`

        Args:
            storage (`Union[pa.StringArray, pa.IntegerArray]`):
                PyArrow array to cast.

        Returns:
            `pa.Int64Array`: Array in the `ClassLabel` arrow storage type.
        """
        if isinstance(storage, pa.IntegerArray) and len(storage) > 0:
            # Only the maximum needs validating: pc.min_max does it in one pass.
            min_max = pc.min_max(storage).as_py()
            if min_max["max"] is not None and min_max["max"] >= self.num_classes:
                raise ValueError(
                    f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}"
                )
        elif isinstance(storage, pa.StringArray):
            # Convert each label name to its integer index, preserving nulls.
            storage = pa.array(
                [self._strval2int(label) if label is not None else None for label in storage.to_pylist()]
            )
        return array_cast(storage, self.pa_type)

    @staticmethod
    def _load_names_from_file(names_filepath):
        # One label per line; blank lines are skipped.
        with open(names_filepath, encoding="utf-8") as f:
            return [name.strip() for name in f.read().split("\n") if name.strip()]  # Filter empty names
1127
+
1128
+
1129
@dataclass
class Sequence:
    """Construct a list of feature from a single type or a dict of types.
    Mostly here for compatibility with tfds.

    Args:
        feature:
            A list of features of a single type or a dictionary of types.
        length (`int`):
            Length of the sequence.

    Example:

    ```py
    >>> from datasets import Features, Sequence, Value, ClassLabel
    >>> features = Features({'post': Sequence(feature={'text': Value(dtype='string'), 'upvotes': Value(dtype='int32'), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'])})})
    >>> features
    {'post': Sequence(feature={'text': Value(dtype='string', id=None), 'upvotes': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['hot', 'cold'], id=None)}, length=-1, id=None)}
    ```
    """

    feature: Any
    length: int = -1  # -1 means variable-length
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "list"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Sequence", init=False, repr=False)
1157
+
1158
+
1159
# Union of every supported feature type; used in annotations throughout this
# module (see e.g. `get_nested_type` and `encode_nested_example`).
FeatureType = Union[
    dict,
    list,
    tuple,
    Value,
    ClassLabel,
    Translation,
    TranslationVariableLanguages,
    Sequence,
    Array2D,
    Array3D,
    Array4D,
    Array5D,
    Audio,
    Image,
]
1175
+
1176
+
1177
def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType] = None) -> bool:
    """Return True if `obj` is neither None nor an (arbitrarily nested) empty sequence.

    Only the first element of each nested list/tuple is inspected; recursion stops
    as soon as a non-sequence value or an empty sequence is reached.
    """
    if obj is None:
        return False
    schema_allows_sequence = schema is None or isinstance(schema, (list, tuple, Sequence))
    if not (isinstance(obj, (list, tuple)) and schema_allows_sequence):
        # Scalar (or sequence the schema does not treat as one): non-null, so OK.
        return True
    if not obj:
        # An empty sequence counts as "empty".
        return False
    # Descend into the schema alongside the data.
    if schema is None:
        sub_schema = None
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
    else:
        sub_schema = schema.feature
    return _check_non_null_non_empty_recursive(obj[0], sub_schema)
1197
+
1198
+
1199
def get_nested_type(schema: FeatureType) -> pa.DataType:
    """
    get_nested_type() converts a datasets.FeatureType into a pyarrow.DataType, and acts as the inverse of
    generate_from_arrow_type().

    It performs double-duty as the implementation of Features.type and handles the conversion of
    datasets.Feature->pa.struct
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, Features):
        return pa.struct(
            {key: get_nested_type(schema[key]) for key in schema}
        )  # Features is subclass of dict, and dict order is deterministic since Python 3.6
    elif isinstance(schema, dict):
        return pa.struct(
            {key: get_nested_type(schema[key]) for key in schema}
        )  # however don't sort on struct types since the order matters
    elif isinstance(schema, (list, tuple)):
        if len(schema) != 1:
            raise ValueError("When defining list feature, you should just provide one example of the inner type")
        value_type = get_nested_type(schema[0])
        return pa.list_(value_type)
    elif isinstance(schema, Sequence):
        value_type = get_nested_type(schema.feature)
        # We allow to reverse list of dict => dict of list for compatibility with tfds
        if isinstance(schema.feature, dict):
            # Sequence of dict -> struct of per-field lists (tfds convention).
            return pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type})
        return pa.list_(value_type, schema.length)

    # Other objects are callable which returns their data type (ClassLabel, Array2D, Translation, Arrow datatype creation methods)
    return schema()
1230
+
1231
+
1232
def encode_nested_example(schema, obj, level=0):
    """Encode a nested example.
    This is used since some features (in particular ClassLabel) have some logic during encoding.

    To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be encoded.
    If the first element needs to be encoded, then all the elements of the list will be encoded, otherwise they'll stay the same.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        # Only the top level (level == 0) requires a non-None dict.
        if level == 0 and obj is None:
            raise ValueError("Got None but expected a dictionary instead")
        return (
            {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema}
            if obj is not None
            else None
        )

    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                # Find the first "real" (non-null, non-empty) element to decide
                # whether encoding changes anything at all.
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt:
                    return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        if obj is None:
            return None
        # We allow to reverse list of dict => dict of list for compatibility with tfds
        if isinstance(schema.feature, dict):
            # dict of list to fill
            list_dict = {}
            if isinstance(obj, (list, tuple)):
                # obj is a list of dict
                for k in schema.feature:
                    list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj]
                return list_dict
            else:
                # obj is a single dict
                for k in schema.feature:
                    list_dict[k] = (
                        [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]]
                        if k in obj
                        else None
                    )
                return list_dict
        # schema.feature is not a dict
        if isinstance(obj, str):  # don't interpret a string as a list
            raise ValueError(f"Got a string but expected a list instead: '{obj}'")
        else:
            if len(obj) > 0:
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, schema.feature):
                        break
                # be careful when comparing tensors here
                if (
                    not isinstance(first_elmt, list)
                    or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt
                ):
                    return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj]
            return list(obj)
    # Object with special encoding:
    # ClassLabel will convert from string to int, TranslationVariableLanguages does some checks
    elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)):
        return schema.encode_example(obj) if obj is not None else None
    # Other object should be directly convertible to a native Arrow type (like Translation and Translation)
    return obj
1303
+
1304
+
1305
def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
    """Decode a nested example.
    This is used since some features (in particular Audio and Image) have some logic during decoding.

    To avoid iterating over possibly long lists, it first checks (recursively) if the first element that is not None or empty (if it is a sequence) has to be decoded.
    If the first element needs to be decoded, then all the elements of the list will be decoded, otherwise they'll stay the same.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(schema, dict):
        return (
            {k: decode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in zip_dict(schema, obj)}
            if obj is not None
            else None
        )
    elif isinstance(schema, (list, tuple)):
        sub_schema = schema[0]
        if obj is None:
            return None
        else:
            if len(obj) > 0:
                # Decode all elements only if decoding the first non-null,
                # non-empty one actually changes it.
                for first_elmt in obj:
                    if _check_non_null_non_empty_recursive(first_elmt, sub_schema):
                        break
                if decode_nested_example(sub_schema, first_elmt) != first_elmt:
                    return [decode_nested_example(sub_schema, o) for o in obj]
            return list(obj)
    elif isinstance(schema, Sequence):
        # We allow to reverse list of dict => dict of list for compatibility with tfds
        if isinstance(schema.feature, dict):
            return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature}
        else:
            return decode_nested_example([schema.feature], obj)
    # Object with special decoding:
    elif isinstance(schema, (Audio, Image)):
        # we pass the token to read and decode files from private repositories in streaming mode
        if obj is not None and schema.decode:
            return schema.decode_example(obj, token_per_repo_id=token_per_repo_id)
    return obj
1343
+
1344
+
1345
def generate_from_dict(obj: Any):
    """Regenerate the nested feature object from a deserialized dict.
    We use the '_type' fields to get the dataclass name to load.

    generate_from_dict is the recursive helper for Features.from_dict, and allows for a convenient constructor syntax
    to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
    a :class:`DatasetInfo` that was dumped to a JSON object. This acts as an analogue to
    :meth:`Features.from_arrow_schema` and handles the recursive field-by-field instantiation, but doesn't require any
    mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive dtypes
    that :class:`Value` automatically performs.
    """
    # Nested structures: we allow dict, list/tuples, sequences
    if isinstance(obj, list):
        return [generate_from_dict(value) for value in obj]
    # Otherwise we have a dict or a dataclass
    if "_type" not in obj or isinstance(obj["_type"], dict):
        # Plain mapping with no feature marker: recurse on the values.
        return {key: generate_from_dict(value) for key, value in obj.items()}
    obj = dict(obj)
    # Resolve the feature class by name from this module's namespace.
    class_type = globals()[obj.pop("_type")]

    if class_type == Sequence:
        return Sequence(feature=generate_from_dict(obj["feature"]), length=obj.get("length", -1))

    # Drop unknown keys so serialized dicts from other library versions stay loadable.
    field_names = {f.name for f in fields(class_type)}
    return class_type(**{k: v for k, v in obj.items() if k in field_names})
1370
+
1371
+
1372
def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType:
    """
    generate_from_arrow_type accepts an arrow DataType and returns a datasets FeatureType to be used as the type for
    a single field.

    This is the high-level arrow->datasets type conversion and is inverted by get_nested_type().

    This operates at the individual *field* level, whereas Features.from_arrow_schema() operates at the
    full schema level and holds the methods that represent the bijection from Features<->pyarrow.Schema
    """
    if isinstance(pa_type, pa.StructType):
        return {field.name: generate_from_arrow_type(field.type) for field in pa_type}
    elif isinstance(pa_type, pa.FixedSizeListType):
        return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size)
    elif isinstance(pa_type, pa.ListType):
        feature = generate_from_arrow_type(pa_type.value_type)
        if isinstance(feature, (dict, tuple, list)):
            # Lists of composite features stay in the plain [feature] form.
            return [feature]
        return Sequence(feature=feature)
    elif isinstance(pa_type, _ArrayXDExtensionType):
        # Index by rank: ndims 2..5 map to Array2D..Array5D (0 and 1 unsupported).
        array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims]
        return array_feature(shape=pa_type.shape, dtype=pa_type.value_type)
    elif isinstance(pa_type, pa.DictionaryType):
        raise NotImplementedError  # TODO(thom) this will need access to the dictionary as well (for labels). I.e. to the py_table
    elif isinstance(pa_type, pa.DataType):
        return Value(dtype=_arrow_to_datasets_dtype(pa_type))
    else:
        raise ValueError(f"Cannot convert {pa_type} to a Feature type.")
1400
+
1401
+
1402
def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a multidimensional NumPy array.

    The array is flattened into a 1-d value buffer, then wrapped in one
    ListArray level per extra dimension, rebuilt from the innermost dimension
    outwards with evenly spaced offsets.
    """
    arr = np.array(arr)
    values = pa.array(arr.flatten(), type=type)
    # Walk the axes innermost-first; each pass adds one level of list nesting.
    for axis in range(arr.ndim - 1, 0, -1):
        list_length = arr.shape[axis]
        num_lists = reduce(mul, arr.shape[:axis], 1)
        offsets = pa.array(np.arange(num_lists + 1) * list_length, type=pa.int32())
        values = pa.ListArray.from_arrays(offsets, values)
    return values
1412
+
1413
+
1414
def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray:
    """Concatenate PyArrow arrays into one ListArray; `None` entries become null lists."""
    null_mask = np.array([arr is None for arr in l_arr])
    # Positions (within the compacted, None-free list) where nulls are re-inserted.
    null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask))
    l_arr = [arr for arr in l_arr if arr is not None]
    offsets = np.cumsum(
        [0] + [len(arr) for arr in l_arr], dtype=object
    )  # convert to dtype object to allow None insertion
    # A None offset marks a null list entry for ListArray.from_arrays.
    offsets = np.insert(offsets, null_indices, None)
    offsets = pa.array(offsets, type=pa.int32())
    values = pa.concat_arrays(l_arr)
    return pa.ListArray.from_arrays(offsets, values)
1425
+
1426
+
1427
def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType = None) -> pa.ListArray:
    """Build a PyArrow ListArray from a possibly nested list of NumPy arrays.

    `None` entries are preserved as null lists; an empty input yields an empty array.
    """
    if not l_arr:
        return pa.array([], type=type)
    converted = [None if arr is None else numpy_to_pyarrow_listarray(arr, type=type) for arr in l_arr]
    return list_of_pa_arrays_to_pyarrow_listarray(converted)
1435
+
1436
+
1437
def contains_any_np_array(data: Any):
    """Return `True` if data is a NumPy ndarray or (recursively) if first non-null value in list is a NumPy ndarray.

    Args:
        data (Any): Data.

    Returns:
        bool
    """
    if isinstance(data, np.ndarray):
        return True
    if not isinstance(data, list):
        return False
    # Inspect only the first non-null element of the list.
    return contains_any_np_array(first_non_null_value(data)[1])
1452
+
1453
+
1454
def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType = None) -> pa.ListArray:
    """Convert to PyArrow ListArray either a NumPy ndarray or (recursively) a list that may contain any NumPy ndarray.

    Args:
        data (Union[np.ndarray, List]): Data.
        type (pa.DataType): Explicit PyArrow DataType passed to coerce the ListArray data type.

    Returns:
        pa.ListArray
    """
    if isinstance(data, np.ndarray):
        return numpy_to_pyarrow_listarray(data, type=type)
    if isinstance(data, list):
        converted = [any_np_array_to_pyarrow_listarray(item, type=type) for item in data]
        return list_of_pa_arrays_to_pyarrow_listarray(converted)
    # Any other input falls through and yields None (mirrors the original contract).
    return None
1468
+
1469
+
1470
def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array:
    """Convert to PyArrow ListArray.

    Args:
        data (Any): Sequence, iterable, np.ndarray or pd.Series.
        pa_type (_ArrayXDExtensionType): Any of the ArrayNDExtensionType.

    Returns:
        pyarrow.Array
    """
    # Plain (ndarray-free) data can be handed straight to pyarrow.
    if not contains_any_np_array(data):
        return pa.array(data, pa_type.storage_dtype)
    return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type)
1484
+
1485
+
1486
def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType:
    """Visit a (possibly nested) feature.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        visited feature (FeatureType)
    """
    if isinstance(feature, dict):
        visited = func({name: _visit(subfeature, func) for name, subfeature in feature.items()})
    elif isinstance(feature, (list, tuple)):
        visited = func([_visit(feature[0], func)])
    elif isinstance(feature, Sequence):
        visited = func(Sequence(_visit(feature.feature, func), length=feature.length))
    else:
        visited = func(feature)
    # `func` returning None means "keep the feature unchanged".
    return visited if visited is not None else feature
1503
+
1504
+
1505
def require_decoding(feature: FeatureType, ignore_decode_attribute: bool = False) -> bool:
    """Check if a (possibly nested) feature requires decoding.

    Args:
        feature (FeatureType): the feature type to be checked
        ignore_decode_attribute (:obj:`bool`, default ``False``): Whether to ignore the current value
            of the `decode` attribute of the decodable feature types.
    Returns:
        :obj:`bool`
    """
    # Recurse through the nesting containers first.
    if isinstance(feature, dict):
        return any(require_decoding(subfeature) for subfeature in feature.values())
    if isinstance(feature, (list, tuple)):
        return require_decoding(feature[0])
    if isinstance(feature, Sequence):
        return require_decoding(feature.feature)
    # Leaf feature: decodable types expose `decode_example`.
    if not hasattr(feature, "decode_example"):
        return False
    return True if ignore_decode_attribute else feature.decode
1523
+
1524
+
1525
def require_storage_cast(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires storage casting.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        :obj:`bool`
    """
    # Recurse through the nesting containers first.
    if isinstance(feature, dict):
        return any(require_storage_cast(subfeature) for subfeature in feature.values())
    if isinstance(feature, (list, tuple)):
        return require_storage_cast(feature[0])
    if isinstance(feature, Sequence):
        return require_storage_cast(feature.feature)
    # Leaf feature: castable types expose `cast_storage`.
    return hasattr(feature, "cast_storage")
1541
+
1542
+
1543
def require_storage_embed(feature: FeatureType) -> bool:
    """Check if a (possibly nested) feature requires embedding data into storage.

    Args:
        feature (FeatureType): the feature type to be checked
    Returns:
        :obj:`bool`
    """
    # Fix: recurse with `require_storage_embed` itself. The original recursed
    # with `require_storage_cast` (copy-paste from the function above), which
    # made nested features report the *cast* requirement instead of the
    # *embed* one — e.g. a nested ClassLabel (has `cast_storage` but no
    # `embed_storage`) was wrongly reported as requiring embedding.
    if isinstance(feature, dict):
        return any(require_storage_embed(f) for f in feature.values())
    elif isinstance(feature, (list, tuple)):
        return require_storage_embed(feature[0])
    elif isinstance(feature, Sequence):
        return require_storage_embed(feature.feature)
    else:
        return hasattr(feature, "embed_storage")
1559
+
1560
+
1561
def keep_features_dicts_synced(func):
    """
    Decorator that re-computes the `_column_requires_decoding` mapping of a
    :class:`datasets.Features` object after any mutating ``dict`` operation,
    so the decodability bookkeeping never drifts from the main dictionary.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # `self` may arrive positionally or as an explicit keyword argument.
        if args:
            self, *rest = args
        else:
            self = kwargs.pop("self")
            rest = ()
        result = func(self, *rest, **kwargs)
        assert hasattr(self, "_column_requires_decoding")
        self._column_requires_decoding = {name: require_decoding(feat) for name, feat in self.items()}
        return result

    wrapper._decorator_name_ = "_keep_dicts_synced"
    return wrapper
1581
+
1582
+
1583
class Features(dict):
    """A special dictionary that defines the internal structure of a dataset.

    Instantiated with a dictionary of type `dict[str, FieldType]`, where keys are the desired column names,
    and values are the type of that column.

    `FieldType` can be one of the following:
        - a [`~datasets.Value`] feature specifies a single typed value, e.g. `int64` or `string`.
        - a [`~datasets.ClassLabel`] feature specifies a field with a predefined set of classes which can have labels
          associated to them and will be stored as integers in the dataset.
        - a python `dict` which specifies that the field is a nested field containing a mapping of sub-fields to sub-fields
          features. It's possible to have nested fields of nested fields in an arbitrary manner.
        - a python `list` or a [`~datasets.Sequence`] specifies that the field contains a list of objects. The python
          `list` or [`~datasets.Sequence`] should be provided with a single sub-feature as an example of the feature
          type hosted in this list.

          <Tip>

          A [`~datasets.Sequence`] with a internal dictionary feature will be automatically converted into a dictionary of
          lists. This behavior is implemented to have a compatibility layer with the TensorFlow Datasets library but may be
          unwanted in some cases. If you don't want this behavior, you can use a python `list` instead of the
          [`~datasets.Sequence`].

          </Tip>

        - a [`Array2D`], [`Array3D`], [`Array4D`] or [`Array5D`] feature for multidimensional arrays.
        - an [`Audio`] feature to store the absolute path to an audio file or a dictionary with the relative path
          to an audio file ("path" key) and its bytes content ("bytes" key). This feature extracts the audio data.
        - an [`Image`] feature to store the absolute path to an image file, an `np.ndarray` object, a `PIL.Image.Image` object
          or a dictionary with the relative path to an image file ("path" key) and its bytes content ("bytes" key). This feature extracts the image data.
        - [`~datasets.Translation`] and [`~datasets.TranslationVariableLanguages`], the two features specific to Machine Translation.
    """

    def __init__(*args, **kwargs):
        # self not in the signature to allow passing self as a kwarg
        if not args:
            raise TypeError("descriptor '__init__' of 'Features' object needs an argument")
        self, *args = args
        super(Features, self).__init__(*args, **kwargs)
        # Secondary mapping telling, per column, whether values must go through
        # `decode_nested_example` when read; kept in sync by `keep_features_dicts_synced`.
        self._column_requires_decoding: Dict[str, bool] = {
            col: require_decoding(feature) for col, feature in self.items()
        }

    # Every mutating dict method is wrapped so `_column_requires_decoding` is recomputed
    # after each change to the main dictionary.
    __setitem__ = keep_features_dicts_synced(dict.__setitem__)
    __delitem__ = keep_features_dicts_synced(dict.__delitem__)
    update = keep_features_dicts_synced(dict.update)
    setdefault = keep_features_dicts_synced(dict.setdefault)
    pop = keep_features_dicts_synced(dict.pop)
    popitem = keep_features_dicts_synced(dict.popitem)
    clear = keep_features_dicts_synced(dict.clear)

    def __reduce__(self):
        # Pickle as a plain dict and rebuild through __init__ so the decoding map is restored.
        return Features, (dict(self),)

    @property
    def type(self):
        """
        Features field types.

        Returns:
            :obj:`pyarrow.DataType`
        """
        return get_nested_type(self)

    @property
    def arrow_schema(self):
        """
        Features schema.

        Returns:
            :obj:`pyarrow.Schema`
        """
        # The features are also serialized into the schema metadata under the "huggingface"
        # key so they can be recovered exactly by `from_arrow_schema`.
        hf_metadata = {"info": {"features": self.to_dict()}}
        return pa.schema(self.type).with_metadata({"huggingface": json.dumps(hf_metadata)})

    @classmethod
    def from_arrow_schema(cls, pa_schema: pa.Schema) -> "Features":
        """
        Construct [`Features`] from Arrow Schema.
        It also checks the schema metadata for Hugging Face Datasets features.
        Non-nullable fields are not supported and set to nullable.

        Args:
            pa_schema (`pyarrow.Schema`):
                Arrow Schema.

        Returns:
            [`Features`]
        """
        # try to load features from the arrow schema metadata
        metadata_features = Features()
        if pa_schema.metadata is not None and "huggingface".encode("utf-8") in pa_schema.metadata:
            metadata = json.loads(pa_schema.metadata["huggingface".encode("utf-8")].decode())
            if "info" in metadata and "features" in metadata["info"] and metadata["info"]["features"] is not None:
                metadata_features = Features.from_dict(metadata["info"]["features"])
        metadata_features_schema = metadata_features.arrow_schema
        # Prefer the metadata feature only when its arrow type matches the actual field,
        # otherwise fall back to inferring the feature from the arrow type.
        obj = {
            field.name: (
                metadata_features[field.name]
                if field.name in metadata_features and metadata_features_schema.field(field.name) == field
                else generate_from_arrow_type(field.type)
            )
            for field in pa_schema
        }
        return cls(**obj)

    @classmethod
    def from_dict(cls, dic) -> "Features":
        """
        Construct [`Features`] from dict.

        Regenerate the nested feature object from a deserialized dict.
        We use the `_type` key to infer the dataclass name of the feature `FieldType`.

        It allows for a convenient constructor syntax
        to define features from deserialized JSON dictionaries. This function is used in particular when deserializing
        a [`DatasetInfo`] that was dumped to a JSON object. This acts as an analogue to
        [`Features.from_arrow_schema`] and handles the recursive field-by-field instantiation, but doesn't require
        any mapping to/from pyarrow, except for the fact that it takes advantage of the mapping of pyarrow primitive
        dtypes that [`Value`] automatically performs.

        Args:
            dic (`dict[str, Any]`):
                Python dictionary.

        Returns:
            `Features`

        Example::
            >>> Features.from_dict({'_type': {'dtype': 'string', 'id': None, '_type': 'Value'}})
            {'_type': Value(dtype='string', id=None)}
        """
        obj = generate_from_dict(dic)
        return cls(**obj)

    def to_dict(self):
        # Plain-dict representation of the nested feature dataclasses (used for JSON dumps).
        return asdict(self)

    def _to_yaml_list(self) -> list:
        # we compute the YAML list from the dict representation that is used for JSON dump
        yaml_data = self.to_dict()

        def simplify(feature: dict) -> dict:
            # Collapse verbose nested forms into the compact YAML forms shown in the diagrams below.
            if not isinstance(feature, dict):
                raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")

            #
            # sequence:              -> sequence: int32
            #   dtype: int32         ->
            #
            if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["dtype"]:
                feature["sequence"] = feature["sequence"]["dtype"]

            #
            # sequence:              -> sequence:
            #   struct:              -> - name: foo
            #   - name: foo          ->   dtype: int32
            #     dtype: int32       ->
            #
            if isinstance(feature.get("sequence"), dict) and list(feature["sequence"]) == ["struct"]:
                feature["sequence"] = feature["sequence"]["struct"]

            #
            # list:                  -> list: int32
            #   dtype: int32         ->
            #
            if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["dtype"]:
                feature["list"] = feature["list"]["dtype"]

            #
            # list:                  -> list:
            #   struct:              -> - name: foo
            #   - name: foo          ->   dtype: int32
            #     dtype: int32       ->
            #
            if isinstance(feature.get("list"), dict) and list(feature["list"]) == ["struct"]:
                feature["list"] = feature["list"]["struct"]

            #
            # class_label:           -> class_label:
            #   names:               ->   names:
            #   - negative           ->     '0': negative
            #   - positive           ->     '1': positive
            #
            if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), list):
                # server-side requirement: keys must be strings
                feature["class_label"]["names"] = {
                    str(label_id): label_name for label_id, label_name in enumerate(feature["class_label"]["names"])
                }
            return feature

        def to_yaml_inner(obj: Union[dict, list]) -> dict:
            # Translate the JSON-dump representation (with "_type" tags) into the YAML shape.
            if isinstance(obj, dict):
                _type = obj.pop("_type", None)
                if _type == "Sequence":
                    _feature = obj.pop("feature")
                    return simplify({"sequence": to_yaml_inner(_feature), **obj})
                elif _type == "Value":
                    return obj
                elif _type and not obj:
                    # Feature dataclass with no extra attributes, e.g. Audio/Image defaults.
                    return {"dtype": camelcase_to_snakecase(_type)}
                elif _type:
                    return {"dtype": simplify({camelcase_to_snakecase(_type): obj})}
                else:
                    return {"struct": [{"name": name, **to_yaml_inner(_feature)} for name, _feature in obj.items()]}
            elif isinstance(obj, list):
                return simplify({"list": simplify(to_yaml_inner(obj[0]))})
            elif isinstance(obj, tuple):
                return to_yaml_inner(list(obj))
            else:
                raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")

        def to_yaml_types(obj: dict) -> dict:
            # Recursively convert tuples to lists so the result is plain YAML-serializable data.
            if isinstance(obj, dict):
                return {k: to_yaml_types(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [to_yaml_types(v) for v in obj]
            elif isinstance(obj, tuple):
                return to_yaml_types(list(obj))
            else:
                return obj

        return to_yaml_types(to_yaml_inner(yaml_data)["struct"])

    @classmethod
    def _from_yaml_list(cls, yaml_data: list) -> "Features":
        # Deep copy because unsimplify/from_yaml_inner mutate the nested dicts in place.
        yaml_data = copy.deepcopy(yaml_data)

        # we convert the list obtained from YAML data into the dict representation that is used for JSON dump

        def unsimplify(feature: dict) -> dict:
            # Inverse of `simplify` in `_to_yaml_list`: expand the compact YAML forms.
            if not isinstance(feature, dict):
                raise TypeError(f"Expected a dict but got a {type(feature)}: {feature}")
            #
            # sequence: int32        -> sequence:
            #                        ->   dtype: int32
            #
            if isinstance(feature.get("sequence"), str):
                feature["sequence"] = {"dtype": feature["sequence"]}
            #
            # list: int32            -> list:
            #                        ->   dtype: int32
            #
            if isinstance(feature.get("list"), str):
                feature["list"] = {"dtype": feature["list"]}

            #
            # class_label:           -> class_label:
            #   names:               ->   names:
            #     '0': negative      ->   - negative
            #     '1': positive      ->   - positive
            #
            if isinstance(feature.get("class_label"), dict) and isinstance(feature["class_label"].get("names"), dict):
                label_ids = sorted(feature["class_label"]["names"], key=int)
                # Label ids must form a contiguous range starting at 0.
                if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)):
                    raise ValueError(
                        f"ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing."
                    )
                feature["class_label"]["names"] = [feature["class_label"]["names"][label_id] for label_id in label_ids]
            return feature

        def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]:
            if isinstance(obj, dict):
                if not obj:
                    return {}
                # The first key identifies the YAML form ("sequence", "list", "struct", "dtype", ...).
                _type = next(iter(obj))
                if _type == "sequence":
                    _feature = unsimplify(obj).pop(_type)
                    return {"feature": from_yaml_inner(_feature), **obj, "_type": "Sequence"}
                if _type == "list":
                    return [from_yaml_inner(unsimplify(obj)[_type])]
                if _type == "struct":
                    return from_yaml_inner(obj["struct"])
                elif _type == "dtype":
                    if isinstance(obj["dtype"], str):
                        # e.g. int32, float64, string, audio, image
                        try:
                            Value(obj["dtype"])
                            return {**obj, "_type": "Value"}
                        except ValueError:
                            # e.g. Audio, Image, ArrayXD
                            return {"_type": snakecase_to_camelcase(obj["dtype"])}
                    else:
                        return from_yaml_inner(obj["dtype"])
                else:
                    return {"_type": snakecase_to_camelcase(_type), **unsimplify(obj)[_type]}
            elif isinstance(obj, list):
                names = [_feature.pop("name") for _feature in obj]
                return {name: from_yaml_inner(_feature) for name, _feature in zip(names, obj)}
            else:
                raise TypeError(f"Expected a dict or a list but got {type(obj)}: {obj}")

        return cls.from_dict(from_yaml_inner(yaml_data))

    def encode_example(self, example):
        """
        Encode example into a format for Arrow.

        Args:
            example (`dict[str, Any]`):
                Data in a Dataset row.

        Returns:
            `dict[str, Any]`
        """
        example = cast_to_python_objects(example)
        return encode_nested_example(self, example)

    def encode_column(self, column, column_name: str):
        """
        Encode column into a format for Arrow.

        Args:
            column (`list[Any]`):
                Data in a Dataset column.
            column_name (`str`):
                Dataset column name.

        Returns:
            `list[Any]`
        """
        column = cast_to_python_objects(column)
        return [encode_nested_example(self[column_name], obj) for obj in column]

    def encode_batch(self, batch):
        """
        Encode batch into a format for Arrow.

        Args:
            batch (`dict[str, list[Any]]`):
                Data in a Dataset batch.

        Returns:
            `dict[str, list[Any]]`

        Raises:
            `ValueError`: if the batch columns don't exactly match the feature columns.
        """
        encoded_batch = {}
        if set(batch) != set(self):
            raise ValueError(f"Column mismatch between batch {set(batch)} and features {set(self)}")
        for key, column in batch.items():
            column = cast_to_python_objects(column)
            encoded_batch[key] = [encode_nested_example(self[key], obj) for obj in column]
        return encoded_batch

    def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
        """Decode example with custom feature decoding.

        Args:
            example (`dict[str, Any]`):
                Dataset row data.
            token_per_repo_id (`dict`, *optional*):
                To access and decode audio or image files from private repositories on the Hub, you can pass
                a dictionary `repo_id (str) -> token (bool or str)`.

        Returns:
            `dict[str, Any]`
        """

        # Only columns present in `example` are decoded; non-decodable columns pass through unchanged.
        return {
            column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id)
            if self._column_requires_decoding[column_name]
            else value
            for column_name, (feature, value) in zip_dict(
                {key: value for key, value in self.items() if key in example}, example
            )
        }

    def decode_column(self, column: list, column_name: str):
        """Decode column with custom feature decoding.

        Args:
            column (`list[Any]`):
                Dataset column data.
            column_name (`str`):
                Dataset column name.

        Returns:
            `list[Any]`
        """
        # None entries are preserved as-is (missing values are never decoded).
        return (
            [decode_nested_example(self[column_name], value) if value is not None else None for value in column]
            if self._column_requires_decoding[column_name]
            else column
        )

    def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None):
        """Decode batch with custom feature decoding.

        Args:
            batch (`dict[str, list[Any]]`):
                Dataset batch data.
            token_per_repo_id (`dict`, *optional*):
                To access and decode audio or image files from private repositories on the Hub, you can pass
                a dictionary repo_id (str) -> token (bool or str)

        Returns:
            `dict[str, list[Any]]`
        """
        decoded_batch = {}
        for column_name, column in batch.items():
            decoded_batch[column_name] = (
                [
                    decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
                    if value is not None
                    else None
                    for value in column
                ]
                if self._column_requires_decoding[column_name]
                else column
            )
        return decoded_batch

    def copy(self) -> "Features":
        """
        Make a deep copy of [`Features`].

        Returns:
            [`Features`]

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("rotten_tomatoes", split="train")
        >>> copy_of_features = ds.features.copy()
        >>> copy_of_features
        {'label': ClassLabel(num_classes=2, names=['neg', 'pos'], id=None),
         'text': Value(dtype='string', id=None)}
        ```
        """
        return copy.deepcopy(self)

    def reorder_fields_as(self, other: "Features") -> "Features":
        """
        Reorder Features fields to match the field order of other [`Features`].

        The order of the fields is important since it matters for the underlying arrow data.
        Re-ordering the fields allows to make the underlying arrow data type match.

        Args:
            other ([`Features`]):
                The other [`Features`] to align with.

        Returns:
            [`Features`]

        Example::

            >>> from datasets import Features, Sequence, Value
            >>> # let's say we have to features with a different order of nested fields (for a and b for example)
            >>> f1 = Features({"root": Sequence({"a": Value("string"), "b": Value("string")})})
            >>> f2 = Features({"root": {"b": Sequence(Value("string")), "a": Sequence(Value("string"))}})
            >>> assert f1.type != f2.type
            >>> # re-ordering keeps the base structure (here Sequence is defined at the root level), but make the fields order match
            >>> f1.reorder_fields_as(f2)
            {'root': Sequence(feature={'b': Value(dtype='string', id=None), 'a': Value(dtype='string', id=None)}, length=-1, id=None)}
            >>> assert f1.reorder_fields_as(f2).type == f2.type
        """

        def recursive_reorder(source, target, stack=""):
            # `stack` tracks the nested path for error messages.
            stack_position = " at " + stack[1:] if stack else ""
            # Unwrap Sequence on the target side into the equivalent dict-of-lists / list form
            # so source and target can be compared structurally.
            if isinstance(target, Sequence):
                target = target.feature
                if isinstance(target, dict):
                    target = {k: [v] for k, v in target.items()}
                else:
                    target = [target]
            if isinstance(source, Sequence):
                source, id_, length = source.feature, source.id, source.length
                if isinstance(source, dict):
                    source = {k: [v] for k, v in source.items()}
                    reordered = recursive_reorder(source, target, stack)
                    return Sequence({k: v[0] for k, v in reordered.items()}, id=id_, length=length)
                else:
                    source = [source]
                    reordered = recursive_reorder(source, target, stack)
                    return Sequence(reordered[0], id=id_, length=length)
            elif isinstance(source, dict):
                if not isinstance(target, dict):
                    raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
                if sorted(source) != sorted(target):
                    message = (
                        f"Keys mismatch: between {source} (source) and {target} (target).\n"
                        f"{source.keys()-target.keys()} are missing from target "
                        f"and {target.keys()-source.keys()} are missing from source" + stack_position
                    )
                    raise ValueError(message)
                # Iterate over `target` to adopt its key order.
                return {key: recursive_reorder(source[key], target[key], stack + f".{key}") for key in target}
            elif isinstance(source, list):
                if not isinstance(target, list):
                    raise ValueError(f"Type mismatch: between {source} and {target}" + stack_position)
                if len(source) != len(target):
                    raise ValueError(f"Length mismatch: between {source} and {target}" + stack_position)
                return [recursive_reorder(source[i], target[i], stack + ".<list>") for i in range(len(target))]
            else:
                return source

        return Features(recursive_reorder(self, other))

    def flatten(self, max_depth=16) -> "Features":
        """Flatten the features. Every dictionary column is removed and is replaced by
        all the subfields it contains. The new fields are named by concatenating the
        name of the original column and the subfield name like this: `<original>.<subfield>`.

        If a column contains nested dictionaries, then all the lower-level subfields names are
        also concatenated to form new columns: `<original>.<subfield>.<subsubfield>`, etc.

        Returns:
            [`Features`]:
                The flattened features.

        Example:

        ```py
        >>> from datasets import load_dataset
        >>> ds = load_dataset("squad", split="train")
        >>> ds.features.flatten()
        {'answers.answer_start': Sequence(feature=Value(dtype='int32', id=None), length=-1, id=None),
         'answers.text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None),
         'context': Value(dtype='string', id=None),
         'id': Value(dtype='string', id=None),
         'question': Value(dtype='string', id=None),
         'title': Value(dtype='string', id=None)}
        ```
        """
        # Flatten one level per iteration, up to `max_depth` levels of nesting.
        for depth in range(1, max_depth):
            no_change = True
            flattened = self.copy()
            for column_name, subfeature in self.items():
                if isinstance(subfeature, dict):
                    no_change = False
                    flattened.update({f"{column_name}.{k}": v for k, v in subfeature.items()})
                    del flattened[column_name]
                elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict):
                    # A Sequence of dicts flattens to one Sequence (or list, for nested dicts) per sub-field.
                    no_change = False
                    flattened.update(
                        {
                            f"{column_name}.{k}": Sequence(v) if not isinstance(v, dict) else [v]
                            for k, v in subfeature.feature.items()
                        }
                    )
                    del flattened[column_name]
                elif hasattr(subfeature, "flatten") and subfeature.flatten() != subfeature:
                    # Delegates to feature-specific flattening (e.g. Image/Audio in non-decode mode).
                    no_change = False
                    flattened.update({f"{column_name}.{k}": v for k, v in subfeature.flatten().items()})
                    del flattened[column_name]
            self = flattened
            if no_change:
                break
        return self
2132
+
2133
+
2134
def _align_features(features_list: List[Features]) -> List[Features]:
    """Align dictionaries of features so that the keys that are found in multiple dictionaries share the same feature."""
    # First pass: build one shared key -> feature mapping across all dictionaries.
    aligned = {}
    for features in features_list:
        for name, feature in features.items():
            if name in aligned and isinstance(feature, dict):
                # Recursively align features.
                aligned[name] = _align_features([aligned[name], feature])[0]
            elif name not in aligned:
                aligned[name] = feature
            elif isinstance(aligned[name], Value) and aligned[name].dtype == "null":
                # A "null" placeholder is replaced by the first concrete feature seen.
                aligned[name] = feature

    # Second pass: rebuild each Features dict with the shared feature objects, keeping key order.
    return [Features({name: aligned[name] for name in features.keys()}) for features in features_list]
2146
+
2147
+
2148
+ def _check_if_features_can_be_aligned(features_list: List[Features]):
2149
+ """Check if the dictionaries of features can be aligned.
2150
+
2151
+ Two dictonaries of features can be aligned if the keys they share have the same type or some of them is of type `Value("null")`.
2152
+ """
2153
+ name2feature = {}
2154
+ for features in features_list:
2155
+ for k, v in features.items():
2156
+ if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == "null"):
2157
+ name2feature[k] = v
2158
+
2159
+ for features in features_list:
2160
+ for k, v in features.items():
2161
+ if isinstance(v, dict) and isinstance(name2feature[k], dict):
2162
+ # Deep checks for structure.
2163
+ _check_if_features_can_be_aligned([name2feature[k], v])
2164
+ elif not (isinstance(v, Value) and v.dtype == "null") and name2feature[k] != v:
2165
+ raise ValueError(
2166
+ f'The features can\'t be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").'
2167
+ )
evalkit_tf437/lib/python3.10/site-packages/datasets/features/image.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import warnings
4
+ from dataclasses import dataclass, field
5
+ from io import BytesIO
6
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
7
+
8
+ import numpy as np
9
+ import pyarrow as pa
10
+
11
+ from .. import config
12
+ from ..download.download_config import DownloadConfig
13
+ from ..download.streaming_download_manager import xopen
14
+ from ..table import array_cast
15
+ from ..utils.file_utils import is_local_path
16
+ from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
17
+
18
+
19
+ if TYPE_CHECKING:
20
+ import PIL.Image
21
+
22
+ from .features import FeatureType
23
+
24
+
25
# Cache of supported image compression formats; None until initialized (initialization not visible in this chunk).
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
# "<" (little-endian) or ">" (big-endian), matching this interpreter's native byte order.
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
# NOTE(review): name has a typo ("DTPYES" for "DTYPES") — kept as-is since it may be referenced elsewhere in this module.
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
44
+
45
+
46
@dataclass
class Image:
    """Image [`Feature`] to read image data from an image file.

    Input: The Image feature accepts as input:
    - A `str`: Absolute path to the image file (i.e. random access is allowed).
    - A `dict` with the keys:

        - `path`: String with relative path of the image file to the archive file.
        - `bytes`: Bytes of the image file.

      This is useful for archived files with sequential access.

    - An `np.ndarray`: NumPy array representing an image.
    - A `PIL.Image.Image`: PIL image object.

    Args:
        decode (`bool`, defaults to `True`):
            Whether to decode the image data. If `False`,
            returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.

    Examples:

    ```py
    >>> from datasets import load_dataset, Image
    >>> ds = load_dataset("beans", split="train")
    >>> ds.features["image"]
    Image(decode=True, id=None)
    >>> ds[0]["image"]
    <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
    >>> ds = ds.cast_column('image', Image(decode=False))
    >>> ds[0]["image"]
    {'bytes': None,
     'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
    ```
    """

    # Whether decode_example returns a PIL image (True) or the raw {"path", "bytes"} dict (False).
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    # Arrow storage type: a struct holding the raw file bytes and/or the file path.
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
88
+
89
+ def __call__(self):
90
+ return self.pa_type
91
+
92
+ def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
93
+ """Encode example into a format for Arrow.
94
+
95
+ Args:
96
+ value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
97
+ Data passed as input to Image feature.
98
+
99
+ Returns:
100
+ `dict` with "path" and "bytes" fields
101
+ """
102
+ if config.PIL_AVAILABLE:
103
+ import PIL.Image
104
+ else:
105
+ raise ImportError("To support encoding images, please install 'Pillow'.")
106
+
107
+ if isinstance(value, list):
108
+ value = np.array(value)
109
+
110
+ if isinstance(value, str):
111
+ return {"path": value, "bytes": None}
112
+ elif isinstance(value, bytes):
113
+ return {"path": None, "bytes": value}
114
+ elif isinstance(value, np.ndarray):
115
+ # convert the image array to PNG/TIFF bytes
116
+ return encode_np_array(value)
117
+ elif isinstance(value, PIL.Image.Image):
118
+ # convert the PIL image to bytes (default format is PNG/TIFF)
119
+ return encode_pil_image(value)
120
+ elif value.get("path") is not None and os.path.isfile(value["path"]):
121
+ # we set "bytes": None to not duplicate the data if they're already available locally
122
+ return {"bytes": None, "path": value.get("path")}
123
+ elif value.get("bytes") is not None or value.get("path") is not None:
124
+ # store the image bytes, and path is used to infer the image format using the file extension
125
+ return {"bytes": value.get("bytes"), "path": value.get("path")}
126
+ else:
127
+ raise ValueError(
128
+ f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
129
+ )
130
+
131
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data.

        Args:
            value (`str` or `dict`):
                A string with the absolute image file path, a dictionary with
                keys:

                - `path`: String with absolute or relative image file path.
                - `bytes`: The bytes of the image file.
            token_per_repo_id (`dict`, *optional*):
                To access and decode
                image files from private repositories on the Hub, you can pass
                a dictionary repo_id (`str`) -> token (`bool` or `str`).

        Returns:
            `PIL.Image.Image`

        Raises:
            `RuntimeError`: if this feature was built with `decode=False`.
            `ImportError`: if Pillow is not installed.
            `ValueError`: if both "path" and "bytes" are None.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    # Remote file: take the last "::"-separated segment (presumably fsspec URL
                    # chaining — TODO confirm) and try to recover the repo_id from it so the
                    # right auth token can be picked from `token_per_repo_id`.
                    source_url = path.split("::")[-1]
                    pattern = (
                        config.HUB_DATASETS_URL
                        if source_url.startswith(config.HF_ENDPOINT)
                        else config.HUB_DATASETS_HFFS_URL
                    )
                    try:
                        repo_id = string_to_dict(source_url, pattern)["repo_id"]
                        token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        # URL doesn't match a Hub dataset pattern -> download without a token.
                        token = None
                    download_config = DownloadConfig(token=token)
                    with xopen(path, "rb", download_config=download_config) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
187
+
188
+ def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
189
+ """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
190
+ from .features import Value
191
+
192
+ return (
193
+ self
194
+ if self.decode
195
+ else {
196
+ "bytes": Value("binary"),
197
+ "path": Value("string"),
198
+ }
199
+ )
200
+
201
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type.
        The Arrow types that can be converted to the Image pyarrow storage type are:

        - `pa.string()` - it must contain the "path" data
        - `pa.binary()` - it must contain the image bytes
        - `pa.struct({"bytes": pa.binary()})`
        - `pa.struct({"path": pa.string()})`
        - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
        - `pa.list(*)` - it must contain the image array data

        Args:
            storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
                PyArrow array to cast.

        Returns:
            `pa.StructArray`: Array in the Image arrow storage type, that is
            `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
        """
        if pa.types.is_string(storage.type):
            # Paths only: pair each path with a null "bytes" entry.
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            # Raw bytes only: pair each blob with a null "path" entry.
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            # Struct input: reuse "bytes"/"path" fields when present, fill missing ones with nulls.
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # List input is treated as raw image array data: encode each array to image bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
246
+
247
+ def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
248
+ """Embed image files into the Arrow array.
249
+
250
+ Args:
251
+ storage (`pa.StructArray`):
252
+ PyArrow array to embed.
253
+
254
+ Returns:
255
+ `pa.StructArray`: Array in the Image arrow storage type, that is
256
+ `pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
257
+ """
258
+
259
+ @no_op_if_value_is_null
260
+ def path_to_bytes(path):
261
+ with xopen(path, "rb") as f:
262
+ bytes_ = f.read()
263
+ return bytes_
264
+
265
+ bytes_array = pa.array(
266
+ [
267
+ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
268
+ for x in storage.to_pylist()
269
+ ],
270
+ type=pa.binary(),
271
+ )
272
+ path_array = pa.array(
273
+ [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
274
+ type=pa.string(),
275
+ )
276
+ storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
277
+ return array_cast(storage, self.pa_type)
278
+
279
+
280
def list_image_compression_formats() -> List[str]:
    """Return the image formats Pillow can both open and save; computed once and cached module-wide."""
    if not config.PIL_AVAILABLE:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    import PIL.Image

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        readable = set(PIL.Image.OPEN)
        writable = set(PIL.Image.SAVE)
        _IMAGE_COMPRESSION_FORMATS = list(readable & writable)
    return _IMAGE_COMPRESSION_FORMATS
291
+
292
+
293
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    if image.format in list_image_compression_formats():
        save_format = image.format
    elif image.mode in ["1", "L", "LA", "RGB", "RGBA"]:
        save_format = "PNG"
    else:
        save_format = "TIFF"
    buffer = BytesIO()
    image.save(buffer, format=save_format)
    return buffer.getvalue()
302
+
303
+
304
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as a {"path", "bytes"} dict, preferring the known file path over re-encoding."""
    filename = getattr(image, "filename", "")
    if filename:
        return {"path": filename, "bytes": None}
    return {"path": None, "bytes": image_to_bytes(image)}
309
+
310
+
311
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as a `{"path": None, "bytes": ...}` dict of encoded image bytes.

    Multi-channel arrays (ndim > 2) are downcast to uint8; other arrays are downcast
    within their dtype kind until a Pillow-compatible dtype is found.

    Args:
        array (`np.ndarray`): Array to encode as an image.

    Returns:
        `dict`: `{"path": None, "bytes": <encoded image bytes>}`

    Raises:
        ImportError: If Pillow is not installed.
        TypeError: If the dtype cannot be downcast to a Pillow-compatible dtype.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        # Fix: assign dest_dtype BEFORE the unsupported-kind check, so the TypeError
        # message names the supported dtype instead of printing "None".
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype != dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
352
+
353
+
354
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if not objs:
        return objs
    _, sample = first_non_null_value(objs)
    if isinstance(sample, str):
        # Plain paths: no bytes to store.
        return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
    if isinstance(sample, np.ndarray):
        encode = no_op_if_value_is_null(encode_np_array)
        return [encode(obj) for obj in objs]
    if isinstance(sample, PIL.Image.Image):
        encode = no_op_if_value_is_null(encode_pil_image)
        return [encode(obj) for obj in objs]
    # Already-encoded dicts (or anything else) pass through unchanged.
    return objs
evalkit_tf437/lib/python3.10/site-packages/datasets/filesystems/__pycache__/compression.cpython-310.pyc ADDED
Binary file (6.28 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/__pycache__/torch_formatter.cpython-310.pyc ADDED
Binary file (3.86 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/np_formatter.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import sys
16
+ from collections.abc import Mapping
17
+
18
+ import numpy as np
19
+ import pyarrow as pa
20
+
21
+ from .. import config
22
+ from ..utils.py_utils import map_nested
23
+ from .formatting import TensorFormatter
24
+
25
+
26
class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]):
    """Formatter that materializes Arrow data as numpy arrays (rows/batches as mappings)."""

    def __init__(self, features=None, **np_array_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded to np.asarray (e.g. dtype).
        self.np_array_kwargs = np_array_kwargs

    def _consolidate(self, column):
        # Stack same-shape/same-dtype arrays into one array; otherwise keep ragged
        # elements in a 1-D object array.
        if not isinstance(column, list):
            return column
        if column:
            first = column[0]
            if all(
                isinstance(item, np.ndarray) and item.shape == first.shape and item.dtype == first.dtype
                for item in column
            ):
                return np.stack(column)
        # don't use np.array(column, dtype=object)
        # since it fails in certain cases
        # see https://stackoverflow.com/q/51005699
        consolidated = np.empty(len(column), dtype=object)
        consolidated[:] = column
        return consolidated

    def _tensorize(self, value):
        # Strings, bytes and None pass through untouched, as do numpy character data.
        if isinstance(value, (str, bytes, type(None))):
            return value
        if isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value
        if isinstance(value, np.number):
            return value

        default_dtype = {}
        if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": np.int64}
        elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": np.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                return np.asarray(value, **self.np_array_kwargs)

        # User-provided kwargs take precedence over the inferred default dtype.
        kwargs = {**default_dtype, **self.np_array_kwargs}
        return np.asarray(value, **kwargs)

    def _recursive_tensorize(self, data_struct):
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, (np.ndarray, np.character, np.number)):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray) and data_struct.dtype == object:
            return self._consolidate([self.recursive_tensorize(sub) for sub in data_struct])
        if isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(sub) for sub in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        extracted = self.numpy_arrow_extractor().extract_row(pa_table)
        decoded = self.python_features_decoder.decode_row(extracted)
        return self.recursive_tensorize(decoded)

    def format_column(self, pa_table: pa.Table) -> np.ndarray:
        extracted = self.numpy_arrow_extractor().extract_column(pa_table)
        decoded = self.python_features_decoder.decode_column(extracted, pa_table.column_names[0])
        return self._consolidate(self.recursive_tensorize(decoded))

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        extracted = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(extracted)
        batch = self.recursive_tensorize(batch)
        for name in batch:
            batch[name] = self._consolidate(batch[name])
        return batch
+ return batch
evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/tf_formatter.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.py_utils import map_nested
25
+ from .formatting import TensorFormatter
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ import tensorflow as tf
30
+
31
+
32
class TFFormatter(TensorFormatter[Mapping, "tf.Tensor", Mapping]):
    """Formatter that materializes Arrow data as TensorFlow tensors (rows/batches as mappings)."""

    def __init__(self, features=None, **tf_tensor_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded to tf.convert_to_tensor (e.g. dtype).
        self.tf_tensor_kwargs = tf_tensor_kwargs
        import tensorflow as tf  # noqa: F401 - import tf at initialization

    def _consolidate(self, column):
        """Stack a list of tensors: regular stack when shapes/dtypes match, ragged stack for 1-D tensors."""
        import tensorflow as tf

        if isinstance(column, list) and column:
            if all(
                isinstance(x, tf.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return tf.stack(column)
            elif all(
                isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and x.dtype == column[0].dtype
                for x in column
            ):
                # only rag 1-D tensors, otherwise some dimensions become ragged even though they were consolidated
                return tf.ragged.stack(column)

        return column

    def _tensorize(self, value):
        """Convert a single value to a tf.Tensor, defaulting numpy integers to int64 and floats to float32."""
        import tensorflow as tf

        if value is None:
            return value

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": tf.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": tf.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                # Decode PIL images to numpy before handing them to TensorFlow.
                value = np.asarray(value)

        # user-provided tf_tensor_kwargs take precedence over the inferred default dtype
        return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import tensorflow as tf

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                # detach/cpu first so conversion works regardless of device or autograd state
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, tf.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # tf tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Apply `_recursive_tensorize` over a nested structure (dicts handled by `map_nested`)."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Format a single Arrow row as a mapping of column name -> tensor."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "tf.Tensor":
        """Format the first Arrow column as a (possibly stacked/ragged) tensor."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Format an Arrow table as a mapping of column name -> consolidated tensor."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
+ return batch
evalkit_tf437/lib/python3.10/site-packages/datasets/formatting/torch_formatter.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+ import sys
17
+ from collections.abc import Mapping
18
+ from typing import TYPE_CHECKING
19
+
20
+ import numpy as np
21
+ import pyarrow as pa
22
+
23
+ from .. import config
24
+ from ..utils.py_utils import map_nested
25
+ from .formatting import TensorFormatter
26
+
27
+
28
+ if TYPE_CHECKING:
29
+ import torch
30
+
31
+
32
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that materializes Arrow data as PyTorch tensors (rows/batches as mappings)."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded to torch.tensor (e.g. dtype, device).
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack tensors of identical shape and dtype into one tensor; otherwise keep the list.
        if not (isinstance(column, list) and column):
            return column
        first = column[0]
        stackable = all(
            isinstance(item, torch.Tensor) and item.shape == first.shape and item.dtype == first.dtype
            for item in column
        )
        return torch.stack(column) if stackable else column

    def _tensorize(self, value):
        import torch

        # Strings, bytes and None pass through untouched; torch has no string tensors.
        if isinstance(value, (str, bytes, type(None))):
            return value
        if isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
            # Convert dtype to np.int64 if it's either np.uint16 or np.uint32 to ensure compatibility.
            # np.uint64 is excluded from this conversion as there is no compatible PyTorch dtype that can handle it without loss.
            if value.dtype in [np.uint16, np.uint32]:
                value = value.astype(np.int64)
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # User-provided kwargs take precedence over the inferred default dtype.
        kwargs = {**default_dtype, **self.torch_tensor_kwargs}
        return torch.tensor(value, **kwargs)

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(sub) for sub in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(sub) for sub in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        extracted = self.numpy_arrow_extractor().extract_row(pa_table)
        decoded = self.python_features_decoder.decode_row(extracted)
        return self.recursive_tensorize(decoded)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        extracted = self.numpy_arrow_extractor().extract_column(pa_table)
        decoded = self.python_features_decoder.decode_column(extracted, pa_table.column_names[0])
        return self._consolidate(self.recursive_tensorize(decoded))

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        extracted = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(extracted)
        batch = self.recursive_tensorize(batch)
        for name in batch:
            batch[name] = self._consolidate(batch[name])
        return batch
+ return batch
evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/abc.cpython-310.pyc ADDED
Binary file (2.11 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/io/__pycache__/spark.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/audiofolder/__pycache__/audiofolder.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/cache/__init__.py ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/folder_based_builder/folder_based_builder.py ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import itertools
3
+ import os
4
+ from dataclasses import dataclass
5
+ from typing import List, Optional, Tuple, Type
6
+
7
+ import pandas as pd
8
+ import pyarrow as pa
9
+ import pyarrow.json as paj
10
+
11
+ import datasets
12
+ from datasets.features.features import FeatureType
13
+ from datasets.tasks.base import TaskTemplate
14
+
15
+
16
+ logger = datasets.utils.logging.get_logger(__name__)
17
+
18
+
19
def count_path_segments(path):
    """Count directory separators in *path*, treating "/" and "\\" alike."""
    return sum(1 for char in path if char in "/\\")
21
+
22
+
23
@dataclass
class FolderBasedBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for AutoFolder."""

    # Explicit features for the dataset; when None, features are inferred later
    # (see FolderBasedBuilder._split_generators) from labels and/or metadata files.
    features: Optional[datasets.Features] = None
    # Tri-state flag: None (default) lets the builder decide whether to infer labels
    # from directory names; True/False forces the choice.
    drop_labels: Optional[bool] = None
    # Tri-state flag: None (default) uses metadata files when they are found;
    # True/False forces the choice.
    drop_metadata: Optional[bool] = None
30
+
31
+
32
+ class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
33
+ """
34
+ Base class for generic data loaders for vision and image data.
35
+
36
+
37
+ Abstract class attributes to be overridden by a child class:
38
+ BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)
39
+ BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)
40
+ BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
41
+ EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
42
+ will be included in a dataset)
43
+ CLASSIFICATION_TASK: classification task to use if labels are obtained from the folder structure
44
+ """
45
+
46
+ BASE_FEATURE: Type[FeatureType]
47
+ BASE_COLUMN_NAME: str
48
+ BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
49
+ EXTENSIONS: List[str]
50
+ CLASSIFICATION_TASK: TaskTemplate
51
+
52
+ METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
53
+
54
    def _info(self):
        # Features may still be None here: the final features are set in
        # _split_generators once labels/metadata files have been analyzed.
        return datasets.DatasetInfo(features=self.config.features)
56
+
57
+ def _split_generators(self, dl_manager):
58
+ if not self.config.data_files:
59
+ raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
60
+
61
+ # Do an early pass if:
62
+ # * `drop_labels` is None (default) or False, to infer the class labels
63
+ # * `drop_metadata` is None (default) or False, to find the metadata files
64
+ do_analyze = not self.config.drop_labels or not self.config.drop_metadata
65
+ labels, path_depths = set(), set()
66
+ metadata_files = collections.defaultdict(set)
67
+
68
+ def analyze(files_or_archives, downloaded_files_or_dirs, split):
69
+ if len(downloaded_files_or_dirs) == 0:
70
+ return
71
+ # The files are separated from the archives at this point, so check the first sample
72
+ # to see if it's a file or a directory and iterate accordingly
73
+ if os.path.isfile(downloaded_files_or_dirs[0]):
74
+ original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
75
+ for original_file, downloaded_file in zip(original_files, downloaded_files):
76
+ original_file, downloaded_file = str(original_file), str(downloaded_file)
77
+ _, original_file_ext = os.path.splitext(original_file)
78
+ if original_file_ext.lower() in self.EXTENSIONS:
79
+ if not self.config.drop_labels:
80
+ labels.add(os.path.basename(os.path.dirname(original_file)))
81
+ path_depths.add(count_path_segments(original_file))
82
+ elif os.path.basename(original_file) in self.METADATA_FILENAMES:
83
+ metadata_files[split].add((original_file, downloaded_file))
84
+ else:
85
+ original_file_name = os.path.basename(original_file)
86
+ logger.debug(
87
+ f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either."
88
+ )
89
+ else:
90
+ archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
91
+ for archive, downloaded_dir in zip(archives, downloaded_dirs):
92
+ archive, downloaded_dir = str(archive), str(downloaded_dir)
93
+ for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
94
+ _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
95
+ if downloaded_dir_file_ext in self.EXTENSIONS:
96
+ if not self.config.drop_labels:
97
+ labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
98
+ path_depths.add(count_path_segments(downloaded_dir_file))
99
+ elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
100
+ metadata_files[split].add((None, downloaded_dir_file))
101
+ else:
102
+ archive_file_name = os.path.basename(archive)
103
+ original_file_name = os.path.basename(downloaded_dir_file)
104
+ logger.debug(
105
+ f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
106
+ )
107
+
108
+ data_files = self.config.data_files
109
+ splits = []
110
+ for split_name, files in data_files.items():
111
+ if isinstance(files, str):
112
+ files = [files]
113
+ files, archives = self._split_files_and_archives(files)
114
+ downloaded_files = dl_manager.download(files)
115
+ downloaded_dirs = dl_manager.download_and_extract(archives)
116
+ if do_analyze: # drop_metadata is None or False, drop_labels is None or False
117
+ logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
118
+ analyze(files, downloaded_files, split_name)
119
+ analyze(archives, downloaded_dirs, split_name)
120
+
121
+ if metadata_files:
122
+ # add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
123
+ add_metadata = not self.config.drop_metadata
124
+ # if `metadata_files` are found, add labels only if
125
+ # `drop_labels` is set up to False explicitly (not-default behavior)
126
+ add_labels = self.config.drop_labels is False
127
+ else:
128
+ # if `metadata_files` are not found, don't add metadata
129
+ add_metadata = False
130
+ # if `metadata_files` are not found and `drop_labels` is None (default) -
131
+ # add labels if files are on the same level in directory hierarchy and there is more than one label
132
+ add_labels = (
133
+ (len(labels) > 1 and len(path_depths) == 1)
134
+ if self.config.drop_labels is None
135
+ else not self.config.drop_labels
136
+ )
137
+
138
+ if add_labels:
139
+ logger.info("Adding the labels inferred from data directories to the dataset's features...")
140
+ if add_metadata:
141
+ logger.info("Adding metadata to the dataset...")
142
+ else:
143
+ add_labels, add_metadata, metadata_files = False, False, {}
144
+
145
+ splits.append(
146
+ datasets.SplitGenerator(
147
+ name=split_name,
148
+ gen_kwargs={
149
+ "files": list(zip(files, downloaded_files))
150
+ + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
151
+ "metadata_files": metadata_files,
152
+ "split_name": split_name,
153
+ "add_labels": add_labels,
154
+ "add_metadata": add_metadata,
155
+ },
156
+ )
157
+ )
158
+
159
+ if add_metadata:
160
+ # Verify that:
161
+ # * all metadata files have the same set of features
162
+ # * the `file_name` key is one of the metadata keys and is of type string
163
+ features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
164
+
165
+ # Check that all metadata files share the same format
166
+ metadata_ext = {
167
+ os.path.splitext(original_metadata_file)[-1]
168
+ for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
169
+ }
170
+ if len(metadata_ext) > 1:
171
+ raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
172
+ metadata_ext = metadata_ext.pop()
173
+
174
+ for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
175
+ pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
176
+ features_per_metadata_file.append(
177
+ (downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
178
+ )
179
+ for downloaded_metadata_file, metadata_features in features_per_metadata_file:
180
+ if metadata_features != features_per_metadata_file[0][1]:
181
+ raise ValueError(
182
+ f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
183
+ )
184
+ metadata_features = features_per_metadata_file[0][1]
185
+ if "file_name" not in metadata_features:
186
+ raise ValueError("`file_name` must be present as dictionary key in metadata files")
187
+ if metadata_features["file_name"] != datasets.Value("string"):
188
+ raise ValueError("`file_name` key must be a string")
189
+ del metadata_features["file_name"]
190
+ else:
191
+ metadata_features = None
192
+
193
+ # Normally, we would do this in _info, but we need to know the labels and/or metadata
194
+ # before building the features
195
+ if self.config.features is None:
196
+ if add_labels:
197
+ self.info.features = datasets.Features(
198
+ {
199
+ self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
200
+ "label": datasets.ClassLabel(names=sorted(labels)),
201
+ }
202
+ )
203
+ self.info.task_templates = [self.CLASSIFICATION_TASK.align_with_features(self.info.features)]
204
+ else:
205
+ self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
206
+
207
+ if add_metadata:
208
+ # Warn if there are duplicated keys in metadata compared to the existing features
209
+ # (`BASE_COLUMN_NAME`, optionally "label")
210
+ duplicated_keys = set(self.info.features) & set(metadata_features)
211
+ if duplicated_keys:
212
+ logger.warning(
213
+ f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
214
+ f"the features dictionary."
215
+ )
216
+ # skip metadata duplicated keys
217
+ self.info.features.update(
218
+ {
219
+ feature: metadata_features[feature]
220
+ for feature in metadata_features
221
+ if feature not in duplicated_keys
222
+ }
223
+ )
224
+
225
+ return splits
226
+
227
+ def _split_files_and_archives(self, data_files):
228
+ files, archives = [], []
229
+ for data_file in data_files:
230
+ _, data_file_ext = os.path.splitext(data_file)
231
+ if data_file_ext.lower() in self.EXTENSIONS:
232
+ files.append(data_file)
233
+ elif os.path.basename(data_file) in self.METADATA_FILENAMES:
234
+ files.append(data_file)
235
+ else:
236
+ archives.append(data_file)
237
+ return files, archives
238
+
239
def _read_metadata(self, metadata_file, metadata_ext: str = ""):
    """Load a metadata file into a ``pyarrow.Table``.

    ``.csv`` metadata goes through ``pd.read_csv`` before conversion; any other
    extension is treated as JSON Lines and parsed with ``pyarrow.json``.
    """
    if metadata_ext == ".csv":
        # `pd.read_csv` is slower than `pyarrow.csv.read_csv`, but keeps parsing
        # behavior consistent with the CSV packaged module.
        dataframe = pd.read_csv(metadata_file)
        return pa.Table.from_pandas(dataframe)
    with open(metadata_file, "rb") as metadata_stream:
        return paj.read_json(metadata_stream)
246
+
247
def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
    """Yield ``(key, example)`` pairs for one split.

    Args:
        files: list of ``(original_file, downloaded_file_or_dir)`` pairs; ``original_file``
            is ``None`` when the entry is an iterator over files extracted from an archive.
        metadata_files: mapping of split name to a list of
            ``(original_metadata_file, downloaded_metadata_file)`` pairs.
        split_name (str): name of the split being generated.
        add_metadata (bool): whether to join each sample with a row from its nearest metadata file.
        add_labels (bool): whether to add a ``"label"`` column taken from the parent directory name.

    Raises:
        ValueError: when metadata is expected but no metadata file covers a sample's
            directory, or when a sample has no row in the matched metadata file.
    """
    split_metadata_files = metadata_files.get(split_name, [])
    # Template of None values for every feature except the base column, so all yielded
    # examples share the same key set even when metadata rows are sparse.
    sample_empty_metadata = (
        {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
    )
    # Cache of the most recently matched metadata file, keyed by the directory
    # currently being walked; avoids re-reading metadata for every sample.
    last_checked_dir = None
    metadata_dir = None
    metadata_dict = None
    downloaded_metadata_file = None

    # All metadata files in a split share one extension (validated in _split_generators).
    metadata_ext = ""
    if split_metadata_files:
        metadata_ext = {
            os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
        }
        metadata_ext = metadata_ext.pop()

    file_idx = 0
    for original_file, downloaded_file_or_dir in files:
        if original_file is not None:
            # Case 1: a plain (non-archived) file; metadata is matched against original paths.
            _, original_file_ext = os.path.splitext(original_file)
            if original_file_ext.lower() in self.EXTENSIONS:
                if add_metadata:
                    # If the file is a file of a needed type, and we've just entered a new directory,
                    # find the nearest metadata file (by counting path segments) for the directory
                    current_dir = os.path.dirname(original_file)
                    if last_checked_dir is None or last_checked_dir != current_dir:
                        last_checked_dir = current_dir
                        # Candidate = any non-archived metadata file whose directory is an
                        # ancestor of (or the same as) the sample's directory.
                        metadata_file_candidates = [
                            (
                                os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
                                metadata_file_candidate,
                                downloaded_metadata_file,
                            )
                            for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
                            if metadata_file_candidate
                            is not None  # ignore metadata_files that are inside archives
                            and not os.path.relpath(
                                original_file, os.path.dirname(metadata_file_candidate)
                            ).startswith("..")
                        ]
                        if metadata_file_candidates:
                            # Nearest metadata file = fewest path segments between it and the sample.
                            _, metadata_file, downloaded_metadata_file = min(
                                metadata_file_candidates, key=lambda x: count_path_segments(x[0])
                            )
                            pa_metadata_table = self._read_metadata(
                                downloaded_metadata_file, metadata_ext=metadata_ext
                            )
                            pa_file_name_array = pa_metadata_table["file_name"]
                            pa_metadata_table = pa_metadata_table.drop(["file_name"])
                            metadata_dir = os.path.dirname(metadata_file)
                            # Map normalized, forward-slash relative file names to metadata rows.
                            metadata_dict = {
                                os.path.normpath(file_name).replace("\\", "/"): sample_metadata
                                for file_name, sample_metadata in zip(
                                    pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
                                )
                            }
                        else:
                            raise ValueError(
                                f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
                            )
                    if metadata_dir is not None and downloaded_metadata_file is not None:
                        file_relpath = os.path.relpath(original_file, metadata_dir)
                        file_relpath = file_relpath.replace("\\", "/")
                        if file_relpath not in metadata_dict:
                            raise ValueError(
                                f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
                            )
                        sample_metadata = metadata_dict[file_relpath]
                    else:
                        raise ValueError(
                            f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
                        )
                else:
                    sample_metadata = {}
                if add_labels:
                    # Label = name of the sample's immediate parent directory.
                    sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
                else:
                    sample_label = {}
                yield (
                    file_idx,
                    {
                        **sample_empty_metadata,
                        self.BASE_COLUMN_NAME: downloaded_file_or_dir,
                        **sample_metadata,
                        **sample_label,
                    },
                )
                file_idx += 1
        else:
            # Case 2: files coming from an extracted archive; metadata is matched
            # against the *downloaded* paths (only in-archive metadata files apply).
            for downloaded_dir_file in downloaded_file_or_dir:
                _, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
                if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
                    if add_metadata:
                        current_dir = os.path.dirname(downloaded_dir_file)
                        if last_checked_dir is None or last_checked_dir != current_dir:
                            last_checked_dir = current_dir
                            metadata_file_candidates = [
                                (
                                    os.path.relpath(
                                        downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
                                    ),
                                    metadata_file_candidate,
                                    downloaded_metadata_file,
                                )
                                for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
                                if metadata_file_candidate
                                is None  # ignore metadata_files that are not inside archives
                                and not os.path.relpath(
                                    downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
                                ).startswith("..")
                            ]
                            if metadata_file_candidates:
                                _, metadata_file, downloaded_metadata_file = min(
                                    metadata_file_candidates, key=lambda x: count_path_segments(x[0])
                                )
                                pa_metadata_table = self._read_metadata(
                                    downloaded_metadata_file, metadata_ext=metadata_ext
                                )
                                pa_file_name_array = pa_metadata_table["file_name"]
                                pa_metadata_table = pa_metadata_table.drop(["file_name"])
                                # NOTE: here the reference directory is the *downloaded* metadata
                                # file's directory (unlike Case 1, which uses the original path).
                                metadata_dir = os.path.dirname(downloaded_metadata_file)
                                metadata_dict = {
                                    os.path.normpath(file_name).replace("\\", "/"): sample_metadata
                                    for file_name, sample_metadata in zip(
                                        pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
                                    )
                                }
                            else:
                                raise ValueError(
                                    f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
                                )
                        if metadata_dir is not None and downloaded_metadata_file is not None:
                            downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
                            downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
                            if downloaded_dir_file_relpath not in metadata_dict:
                                raise ValueError(
                                    f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
                                )
                            sample_metadata = metadata_dict[downloaded_dir_file_relpath]
                        else:
                            raise ValueError(
                                f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
                            )
                    else:
                        sample_metadata = {}
                    if add_labels:
                        sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
                    else:
                        sample_label = {}
                    yield (
                        file_idx,
                        {
                            **sample_empty_metadata,
                            self.BASE_COLUMN_NAME: downloaded_dir_file,
                            **sample_metadata,
                            **sample_label,
                        },
                    )
                    file_idx += 1
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/generator/__pycache__/generator.cpython-310.pyc ADDED
Binary file (1.68 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (189 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/json/__pycache__/json.cpython-310.pyc ADDED
Binary file (6.24 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__init__.py ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/pandas/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__init__.py ADDED
File without changes
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/parquet/parquet.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ from dataclasses import dataclass
3
+ from typing import List, Optional
4
+
5
+ import pyarrow as pa
6
+ import pyarrow.parquet as pq
7
+
8
+ import datasets
9
+ from datasets.table import table_cast
10
+
11
+
12
+ logger = datasets.utils.logging.get_logger(__name__)
13
+
14
+
15
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    # Rows per record batch when reading; defaults to the first row group's size (see _generate_tables).
    batch_size: Optional[int] = None
    # Subset of columns to load; when `features` is also given, both must name the same columns.
    columns: Optional[List[str]] = None
    # Optional explicit feature types; inferred from the first file's parquet schema otherwise.
    features: Optional[datasets.Features] = None
22
+
23
+
24
class Parquet(datasets.ArrowBasedBuilder):
    """Packaged builder that reads Parquet data files into Arrow tables."""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """Return the ``DatasetInfo``, validating that `columns` and `features` agree when both are set."""
        if (
            self.config.columns is not None
            and self.config.features is not None
            and set(self.config.columns) != set(self.config.features)
        ):
            # Fix: the previous code passed two separate arguments to ValueError
            # (trailing comma instead of string concatenation), so the exception
            # message rendered as a tuple. Build a single message string instead.
            raise ValueError(
                "The columns and features argument must contain the same columns, but got "
                f"{self.config.columns} and {self.config.features}"
            )
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # Single unnamed split -> TRAIN.
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features from the first file's arrow schema if not provided.
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        # Restrict the inferred features to the requested column subset.
        if self.config.columns is not None and set(self.config.columns) != set(self.info.features):
            self.info.features = datasets.Features(
                {col: feat for col, feat in self.info.features.items() if col in self.config.columns}
            )
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield ``(key, pa.Table)`` pairs, one per record batch of each parquet file."""
        if self.config.features is not None and self.config.columns is not None:
            if sorted(field.name for field in self.info.features.arrow_schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                if parquet_file.metadata.num_row_groups > 0:
                    # Default the batch size to the first row group's size.
                    batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows
                    try:
                        for batch_idx, record_batch in enumerate(
                            parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)
                        ):
                            pa_table = pa.Table.from_batches([record_batch])
                            # Uncomment for debugging (will print the Arrow table size and elements)
                            # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                            # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                            yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                    except ValueError as e:
                        logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                        raise
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/spark/__pycache__/spark.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/__pycache__/sql.cpython-310.pyc ADDED
Binary file (4.47 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/sql/sql.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ from dataclasses import dataclass
3
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
4
+
5
+ import pandas as pd
6
+ import pyarrow as pa
7
+
8
+ import datasets
9
+ import datasets.config
10
+ from datasets.features.features import require_storage_cast
11
+ from datasets.table import table_cast
12
+
13
+
14
+ if TYPE_CHECKING:
15
+ import sqlite3
16
+
17
+ import sqlalchemy
18
+
19
+
20
+ logger = datasets.utils.logging.get_logger(__name__)
21
+
22
+
23
@dataclass
class SqlConfig(datasets.BuilderConfig):
    """BuilderConfig for SQL.

    Most fields mirror the parameters of ``pandas.read_sql``; ``sql`` and ``con``
    are required and validated in ``__post_init__``.
    """

    sql: Union[str, "sqlalchemy.sql.Selectable"] = None
    con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] = None
    index_col: Optional[Union[str, List[str]]] = None
    coerce_float: bool = True
    params: Optional[Union[List, Tuple, Dict]] = None
    parse_dates: Optional[Union[List, Dict]] = None
    columns: Optional[List[str]] = None
    chunksize: Optional[int] = 10_000
    features: Optional[datasets.Features] = None

    def __post_init__(self):
        # Fail fast: both the query/selectable and the connection are mandatory.
        if self.sql is None:
            raise ValueError("sql must be specified")
        if self.con is None:
            raise ValueError("con must be specified")

    def create_config_id(
        self,
        config_kwargs: dict,
        custom_features: Optional[datasets.Features] = None,
    ) -> str:
        """Build a deterministic config id from kwargs that may contain unhashable objects.

        A ``sqlalchemy`` Selectable is stringified (so its hash is stable across runs)
        and a non-string connection is replaced by its ``id()``, with a hint logged
        that hashing it properly requires a URI string.
        """
        config_kwargs = config_kwargs.copy()
        # We need to stringify the Selectable object to make its hash deterministic

        # The process of stringifying is explained here: http://docs.sqlalchemy.org/en/latest/faq/sqlexpressions.html
        sql = config_kwargs["sql"]
        if not isinstance(sql, str):
            if datasets.config.SQLALCHEMY_AVAILABLE and "sqlalchemy" in sys.modules:
                import sqlalchemy

                if isinstance(sql, sqlalchemy.sql.Selectable):
                    # Compile against a throwaway engine of the same dialect as `con`.
                    engine = sqlalchemy.create_engine(config_kwargs["con"].split("://")[0] + "://")
                    sql_str = str(sql.compile(dialect=engine.dialect))
                    config_kwargs["sql"] = sql_str
                else:
                    raise TypeError(
                        f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
                    )
            else:
                raise TypeError(
                    f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}"
                )
        con = config_kwargs["con"]
        if not isinstance(con, str):
            # NOTE: id(con) is process-specific, so the resulting cache id is not
            # reproducible across runs — hence the logged recommendation below.
            config_kwargs["con"] = id(con)
            logger.info(
                f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. To enable hashing, specify 'con' as URI string instead."
            )

        return super().create_config_id(config_kwargs, custom_features=custom_features)

    @property
    def pd_read_sql_kwargs(self):
        # Extra keyword arguments forwarded verbatim to `pandas.read_sql`.
        pd_read_sql_kwargs = {
            "index_col": self.index_col,
            "columns": self.columns,
            "params": self.params,
            "coerce_float": self.coerce_float,
            "parse_dates": self.parse_dates,
        }
        return pd_read_sql_kwargs
88
+
89
+
90
class Sql(datasets.ArrowBasedBuilder):
    """Packaged builder that materializes a SQL query result as a dataset."""

    BUILDER_CONFIG_CLASS = SqlConfig

    def _info(self):
        """Dataset info carrying the user-provided features (may be None)."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        # A SQL query always produces a single TRAIN split; nothing to download.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})]

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        features = self.config.features
        if features is None:
            return pa_table
        schema = features.arrow_schema
        needs_storage_cast = any(require_storage_cast(feature) for feature in features.values())
        if needs_storage_cast:
            # More expensive cast; allows str <-> int/float or str to Audio, for example.
            return table_cast(pa_table, schema)
        # Cheaper cast: just select/reorder columns according to the schema.
        return pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)

    def _generate_tables(self):
        """Yield ``(chunk_idx, pa.Table)`` pairs, one per pandas chunk of the query result."""
        chunksize = self.config.chunksize
        reader = pd.read_sql(
            self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs
        )
        # With chunksize=None pandas returns a single DataFrame, not an iterator.
        chunks = [reader] if chunksize is None else reader
        for chunk_idx, frame in enumerate(chunks):
            yield chunk_idx, self._cast_table(pa.Table.from_pandas(frame))
evalkit_tf437/lib/python3.10/site-packages/datasets/packaged_modules/webdataset/__pycache__/webdataset.cpython-310.pyc ADDED
Binary file (6.1 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .parallel import parallel_backend, parallel_map, ParallelBackendConfig # noqa F401
evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/__pycache__/parallel.cpython-310.pyc ADDED
Binary file (4.48 kB). View file
 
evalkit_tf437/lib/python3.10/site-packages/datasets/parallel/parallel.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ from multiprocessing import Pool, RLock
3
+
4
+ from tqdm.auto import tqdm
5
+
6
+ from ..utils import experimental, logging
7
+
8
+
9
+ logger = logging.get_logger(__name__)
10
+
11
+
12
class ParallelBackendConfig:
    # Name of the joblib backend selected by `parallel_backend(...)`;
    # None (the default) means use multiprocessing.Pool instead of joblib.
    backend_name = None
14
+
15
+
16
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """
    **Experimental.** Apply a function to iterable elements in parallel, using either
    `multiprocessing.Pool` (default) or joblib (when a backend has been configured via
    `parallel_backend`).

    Args:
        function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
        iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
        num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
        types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
        disable_tqdm (`bool`): Whether to disable the tqdm progressbar.
        desc (`str`): Prefix for the tqdm progressbar.
        single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
            Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
            element of `iterable`, and `rank` is used for progress bar.
    """
    use_joblib = ParallelBackendConfig.backend_name is not None
    if use_joblib:
        return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
    return _map_with_multiprocessing_pool(
        function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
    )
39
+
40
+
41
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` over contiguous slices of `iterable` with multiprocessing.Pool.

    Args:
        function: function forwarded to `single_map_nested_func` for each element.
        iterable: sequence to split into `num_proc` contiguous slices.
        num_proc (int): requested number of worker processes (capped at `len(iterable)`).
        types (tuple): extra container types `single_map_nested_func` recurses into.
        disable_tqdm (bool): whether to disable the tqdm progress bar in workers.
        desc (str): prefix for the tqdm progress bar.
        single_map_nested_func: callable run in each worker on one slice.

    Returns:
        list: the flattened per-slice results, preserving input order.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    # We organize the splits ourselves (contiguous splits).
    # Fix: `div` and `mod` are loop-invariant — hoist them out of the loop
    # (previously recomputed on every iteration).
    div, mod = divmod(len(iterable), num_proc)
    split_kwds = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        # The first `mod` slices get one extra element each.
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    # Sanity check: the slices must cover the input exactly.
    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    # Share tqdm's write lock with the workers so their progress bars don't interleave.
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
        logger.info(f"Finished {num_proc} processes")
        # Flatten the per-slice result lists back into a single list.
        mapped = [obj for proc_res in mapped for obj in proc_res]
        logger.info(f"Unpacked {len(mapped)} objects")

    return mapped
71
+
72
+
73
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply `single_map_nested_func` to each element of `iterable` via joblib.

    Uses the backend configured in `ParallelBackendConfig.backend_name` with
    `num_proc` jobs. `disable_tqdm` and `desc` are accepted for signature parity
    but the progress bar is always disabled here (see the note below).
    """
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        # rank=None and disable_tqdm=True are hard-coded in the per-element call.
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
82
+
83
+
84
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """
    **Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
    implemented by joblib.

    Args:
        backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.

    Example usage:
    ```py
    with parallel_backend('spark'):
        dataset = load_dataset(..., num_proc=2)
    ```
    """
    # The backend is communicated to `parallel_map` through module-level state;
    # the `finally` below guarantees it is reset even if the body raises.
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
evalkit_tf437/lib/python3.10/site-packages/datasets/tasks/__init__.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ..utils.logging import get_logger
4
+ from .audio_classification import AudioClassification
5
+ from .automatic_speech_recognition import AutomaticSpeechRecognition
6
+ from .base import TaskTemplate
7
+ from .image_classification import ImageClassification
8
+ from .language_modeling import LanguageModeling
9
+ from .question_answering import QuestionAnsweringExtractive
10
+ from .summarization import Summarization
11
+ from .text_classification import TextClassification
12
+
13
+
14
+ __all__ = [
15
+ "AutomaticSpeechRecognition",
16
+ "AudioClassification",
17
+ "ImageClassification",
18
+ "LanguageModeling",
19
+ "QuestionAnsweringExtractive",
20
+ "Summarization",
21
+ "TaskTemplate",
22
+ "TextClassification",
23
+ ]
24
+
25
+ logger = get_logger(__name__)
26
+
27
+
28
+ NAME2TEMPLATE = {
29
+ AutomaticSpeechRecognition.task: AutomaticSpeechRecognition,
30
+ AudioClassification.task: AudioClassification,
31
+ ImageClassification.task: ImageClassification,
32
+ LanguageModeling.task: LanguageModeling,
33
+ QuestionAnsweringExtractive.task: QuestionAnsweringExtractive,
34
+ Summarization.task: Summarization,
35
+ TextClassification.task: TextClassification,
36
+ }
37
+
38
+
39
def task_template_from_dict(task_template_dict: dict) -> Optional[TaskTemplate]:
    """Create one of the supported task templates in :py:mod:`datasets.tasks` from a dictionary.

    Returns:
        The instantiated template, or None (with a warning logged) when the dict has
        no "task" key or names a task with no registered template.
    """
    task_name = task_template_dict.get("task")
    if task_name is None:
        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
        return None
    template = NAME2TEMPLATE.get(task_name)
    if template is None:
        # Fix: an unknown (non-None) task name previously fell through to
        # `template.from_dict(...)` and raised AttributeError on None instead of
        # returning None as the signature advertises.
        logger.warning(f"Couldn't find template for task '{task_name}'. Available templates: {list(NAME2TEMPLATE)}")
        return None
    return template.from_dict(task_template_dict)