Columns: text (string, lengths 1 to 1.02k), class_index (int64, range 0 to 271), source (string, 76 distinct values)
def to_parquet( self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, storage_options: Optional[dict] = None, **parquet_writer_kwargs, ) -> int: """Exports the dataset to parquet Args: path_or_buf (`PathLike` or `FileOrBu...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.19.0"/> **parquet_writer_kwargs (additional keyword arguments): Parameters to pass to PyArrow's `pyarrow.parquet.ParquetWriter`. Returns: `int`: The number of characters or bytes written. Example: ```py >>> ds.to_parquet("path/...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
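A minimal sketch of the `to_parquet` export described above, assuming a small in-memory `Dataset` built with `Dataset.from_dict` (the data and file name are illustrative):

```python
from datasets import Dataset

# Build a tiny in-memory dataset (illustrative data).
ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})

# Export to a Parquet file; the return value is the number of characters or bytes written.
num_bytes = ds.to_parquet("my_dataset.parquet")
print(num_bytes)
```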
Args: name (`str`): Name of SQL table. con (`str` or `sqlite3.Connection` or `sqlalchemy.engine.Connection` or `sqlalchemy.engine.Engine`): A [URI string](https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls) or a SQLite3/SQLAlchemy connection obj...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
If you would like to write the index, pass `index=True` and also set a name for the index column by passing `index_label`. </Changed> Returns: `int`: The number of records written. Example: ```py >>> # con provided as a connection URI strin...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
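A hedged sketch of `to_sql` with a SQLite connection, one of the connection types allowed by the `con` argument above (table and database names are illustrative):

```python
import sqlite3

from datasets import Dataset

ds = Dataset.from_dict({"text": ["good", "bad"], "label": [1, 0]})

# Write the rows to a SQLite table; the return value is the number of records written.
with sqlite3.connect("reviews.db") as con:
    num_rows = ds.to_sql("reviews", con)
print(num_rows)
```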
# Find decodable columns, because if there are any, we need to # adjust the dataset size computation (needed for sharding) to account for possible external files decodable_columns = [ k for k, v in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True) ] ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
table = self.with_format("arrow")[:1000] table_visitor(table, extra_nbytes_visitor) extra_nbytes = extra_nbytes * len(self.data) / len(table) dataset_nbytes = dataset_nbytes + extra_nbytes if self._indices is not None: dataset_nbytes = dataset_nbytes * len(self....
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
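An illustrative re-computation of the size estimate above: the external-file bytes measured on the first rows are scaled up proportionally to the full dataset length (all numbers below are made up):

```python
# Hypothetical numbers: bytes of external files measured on a 1,000-row sample.
sampled_rows = 1_000
sampled_extra_nbytes = 4_200_000
total_rows = 250_000

# Scale the sample measurement up to the whole dataset, as in the snippet above.
estimated_extra_nbytes = sampled_extra_nbytes * total_rows / sampled_rows
print(int(estimated_extra_nbytes))  # 1050000000
```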
def to_iterable_dataset(self, num_shards: Optional[int] = 1) -> "IterableDataset": """Get an [`datasets.IterableDataset`] from a map-style [`datasets.Dataset`]. This is equivalent to loading a dataset in streaming mode with [`datasets.load_dataset`], but much faster since the data is streamed from local...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
For the best speed, make sure your dataset doesn't have an indices mapping. If it does, the data is not read contiguously, which can sometimes be slow. You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed before switch...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Basic usage: ```python >>> ids = ds.to_iterable_dataset() >>> for example in ids: ... pass ``` With lazy filtering and processing: ```python >>> ids = ds.to_iterable_dataset() >>> ids = ids.filter(filter_fn).map(process_fn) # will filter and ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
With a PyTorch DataLoader: ```python >>> import torch >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.filter(filter_fn).map(process_fn) >>> dataloader = torch.utils.data.DataLoader(ids, num_workers=4) # will assign 64 / 4 = 16 shards to each worker to load, filter ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
In a distributed setup like PyTorch DDP with a PyTorch DataLoader and shuffling: ```python >>> from datasets.distributed import split_dataset_by_node >>> ids = ds.to_iterable_dataset(num_shards=512) >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and us...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
With shuffling and multiple epochs: ```python >>> ids = ds.to_iterable_dataset(num_shards=64) >>> ids = ids.shuffle(buffer_size=10_000, seed=42) # will shuffle the shards order and use a shuffle buffer when you start iterating >>> for epoch in range(n_epochs): ... ids.set_ep...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if self._format_type is not None: raise NotImplementedError( "Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset" ) if num_shards > len(self): ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards) ] ) ex_iterable = ArrowExamplesIterable( Dataset._generate_tables_from_shards, kwargs={"shards": shards, "batch_size": config.DEFAULT_MAX_BATCH_SIZE}, ) ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
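The shards above are produced with contiguous sharding. A small sketch of what `Dataset.shard(..., contiguous=True)` yields (illustrative data):

```python
from datasets import Dataset

ds = Dataset.from_dict({"i": list(range(10))})

# Contiguous sharding: shard 0 gets the first rows, shard 1 the next block, and so on.
shards = [ds.shard(num_shards=4, index=i, contiguous=True) for i in range(4)]
print([len(shard) for shard in shards])  # [3, 3, 2, 2]
print(shards[0]["i"])  # [0, 1, 2]
```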
def _push_parquet_shards_to_hub( self, repo_id: str, data_dir: str = "data", split: Optional[str] = None, token: Optional[str] = None, revision: Optional[str] = None, create_pr: Optional[bool] = False, max_shard_size: Optional[Union[int, str]] = None, ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Returns: additions (`List[CommitOperation]`): list of the `CommitOperationAdd` of the uploaded shards uploaded_size (`int`): number of bytes uploaded to the repository dataset_nbytes (`int`): approximate size in bytes of the uploaded dataset after uncompression """ # F...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) if decodable_columns: from .io.parquet import get_writer_batch_size def shards_with_embedded_external_files(shards: Iterator[Dataset]) -> Iterator[Dataset]: for shard in sh...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
uploaded_size = 0 additions = [] for index, shard in hf_tqdm( enumerate(shards), desc="Uploading the dataset shards", total=num_shards, ): shard_path_in_repo = f"{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet" buffer = Bytes...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
def push_to_hub( self, repo_id: str, config_name: str = "default", set_default: Optional[bool] = None, split: Optional[str] = None, data_dir: Optional[str] = None, commit_message: Optional[str] = None, commit_description: Optional[str] = None, priv...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
The resulting Parquet files are self-contained by default. If your dataset contains [`Image`], [`Audio`] or [`Video`] data, the Parquet files will store the bytes of your images or audio files. You can disable this by setting `embed_external_files` to `False`.
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: repo_id (`str`): The ID of the repository to push to in the following format: `<user>/<dataset_name>` or `<org>/<dataset_name>`. Also accepts `<dataset_name>`, which will default to the namespace of the logged-in user. config_name (`str`, def...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.17.0"/> commit_message (`str`, *optional*): Message to commit while pushing. Will default to `"Upload dataset"`. commit_description (`str`, *optional*): Description of the commit that will be created. Additionally, description of ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.16.0"/> private (`bool`, *optional*): Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists. token (`str`, *optional*): ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
<Added version="2.15.0"/> max_shard_size (`int` or `str`, *optional*, defaults to `"500MB"`): The maximum size of the dataset shards to be uploaded to the hub. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). num_shards (`int`, *opti...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```python >>> dataset.push_to_hub("<organization>/<dataset_id>") >>> dataset_dict.push_to_hub("<organization>/<dataset_id>", private=True) >>> dataset.push_to_hub("<organization>/<dataset_id>", max_shard_size="1GB") >>> dataset.push_to_hub("<organization>/<dataset_id>", num_shards=1024) ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```python >>> english_dataset.push_to_hub("<organization>/<dataset_id>", "en") >>> french_dataset.push_to_hub("<organization>/<dataset_id>", "fr") >>> # later >>> english_dataset = load_dataset("<organization>/<dataset_id>", "en") >>> french_dataset = load_dataset("<organization>...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
raise ValueError("`config_name` cannot be 'data'. Please, choose another name for configuration.")
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if max_shard_size is not None and num_shards is not None: raise ValueError( "Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both." ) if split is None: split = str(self.split) if self.split is not None else "train" ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if not data_dir: data_dir = config_name if config_name != "default" else "data" # for backward compatibility additions, uploaded_size, dataset_nbytes = self._push_parquet_shards_to_hub( repo_id=repo_id, data_dir=data_dir, split=split, token=token, ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# Check if the repo already has a README.md and/or a dataset_infos.json to update them with the new split info (size and pattern) # and delete old split shards (if they exist) repo_with_dataset_card, repo_with_dataset_infos = False, False deletions, deleted_size = [], 0 repo_splits = [] ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
repo_file.rfilename.startswith(f"{data_dir}/{split}-") and repo_file.rfilename not in repo_files_to_add ): deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) deleted_size += repo_file.size elif fnmatch.fnmatch( repo_file.rfil...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
organization, dataset_name = repo_id.split("/") if "/" in repo_id else (None, repo_id) info_to_dump = self.info.copy() info_to_dump.download_checksums = None info_to_dump.download_size = uploaded_size info_to_dump.dataset_size = dataset_nbytes info_to_dump.size_in_bytes = uploade...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data) if dataset_infos and config_name in dataset_infos: repo_info = dataset_infos[config_name] else: repo_info = None # get the deprecated dataset_infos.json to update ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
dataset_card_data = DatasetCardData() metadata_configs = MetadataConfigs() repo_info = None # update the total info to dump from existing info if repo_info is not None: logger.info("Updating downloaded metadata with the new split.") if repo_info.splits and...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if split in repo_info.splits: repo_info.download_size -= deleted_size repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
repo_info.download_checksums = None repo_info.download_size = (repo_info.download_size or 0) + uploaded_size repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# update the metadata configs if config_name in metadata_configs: metadata_config = metadata_configs[config_name] if "data_files" in metadata_config: data_files_to_dump = sanitize_patterns(metadata_config["data_files"]) else: data_files_to_dump...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
default_config_name = metadata_configs.get_default_config_name() if default_config_name == "default": raise ValueError( "There exists a configuration named 'default'. To set a different configuration as default, " "rename the 'default' ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
buffer.write(json.dumps(dataset_infos, indent=4).encode("utf-8")) additions.append( CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer) ) # push to README DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dat...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
commit_message = commit_message if commit_message is not None else "Upload dataset" if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: commit_info = api.create_commit( repo_id, operations=additions + deletions, commit_message=commit_message, ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
] + (deletions if i == 0 else []) commit_info = api.create_commit( repo_id, operations=operations, commit_message=commit_message + f" (part {i:05d}-of-{num_commits:05d})", commit_description=commit_description, ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
@transmit_format @fingerprint_transform(inplace=False) def add_column( self, name: str, column: Union[list, np.array], new_fingerprint: str, feature: Optional[FeatureType] = None ): """Add column to Dataset. <Added version="1.7"/> Args: name (`str`): ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
if feature: pyarrow_schema = Features({name: feature}).arrow_schema else: pyarrow_schema = None column_table = InMemoryTable.from_pydict({name: column}, schema=pyarrow_schema) _check_column_names(self._data.column_names + column_table.column_names) dataset = self...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
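A short sketch of `add_column` (illustrative data); the new column must have as many entries as the dataset has rows:

```python
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"]})

# Append a new column; its length must match the number of rows.
ds = ds.add_column("label", [0, 1, 0])
print(ds.column_names)  # ['text', 'label']
```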
def add_faiss_index( self, column: str, index_name: Optional[str] = None, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 batch_size: int = 1...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: column (`str`): The column of the vectors to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the `index_name` that is used to call [`~datasets.Dataset.get_nearest_examples`] or [`~datasets.D...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`): Size of the batch to use while adding vectors to the `FaissIndex`. Default value is `1000`. <Added version="2.4.0...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Example: ```python >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])}) >>> ds_with_embeddings.add_faiss_index(column='embeddings') >>> # query >>> scores, retrieved_exam...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
>>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> # load index >>> ds.load_faiss_index('embeddings', 'my_index.faiss') >>> # query >>> scores, retrieved_examples = ds.get_nearest_examples('embeddings', embed('my new query'), k=10) ``` """ with ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
def add_faiss_index_from_external_arrays( self, external_arrays: np.array, index_name: str, device: Optional[int] = None, string_factory: Optional[str] = None, metric_type: Optional[int] = None, custom_index: Optional["faiss.Index"] = None, # noqa: F821 b...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: external_arrays (`np.array`): If you want to use arrays from outside the lib for the index, you can set `external_arrays`. It will use `external_arrays` to create the Faiss index instead of the arrays in the given `column`. index_name (`str`): ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
metric_type (`int`, *optional*): Type of metric. Ex: `faiss.faiss.METRIC_INNER_PRODUCT` or `faiss.METRIC_L2`. custom_index (`faiss.Index`, *optional*): Custom Faiss index that you already have instantiated and configured for your needs. batch_size (`int`, *optiona...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose, )
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
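A hedged sketch of `add_faiss_index_from_external_arrays`, assuming `faiss` is installed and a dataset `ds` already exists; the embedding array is random and purely illustrative:

```python
import numpy as np

# Hypothetical embeddings computed outside the library, one vector per row of `ds`.
external = np.random.rand(len(ds), 128).astype("float32")

# Build the index from the external arrays instead of a dataset column.
ds.add_faiss_index_from_external_arrays(external_arrays=external, index_name="embeddings")

# Query the index with one of the vectors.
scores, retrieved_examples = ds.get_nearest_examples("embeddings", external[0], k=5)
```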
def add_elasticsearch_index( self, column: str, index_name: Optional[str] = None, host: Optional[str] = None, port: Optional[int] = None, es_client: Optional["elasticsearch.Elasticsearch"] = None, # noqa: F821 es_index_name: Optional[str] = None, es_index...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: column (`str`): The column of the documents to add to the index. index_name (`str`, *optional*): The `index_name`/identifier of the index. This is the index name that is used to call [`~Dataset.get_nearest_examples`] or [`~Dataset.search`]. ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Default config is: ``` { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", "stopwords": "_english_"}}}, }, ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```python >>> es_client = elasticsearch.Elasticsearch() >>> ds = datasets.load_dataset('crime_and_punish', split='train') >>> ds.add_elasticsearch_index(column='line', es_client=es_client, es_index_name="my_es_index") >>> scores, retrieved_examples = ds.get_nearest_examples('line', 'my n...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
Args: item (`dict`): Item data to be added. Returns: [`Dataset`] Example:
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
```py >>> from datasets import load_dataset >>> ds = load_dataset("rotten_tomatoes", split="validation") >>> new_review = {'label': 0, 'text': 'this movie is the absolute worst thing I have ever seen'} >>> ds = ds.add_item(new_review) >>> ds[-1] {'label': 0, 'text': 'this...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
] ) if self._indices is None: indices_table = None else: item_indices_array = pa.array([len(self._data)], type=pa.uint64()) item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=["indices"]) indices_table = concat_tables([self....
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> "Dataset": """Align the dataset's label ID and label name mapping to match an input `label2id` mapping. This is useful when you want to ensure that a model's predicted labels are aligned with the dataset. The alignment in ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
""" # Sanity checks if label_column not in self._data.column_names: raise ValueError(f"Column ({label_column}) not in table columns ({self._data.column_names}).") label_feature = self._info.features[label_column] if not ( isinstance(label_feature, ClassLabel) ...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
# Sort input mapping by ID value to ensure the label names are aligned label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) label_names = list(label2id.keys()) # Some label mappings use uppercase label names so we lowercase them during alignment label2id = {k.lower(): v fo...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
def process_label_ids(batch): dset_label_names = [ [int2str_function(label_id).lower() if label_id is not None else None for label_id in seq] for seq in batch[label_column] ] batch[label_column] = [ [label2id[lab...
4
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
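A sketch of `align_labels_with_mapping` following the docstring above; the MNLI mapping below is illustrative of a model whose label order differs from the dataset's:

```python
from datasets import load_dataset

# Label mapping used by the model; note the names are lowercased during alignment.
label2id = {"CONTRADICTION": 0, "NEUTRAL": 1, "ENTAILMENT": 2}

ds = load_dataset("glue", "mnli", split="train")
# Remap the dataset's label IDs so they match `label2id`.
ds = ds.align_labels_with_mapping(label2id, "label")
```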
class NumExamplesMismatchError(Exception): pass
5
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/arrow_dataset.py
class Url(str): pass
6
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
class EmptyDatasetError(FileNotFoundError): pass
7
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
class DataFilesList(List[str]): """ List of data files (absolute local paths or URLs). It has two construction methods given the user's data files patterns: - ``from_hf_repo``: resolve patterns inside a dataset repository - ``from_local_or_remote``: resolve patterns from a local path Moreover, ...
8
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
def __add__(self, other: "DataFilesList") -> "DataFilesList": return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata) @classmethod def from_hf_repo( cls, patterns: List[str], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optiona...
8
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
@classmethod def from_local_or_remote( cls, patterns: List[str], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None...
8
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
@classmethod def from_patterns( cls, patterns: List[str], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else P...
8
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
def filter_extensions(self, extensions: List[str]) -> "DataFilesList": pattern = "|".join("\\" + ext for ext in extensions) pattern = re.compile(f".*({pattern})(\\..+)?$") return DataFilesList( [data_file for data_file in self if pattern.match(data_file)], origin_metadata...
8
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
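An illustrative breakdown of the regex built in `filter_extensions` above; the trailing `(\..+)?` lets files with one extra suffix, such as `*.csv.gz`, pass as well:

```python
import re

extensions = [".csv", ".parquet"]
# Same construction as above: escape each extension and allow one optional extra suffix.
pattern = re.compile(".*(" + "|".join("\\" + ext for ext in extensions) + ")(\\..+)?$")

print(bool(pattern.match("train.csv")))     # True
print(bool(pattern.match("train.csv.gz")))  # True
print(bool(pattern.match("train.json")))    # False
```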
class DataFilesDict(Dict[str, DataFilesList]): """ Dict of split_name -> list of data files (absolute local paths or URLs). It has two construction methods given the user's data files patterns: - ``from_hf_repo``: resolve patterns inside a dataset repository - ``from_local_or_remote``: resolve patt...
9
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
@classmethod def from_local_or_remote( cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() ...
9
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
@classmethod def from_hf_repo( cls, patterns: Dict[str, Union[List[str], DataFilesList]], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = Non...
9
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
@classmethod def from_patterns( cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() ...
9
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
def filter_extensions(self, extensions: List[str]) -> "DataFilesDict": out = type(self)() for key, data_files_list in self.items(): out[key] = data_files_list.filter_extensions(extensions) return out
9
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
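A hedged sketch of how these internal helpers resolve split patterns into concrete file lists, assuming matching local Parquet files exist under `data/` (the paths are illustrative):

```python
from datasets.data_files import DataFilesDict

# Resolve split -> glob patterns into concrete file lists (local paths in this case).
data_files = DataFilesDict.from_patterns(
    {"train": ["data/train-*.parquet"], "test": ["data/test-*.parquet"]}
)
print(list(data_files))     # ['train', 'test']
print(data_files["train"])  # resolved file paths for the train split
```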
class DataFilesPatternsList(List[str]): """ List of data files patterns (absolute local paths or URLs). For each pattern there should also be a list of allowed extensions to keep, or None to keep all the files for the pattern. """ def __init__( self, patterns: List[str], ...
10
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
def resolve( self, base_path: str, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() data_files = [] for pattern, allowed_extensions in zip(self, self.allowed_extensi...
10
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsList": return DataFilesPatternsList( self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions] )
10
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]): """ Dict of split_name -> list of data files patterns (absolute local paths or URLs). """ @classmethod def from_patterns( cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None ) -> "DataFilesPat...
11
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
def resolve( self, base_path: str, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = DataFilesDict() for key, data_files_patterns_list in self.items(): out[key] = data_files_patterns_list.resolve(base_path, download_config) retu...
11
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/data_files.py
class _BaseExamplesIterable: """Base class for the examples iterable used by an IterableDataset""" def __init__(self) -> None: self._state_dict: Optional[Union[list, dict]] = None def __iter__(self) -> Iterator[Tuple[Key, dict]]: """An examples iterable should yield tuples (example_key, ex...
12
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "_BaseExamplesIterable": """ Either shuffle the shards/sources of the dataset, or propagate the shuffling to the underlying iterable. If the order of the shards must stay fixed (when using .skip or .take for example), then this me...
12
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def split_shard_indices_by_worker(self, num_shards: int, index: int, contiguous=True) -> List[int]: if contiguous: div = self.num_shards // num_shards mod = self.num_shards % num_shards start = div * index + min(index, mod) end = start + div + (1 if index < mod el...
12
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
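A standalone re-derivation of the contiguous case above (the helper name is made up for illustration):

```python
def contiguous_ranges(num_sources: int, num_workers: int):
    # Distribute `num_sources` shard indices over `num_workers` contiguous blocks,
    # giving the first `mod` workers one extra shard, as in the snippet above.
    div, mod = divmod(num_sources, num_workers)
    for index in range(num_workers):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        yield list(range(start, end))

print(list(contiguous_ranges(10, 4)))  # [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]
```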
def load_state_dict(self, state_dict: dict) -> dict: def _inner_load_state_dict(state, new_state): if new_state is not None and isinstance(state, dict): for key in new_state: state[key] = _inner_load_state_dict(state[key], new_state[key]) return st...
12
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class ExamplesIterable(_BaseExamplesIterable): def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict): super().__init__() self.generate_examples_fn = generate_examples_fn self.kwargs = kwargs def _init_state_dict(self) -> dict: self._state_dict =...
13
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self): shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0 for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None): shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict els...
13
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ExamplesIterable": """Keep only the requested shard.""" gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards) shard_indices = self.split_shard_indices_by_worker(num_shards, index, contiguous=c...
13
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class ShuffledDataSourcesExamplesIterable(ExamplesIterable): def __init__( self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator ): super().__init__(generate_examples_fn, kwargs) self.generator = deepcopy(generator) def _init_state...
14
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self): """Shuffle the kwargs order to shuffle shards""" rng = deepcopy(self.generator) kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0 for gen_kwags in islice( ...
14
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ExamplesIterable": """Keep only the requested shard.""" rng = deepcopy(self.generator) kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) return ExamplesIterable(self.generate_examples_fn, kwa...
14
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class ArrowExamplesIterable(_BaseExamplesIterable): def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict): super().__init__() self.generate_tables_fn = generate_tables_fn self.kwargs = kwargs @property def iter_arrow(self): return self._it...
15
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self): formatter = PythonFormatter() shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0 for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None): shard_example_idx_start = self._state_dict["shard...
15
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
self._state_dict["shard_example_idx"] += 1 yield key, example shard_example_idx += 1 if self._state_dict: self._state_dict["shard_idx"] += 1 self._state_dict["shard_example_idx"] = 0
15
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self): shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0 for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.num_shards), shard_idx_start, None): shard_example_idx_start = self._state_dict["shard_example_idx"] if self._state_dict ...
15
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def shuffle_data_sources(self, generator: np.random.Generator) -> "ArrowExamplesIterable": return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator) def shard_data_sources(self, num_shards: int, index: int, contiguous=True) -> "ArrowExamplesIterable": """Keep ...
15
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable): def __init__( self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict, generator: np.random.Generator, ): super().__init__(generate_tables_fn, kwargs) self.generator = deepc...
16
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def __iter__(self): """Shuffle the kwargs order to shuffle shards""" rng = deepcopy(self.generator) kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) formatter = PythonFormatter() shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0 ...
16
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
formatted_batch = formatter.format_batch(pa_subtable) for example in _batch_to_examples(formatted_batch): if shard_example_idx >= shard_example_idx_start: if self._state_dict: self._state_dict["shard_example_idx"] +=...
16
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py
def _iter_arrow(self): rng = deepcopy(self.generator) kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) shard_idx_start = self._state_dict["shard_idx"] if self._state_dict else 0 for gen_kwags in islice( _split_gen_kwargs(kwargs_with_shuffled_shards, max_num...
16
/Users/nielsrogge/Documents/python_projecten/datasets/src/datasets/iterable_dataset.py